Merge tag '6.7-rc-smb3-client-fixes-part2' of git://git.samba.org/sfrench/cifs-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 12 Nov 2023 01:17:22 +0000 (17:17 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 12 Nov 2023 01:17:22 +0000 (17:17 -0800)
Pull smb client fixes from Steve French:

 - ctime caching fix (for setxattr)

 - encryption fix

 - DNS resolver mount fix

 - debugging improvements

 - multichannel fixes including cases where server stops or starts
   supporting multichannel after mount

 - reconnect fix

 - minor cleanups

* tag '6.7-rc-smb3-client-fixes-part2' of git://git.samba.org/sfrench/cifs-2.6:
  cifs: update internal module version number for cifs.ko
  cifs: handle when server stops supporting multichannel
  cifs: handle when server starts supporting multichannel
  Missing field not being returned in ioctl CIFS_IOC_GET_MNT_INFO
  smb3: allow dumping session and tcon id to improve stats analysis and debugging
  smb: client: fix mount when dns_resolver key is not available
  smb3: fix caching of ctime on setxattr
  smb3: minor cleanup of session handling code
  cifs: reconnect work should have reference on server struct
  cifs: do not pass cifs_sb when trying to add channels
  cifs: account for primary channel in the interface list
  cifs: distribute channels across interfaces based on speed
  cifs: handle cases where a channel is closed
  smb3: more minor cleanups for session handling routines
  smb3: minor RDMA cleanup
  cifs: Fix encryption of cleared, but unset rq_iter data buffers

2196 files changed:
Documentation/ABI/testing/configfs-tsm [new file with mode: 0644]
Documentation/ABI/testing/sysfs-bus-cxl
Documentation/ABI/testing/sysfs-bus-i3c
Documentation/ABI/testing/sysfs-bus-vdpa
Documentation/admin-guide/kernel-parameters.txt
Documentation/admin-guide/media/mgb4.rst [new file with mode: 0644]
Documentation/admin-guide/media/pci-cardlist.rst
Documentation/admin-guide/media/v4l-drivers.rst
Documentation/admin-guide/media/visl.rst
Documentation/arch/arm64/elf_hwcaps.rst
Documentation/arch/riscv/hwprobe.rst
Documentation/arch/riscv/uabi.rst
Documentation/bpf/kfuncs.rst
Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/display/solomon,ssd132x.yaml
Documentation/devicetree/bindings/eeprom/at24.yaml
Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt [deleted file]
Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml
Documentation/devicetree/bindings/i3c/i3c.yaml
Documentation/devicetree/bindings/input/fsl,scu-key.yaml
Documentation/devicetree/bindings/input/touchscreen/cypress,tt21000.yaml
Documentation/devicetree/bindings/iommu/arm,smmu.yaml
Documentation/devicetree/bindings/leds/irled/pwm-ir-tx.yaml
Documentation/devicetree/bindings/mailbox/fsl,mu.yaml
Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.yaml
Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml
Documentation/devicetree/bindings/mailbox/xlnx,zynqmp-ipi-mailbox.yaml
Documentation/devicetree/bindings/media/amlogic,meson6-ir.yaml
Documentation/devicetree/bindings/media/cdns,csi2rx.yaml
Documentation/devicetree/bindings/media/i2c/hynix,hi846.yaml
Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/media/i2c/ovti,ov02a10.yaml
Documentation/devicetree/bindings/media/i2c/ovti,ov4689.yaml
Documentation/devicetree/bindings/media/i2c/ovti,ov5640.yaml
Documentation/devicetree/bindings/media/i2c/ovti,ov5642.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/media/i2c/ovti,ov5693.yaml
Documentation/devicetree/bindings/media/i2c/sony,imx214.yaml
Documentation/devicetree/bindings/media/i2c/sony,imx415.yaml
Documentation/devicetree/bindings/media/nokia,n900-ir [deleted file]
Documentation/devicetree/bindings/media/nuvoton,npcm-ece.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/media/nuvoton,npcm-vcd.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/media/qcom,sdm845-venus-v2.yaml
Documentation/devicetree/bindings/media/rockchip-vpu.yaml
Documentation/devicetree/bindings/media/samsung,exynos4212-fimc-is.yaml
Documentation/devicetree/bindings/media/samsung,fimc.yaml
Documentation/devicetree/bindings/media/ti,j721e-csi2rx-shim.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/media/video-interfaces.yaml
Documentation/devicetree/bindings/mtd/partitions/fixed-partitions.yaml
Documentation/devicetree/bindings/pwm/mxs-pwm.yaml
Documentation/devicetree/bindings/remoteproc/mtk,scp.yaml
Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
Documentation/devicetree/bindings/remoteproc/qcom,msm8996-mss-pil.yaml
Documentation/devicetree/bindings/remoteproc/qcom,sc7180-pas.yaml
Documentation/devicetree/bindings/remoteproc/qcom,sm6375-pas.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/remoteproc/ti,pru-rproc.yaml
Documentation/devicetree/bindings/rtc/cirrus,ep9301-rtc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/epson,rtc7301.txt [deleted file]
Documentation/devicetree/bindings/rtc/epson,rtc7301.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/maxim,mcp795.txt [deleted file]
Documentation/devicetree/bindings/rtc/microcrystal,rv3032.yaml
Documentation/devicetree/bindings/rtc/mstar,ssd202d-rtc.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/nxp,pcf2123.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/nxp,pcf8523.txt [deleted file]
Documentation/devicetree/bindings/rtc/nxp,pcf8523.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt [deleted file]
Documentation/devicetree/bindings/rtc/trivial-rtc.yaml
Documentation/devicetree/bindings/soc/nuvoton/nuvoton,gfxi.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/trivial-devices.yaml
Documentation/devicetree/bindings/watchdog/amlogic,meson-gxbb-wdt.yaml
Documentation/devicetree/bindings/watchdog/aspeed-wdt.txt
Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
Documentation/driver-api/i3c/protocol.rst
Documentation/driver-api/media/camera-sensor.rst
Documentation/driver-api/media/drivers/ccs/ccs.rst
Documentation/driver-api/media/v4l2-core.rst
Documentation/driver-api/media/v4l2-dev.rst
Documentation/driver-api/media/v4l2-videobuf.rst [deleted file]
Documentation/filesystems/nfs/exporting.rst
Documentation/filesystems/overlayfs.rst
Documentation/filesystems/porting.rst
Documentation/i2c/busses/i2c-i801.rst
Documentation/i2c/fault-codes.rst
Documentation/netlink/specs/devlink.yaml
Documentation/networking/smc-sysctl.rst
Documentation/trace/fprobetrace.rst
Documentation/trace/kprobetrace.rst
Documentation/translations/zh_CN/video4linux/v4l2-framework.txt
Documentation/userspace-api/media/drivers/camera-sensor.rst [new file with mode: 0644]
Documentation/userspace-api/media/drivers/index.rst
Documentation/userspace-api/media/drivers/npcm-video.rst [new file with mode: 0644]
Documentation/userspace-api/media/gen-errors.rst
Documentation/userspace-api/media/v4l/buffer.rst
Documentation/userspace-api/media/v4l/control.rst
Documentation/userspace-api/media/v4l/dev-subdev.rst
Documentation/userspace-api/media/v4l/dv-timings.rst
Documentation/userspace-api/media/v4l/pixfmt-reserved.rst
Documentation/userspace-api/media/v4l/pixfmt-srggb12p.rst
Documentation/userspace-api/media/v4l/subdev-formats.rst
MAINTAINERS
arch/arc/include/asm/kprobes.h
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/pxa_defconfig
arch/arm/configs/tegra_defconfig
arch/arm/include/asm/arm_pmuv3.h
arch/arm/include/asm/kprobes.h
arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
arch/arm64/include/asm/arm_pmuv3.h
arch/arm64/include/asm/kprobes.h
arch/arm64/include/asm/syscall_wrapper.h
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/smp.c
arch/mips/Kbuild.platforms
arch/mips/Kconfig
arch/mips/ar7/Makefile [deleted file]
arch/mips/ar7/Platform [deleted file]
arch/mips/ar7/clock.c [deleted file]
arch/mips/ar7/gpio.c [deleted file]
arch/mips/ar7/irq.c [deleted file]
arch/mips/ar7/memory.c [deleted file]
arch/mips/ar7/platform.c [deleted file]
arch/mips/ar7/prom.c [deleted file]
arch/mips/ar7/setup.c [deleted file]
arch/mips/ar7/time.c [deleted file]
arch/mips/boot/compressed/uart-16550.c
arch/mips/boot/dts/ingenic/jz4725b.dtsi
arch/mips/boot/dts/ingenic/jz4770.dtsi
arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
arch/mips/boot/dts/ralink/mt7621.dtsi
arch/mips/configs/ar7_defconfig [deleted file]
arch/mips/configs/fuloong2e_defconfig
arch/mips/configs/jazz_defconfig
arch/mips/configs/lemote2f_defconfig
arch/mips/configs/malta_defconfig
arch/mips/configs/malta_kvm_defconfig
arch/mips/configs/maltaup_xpa_defconfig
arch/mips/configs/rm200_defconfig
arch/mips/include/asm/kprobes.h
arch/mips/include/asm/mach-ar7/ar7.h [deleted file]
arch/mips/include/asm/mach-ar7/irq.h [deleted file]
arch/mips/include/asm/mach-ar7/prom.h [deleted file]
arch/mips/include/asm/mach-ar7/spaces.h [deleted file]
arch/mips/include/asm/mach-loongson32/dma.h [deleted file]
arch/mips/include/asm/mach-loongson32/nand.h [deleted file]
arch/mips/include/asm/mach-loongson32/platform.h
arch/mips/kernel/relocate_kernel.S
arch/mips/loongson32/common/platform.c
arch/mips/loongson32/ls1b/board.c
arch/mips/pci/fixup-lantiq.c
arch/powerpc/include/asm/kprobes.h
arch/powerpc/kernel/iommu.c
arch/riscv/Kconfig
arch/riscv/Kconfig.debug
arch/riscv/Makefile
arch/riscv/boot/Makefile
arch/riscv/configs/defconfig
arch/riscv/include/asm/acpi.h
arch/riscv/include/asm/asm-prototypes.h
arch/riscv/include/asm/asm.h
arch/riscv/include/asm/bitops.h
arch/riscv/include/asm/cpufeature.h
arch/riscv/include/asm/elf.h
arch/riscv/include/asm/entry-common.h
arch/riscv/include/asm/errata_list.h
arch/riscv/include/asm/hwcap.h
arch/riscv/include/asm/hwprobe.h
arch/riscv/include/asm/insn-def.h
arch/riscv/include/asm/irq_stack.h
arch/riscv/include/asm/page.h
arch/riscv/include/asm/pgtable-32.h
arch/riscv/include/asm/pgtable-64.h
arch/riscv/include/asm/pgtable-bits.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/processor.h
arch/riscv/include/asm/sbi.h
arch/riscv/include/asm/scs.h [new file with mode: 0644]
arch/riscv/include/asm/switch_to.h
arch/riscv/include/asm/thread_info.h
arch/riscv/include/asm/tlb.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/include/asm/vdso/processor.h
arch/riscv/include/asm/vector.h
arch/riscv/include/uapi/asm/elf.h
arch/riscv/include/uapi/asm/hwprobe.h
arch/riscv/kernel/Makefile
arch/riscv/kernel/acpi.c
arch/riscv/kernel/asm-offsets.c
arch/riscv/kernel/copy-unaligned.S
arch/riscv/kernel/cpu.c
arch/riscv/kernel/cpufeature.c
arch/riscv/kernel/entry.S
arch/riscv/kernel/fpu.S
arch/riscv/kernel/head.S
arch/riscv/kernel/hibernate-asm.S
arch/riscv/kernel/irq.c
arch/riscv/kernel/kexec_relocate.S
arch/riscv/kernel/mcount-dyn.S
arch/riscv/kernel/mcount.S
arch/riscv/kernel/module.c
arch/riscv/kernel/probes/rethook_trampoline.S
arch/riscv/kernel/probes/simulate-insn.c
arch/riscv/kernel/probes/uprobes.c
arch/riscv/kernel/process.c
arch/riscv/kernel/sbi.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/signal.c
arch/riscv/kernel/smpboot.c
arch/riscv/kernel/suspend_entry.S
arch/riscv/kernel/sys_riscv.c
arch/riscv/kernel/tests/Kconfig.debug [new file with mode: 0644]
arch/riscv/kernel/tests/Makefile [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/Makefile [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/test_module_linking_main.c [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/test_set16.S [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/test_set32.S [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/test_set6.S [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/test_set8.S [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/test_sub16.S [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/test_sub32.S [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/test_sub6.S [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/test_sub64.S [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/test_sub8.S [new file with mode: 0644]
arch/riscv/kernel/tests/module_test/test_uleb128.S [new file with mode: 0644]
arch/riscv/kernel/traps.c
arch/riscv/kernel/traps_misaligned.c
arch/riscv/kernel/vdso/Makefile
arch/riscv/kernel/vdso/flush_icache.S
arch/riscv/kernel/vdso/getcpu.S
arch/riscv/kernel/vdso/hwprobe.c
arch/riscv/kernel/vdso/rt_sigreturn.S
arch/riscv/kernel/vdso/sys_hwprobe.S
arch/riscv/kernel/vdso/vdso.lds.S
arch/riscv/kvm/aia.c
arch/riscv/kvm/main.c
arch/riscv/kvm/tlb.c
arch/riscv/kvm/vcpu_fp.c
arch/riscv/kvm/vcpu_onereg.c
arch/riscv/kvm/vcpu_vector.c
arch/riscv/lib/clear_page.S
arch/riscv/lib/memcpy.S
arch/riscv/lib/memmove.S
arch/riscv/lib/memset.S
arch/riscv/lib/uaccess.S
arch/riscv/mm/Makefile
arch/riscv/mm/cache-ops.c [new file with mode: 0644]
arch/riscv/mm/cacheflush.c
arch/riscv/mm/dma-noncoherent.c
arch/riscv/mm/init.c
arch/riscv/mm/pageattr.c
arch/riscv/mm/pmem.c
arch/riscv/mm/ptdump.c
arch/riscv/mm/tlbflush.c
arch/riscv/purgatory/Makefile
arch/riscv/purgatory/entry.S
arch/s390/Kconfig
arch/s390/boot/ipl_parm.c
arch/s390/boot/startup.c
arch/s390/boot/vmem.c
arch/s390/include/asm/kprobes.h
arch/s390/include/asm/mmu.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/page-states.h
arch/s390/include/asm/page.h
arch/s390/include/asm/pci.h
arch/s390/include/asm/pci_clp.h
arch/s390/include/asm/pci_dma.h
arch/s390/include/asm/pgalloc.h
arch/s390/include/asm/setup.h
arch/s390/include/asm/stacktrace.h
arch/s390/include/asm/tlb.h
arch/s390/kernel/early.c
arch/s390/kernel/perf_event.c
arch/s390/kernel/stacktrace.c
arch/s390/mm/gmap.c
arch/s390/mm/init.c
arch/s390/mm/page-states.c
arch/s390/mm/pgalloc.c
arch/s390/mm/vmem.c
arch/s390/pci/Makefile
arch/s390/pci/pci.c
arch/s390/pci/pci_bus.c
arch/s390/pci/pci_debug.c
arch/s390/pci/pci_dma.c [deleted file]
arch/s390/pci/pci_event.c
arch/s390/pci/pci_sysfs.c
arch/sh/include/asm/kprobes.h
arch/sparc/include/asm/kprobes.h
arch/x86/coco/tdx/tdx.c
arch/x86/include/asm/kprobes.h
arch/x86/include/asm/shared/tdx.h
arch/x86/include/asm/tdx.h
block/blk-core.c
crypto/Kconfig
crypto/ahash.c
drivers/Kconfig
drivers/Makefile
drivers/acpi/Kconfig
drivers/acpi/riscv/rhct.c
drivers/acpi/tables.c
drivers/ata/libata-core.c
drivers/ata/pata_falcon.c
drivers/ata/pata_gayle.c
drivers/base/regmap/regmap.c
drivers/block/nbd.c
drivers/block/virtio_blk.c
drivers/clocksource/timer-riscv.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/qcom-cpufreq-nvmem.c
drivers/cxl/acpi.c
drivers/cxl/core/core.h
drivers/cxl/core/hdm.c
drivers/cxl/core/mbox.c
drivers/cxl/core/memdev.c
drivers/cxl/core/pci.c
drivers/cxl/core/port.c
drivers/cxl/core/region.c
drivers/cxl/core/regs.c
drivers/cxl/cxl.h
drivers/cxl/cxlmem.h
drivers/cxl/mem.c
drivers/cxl/pci.c
drivers/cxl/port.c
drivers/firewire/core.h
drivers/firmware/efi/libstub/Makefile
drivers/gpio/gpio-aspeed.c
drivers/gpio/gpio-em.c
drivers/gpio/gpio-mvebu.c
drivers/gpio/gpio-pxa.c
drivers/gpio/gpio-rcar.c
drivers/gpio/gpio-rockchip.c
drivers/gpio/gpio-tegra.c
drivers/gpio/gpio-vf610.c
drivers/gpio/gpiolib-cdev.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
drivers/gpu/drm/amd/amdgpu/soc15_common.h
drivers/gpu/drm/amd/amdgpu/soc21.c
drivers/gpu/drm/amd/amdgpu/umc_v12_0.c
drivers/gpu/drm/amd/amdgpu/umc_v12_0.h
drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
drivers/gpu/drm/amd/display/dc/dc_dp_types.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.h
drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.c
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_hubp.h
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h
drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
drivers/gpu/drm/amd/display/dc/dml2/Makefile
drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h
drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
drivers/gpu/drm/amd/display/dc/inc/hw/optc.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_2_sh_mask.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/amdgpu_dpm.c
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/pptable_v1_0.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_pptable.h
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h
drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/bridge/ti-sn65dsi86.c
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/i915/display/intel_cdclk.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_tc.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gt/intel_ggtt.c
drivers/gpu/drm/i915/gt/intel_rc6.c
drivers/gpu/drm/i915/i915_debugfs_params.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/nouveau/dispnv50/core.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/include/nvif/cl0080.h
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
drivers/gpu/drm/nouveau/include/nvkm/core/falcon.h
drivers/gpu/drm/nouveau/include/nvkm/core/firmware.h
drivers/gpu/drm/nouveau/include/nvkm/core/layout.h
drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
drivers/gpu/drm/nouveau/include/nvkm/engine/nvenc.h
drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvkm/subdev/bar.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nouveau_bios.c
drivers/gpu/drm/nouveau/nvif/disp.c
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/ce/ga100.c
drivers/gpu/drm/nouveau/nvkm/engine/ce/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ce/tu102.c
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/chan.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/fifo/base.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/cgrp.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga100.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/fifo/runl.h
drivers/gpu/drm/nouveau/nvkm/engine/fifo/tu102.c
drivers/gpu/drm/nouveau/nvkm/engine/fifo/uchan.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/gr/base.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gm107.c
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/base.c
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/gm107.c
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
drivers/gpu/drm/nouveau/nvkm/engine/sec2/ga102.c
drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
drivers/gpu/drm/nouveau/nvkm/falcon/base.c
drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c
drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c
drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/acr/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/bar/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bar/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/bar/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/bus/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fault/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.c
drivers/gpu/drm/nouveau/nvkm/subdev/fb/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/fuse/gm107.c
drivers/gpu/drm/nouveau/nvkm/subdev/gpio/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/gpio/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/gv100.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/ga102.c
drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/tu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp102.c
drivers/gpu/drm/nouveau/nvkm/subdev/privring/gm200.c
drivers/gpu/drm/nouveau/nvkm/subdev/therm/gp100.c
drivers/gpu/drm/nouveau/nvkm/subdev/top/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/Kbuild
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/ga100.c
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/priv.h
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/nvkm/subdev/vfn/tu102.c
drivers/gpu/drm/qxl/qxl_display.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/renesas/shmobile/Kconfig
drivers/gpu/drm/renesas/shmobile/Makefile
drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.c [deleted file]
drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.h [deleted file]
drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/renesas/shmobile/shmob_drm_crtc.h
drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.c
drivers/gpu/drm/renesas/shmobile/shmob_drm_drv.h
drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c
drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.h
drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.c
drivers/gpu/drm/renesas/shmobile/shmob_drm_plane.h
drivers/gpu/drm/solomon/ssd130x.c
drivers/gpu/drm/vc4/tests/vc4_mock_crtc.c
drivers/gpu/drm/vc4/tests/vc4_mock_output.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-at91-core.c
drivers/i2c/busses/i2c-axxia.c
drivers/i2c/busses/i2c-bcm-iproc.c
drivers/i2c/busses/i2c-brcmstb.c
drivers/i2c/busses/i2c-cp2615.c
drivers/i2c/busses/i2c-designware-master.c
drivers/i2c/busses/i2c-exynos5.c
drivers/i2c/busses/i2c-gpio.c
drivers/i2c/busses/i2c-i801.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-powermac.c
drivers/i2c/busses/i2c-pxa.c
drivers/i2c/busses/i2c-qcom-geni.c
drivers/i2c/busses/i2c-rcar.c
drivers/i2c/busses/i2c-riic.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/i2c/busses/i2c-stm32f4.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/i2c/busses/i2c-sun6i-p2wi.c
drivers/i2c/i2c-atr.c
drivers/i2c/i2c-core-base.c
drivers/i2c/i2c-dev.c
drivers/i2c/muxes/i2c-demux-pinctrl.c
drivers/i2c/muxes/i2c-mux-gpio.c
drivers/i3c/master.c
drivers/i3c/master/dw-i3c-master.c
drivers/i3c/master/i3c-master-cdns.c
drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
drivers/i3c/master/mipi-i3c-hci/core.c
drivers/i3c/master/mipi-i3c-hci/dat_v1.c
drivers/i3c/master/mipi-i3c-hci/dma.c
drivers/i3c/master/svc-i3c-master.c
drivers/input/evdev.c
drivers/input/input-leds.c
drivers/input/joystick/walkera0701.c
drivers/input/keyboard/adp5520-keys.c
drivers/input/keyboard/cros_ec_keyb.c
drivers/input/keyboard/ep93xx_keypad.c
drivers/input/keyboard/iqs62x-keys.c
drivers/input/keyboard/matrix_keypad.c
drivers/input/keyboard/omap-keypad.c
drivers/input/keyboard/omap4-keypad.c
drivers/input/keyboard/samsung-keypad.c
drivers/input/keyboard/sh_keysc.c
drivers/input/keyboard/spear-keyboard.c
drivers/input/keyboard/stmpe-keypad.c
drivers/input/keyboard/tegra-kbc.c
drivers/input/misc/88pm80x_onkey.c
drivers/input/misc/axp20x-pek.c
drivers/input/misc/da9052_onkey.c
drivers/input/misc/da9055_onkey.c
drivers/input/misc/ideapad_slidebar.c
drivers/input/misc/iqs269a.c
drivers/input/misc/kxtj9.c
drivers/input/misc/m68kspkr.c
drivers/input/misc/max8997_haptic.c
drivers/input/misc/mc13783-pwrbutton.c
drivers/input/misc/palmas-pwrbutton.c
drivers/input/misc/pcap_keys.c
drivers/input/misc/pcf50633-input.c
drivers/input/misc/pcspkr.c
drivers/input/misc/pm8941-pwrkey.c
drivers/input/misc/soc_button_array.c
drivers/input/misc/sparcspkr.c
drivers/input/misc/wistron_btns.c
drivers/input/misc/wm831x-on.c
drivers/input/mouse/cyapa.c
drivers/input/mouse/navpoint.c
drivers/input/rmi4/rmi_bus.c
drivers/input/rmi4/rmi_f34.c
drivers/input/serio/altera_ps2.c
drivers/input/serio/ams_delta_serio.c
drivers/input/serio/apbps2.c
drivers/input/serio/arc_ps2.c
drivers/input/serio/ct82c710.c
drivers/input/serio/i8042-sparcio.h
drivers/input/serio/i8042.c
drivers/input/serio/ioc3kbd.c
drivers/input/serio/maceps2.c
drivers/input/serio/olpc_apsp.c
drivers/input/serio/ps2-gpio.c
drivers/input/serio/q40kbd.c
drivers/input/serio/rpckbd.c
drivers/input/serio/sun4i-ps2.c
drivers/input/serio/xilinx_ps2.c
drivers/input/touchscreen/ad7877.c
drivers/input/touchscreen/ad7879-i2c.c
drivers/input/touchscreen/ad7879-spi.c
drivers/input/touchscreen/ad7879.c
drivers/input/touchscreen/ad7879.h
drivers/input/touchscreen/ads7846.c
drivers/input/touchscreen/cyttsp5.c
drivers/input/touchscreen/da9052_tsi.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/input/touchscreen/elants_i2c.c
drivers/input/touchscreen/exc3000.c
drivers/input/touchscreen/hideep.c
drivers/input/touchscreen/hycon-hy46xx.c
drivers/input/touchscreen/ili210x.c
drivers/input/touchscreen/ilitek_ts_i2c.c
drivers/input/touchscreen/iqs5xx.c
drivers/input/touchscreen/mainstone-wm97xx.c
drivers/input/touchscreen/mc13783_ts.c
drivers/input/touchscreen/melfas_mip4.c
drivers/input/touchscreen/pcap_ts.c
drivers/input/touchscreen/raydium_i2c_ts.c
drivers/input/touchscreen/rohm_bu21023.c
drivers/input/touchscreen/s6sy761.c
drivers/input/touchscreen/stmfts.c
drivers/input/touchscreen/stmpe-ts.c
drivers/input/touchscreen/sun4i-ts.c
drivers/input/touchscreen/ti_am335x_tsc.c
drivers/input/touchscreen/tsc2004.c
drivers/input/touchscreen/tsc2005.c
drivers/input/touchscreen/tsc200x-core.c
drivers/input/touchscreen/tsc200x-core.h
drivers/input/touchscreen/wdt87xx_i2c.c
drivers/input/touchscreen/wm831x-ts.c
drivers/input/touchscreen/wm97xx-core.c
drivers/iommu/Kconfig
drivers/iommu/Makefile
drivers/iommu/amd/Kconfig
drivers/iommu/amd/Makefile
drivers/iommu/amd/amd_iommu.h
drivers/iommu/amd/amd_iommu_types.h
drivers/iommu/amd/init.c
drivers/iommu/amd/io_pgtable_v2.c
drivers/iommu/amd/iommu.c
drivers/iommu/amd/iommu_v2.c [deleted file]
drivers/iommu/apple-dart.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
drivers/iommu/arm/arm-smmu/qcom_iommu.c
drivers/iommu/dma-iommu.c
drivers/iommu/exynos-iommu.c
drivers/iommu/fsl_pamu_domain.c
drivers/iommu/intel/debugfs.c
drivers/iommu/intel/iommu.c
drivers/iommu/intel/iommu.h
drivers/iommu/iommu.c
drivers/iommu/iommufd/selftest.c
drivers/iommu/iova.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/msm_iommu.c
drivers/iommu/mtk_iommu.c
drivers/iommu/mtk_iommu_v1.c
drivers/iommu/omap-iommu.c
drivers/iommu/omap-iommu.h
drivers/iommu/rockchip-iommu.c
drivers/iommu/s390-iommu.c
drivers/iommu/sprd-iommu.c
drivers/iommu/sun50i-iommu.c
drivers/iommu/tegra-gart.c [deleted file]
drivers/iommu/tegra-smmu.c
drivers/iommu/virtio-iommu.c
drivers/irqchip/irq-gic-v3.c
drivers/leds/rgb/leds-qcom-lpg.c
drivers/mailbox/bcm-pdc-mailbox.c
drivers/mailbox/imx-mailbox.c
drivers/mailbox/mailbox-sti.c
drivers/mailbox/mtk-cmdq-mailbox.c
drivers/mailbox/ti-msgmgr.c
drivers/media/cec/platform/Makefile
drivers/media/cec/platform/cros-ec/cros-ec-cec.c
drivers/media/common/siano/smsdvb-debugfs.c
drivers/media/common/videobuf2/frame_vector.c
drivers/media/common/videobuf2/videobuf2-core.c
drivers/media/common/videobuf2/videobuf2-dma-contig.c
drivers/media/common/videobuf2/videobuf2-vmalloc.c
drivers/media/dvb-frontends/drx39xyj/drxj.c
drivers/media/dvb-frontends/m88ds3103.c
drivers/media/i2c/Kconfig
drivers/media/i2c/Makefile
drivers/media/i2c/adp1653.c
drivers/media/i2c/adv7180.c
drivers/media/i2c/ar0521.c
drivers/media/i2c/ccs/ccs-core.c
drivers/media/i2c/ccs/ccs-quirk.h
drivers/media/i2c/ccs/ccs.h
drivers/media/i2c/cx25840/cx25840-core.c
drivers/media/i2c/ds90ub913.c
drivers/media/i2c/ds90ub953.c
drivers/media/i2c/ds90ub960.c
drivers/media/i2c/hi556.c
drivers/media/i2c/hi846.c
drivers/media/i2c/hi847.c
drivers/media/i2c/imx208.c
drivers/media/i2c/imx214.c
drivers/media/i2c/imx219.c
drivers/media/i2c/imx258.c
drivers/media/i2c/imx296.c
drivers/media/i2c/imx319.c
drivers/media/i2c/imx334.c
drivers/media/i2c/imx335.c
drivers/media/i2c/imx355.c
drivers/media/i2c/imx412.c
drivers/media/i2c/imx415.c
drivers/media/i2c/max9286.c
drivers/media/i2c/msp3400-driver.c
drivers/media/i2c/mt9m001.c
drivers/media/i2c/mt9m111.c
drivers/media/i2c/mt9m114.c [new file with mode: 0644]
drivers/media/i2c/mt9v011.c
drivers/media/i2c/mt9v032.c
drivers/media/i2c/mt9v111.c
drivers/media/i2c/og01a1b.c
drivers/media/i2c/ov01a10.c
drivers/media/i2c/ov02a10.c
drivers/media/i2c/ov08d10.c
drivers/media/i2c/ov08x40.c
drivers/media/i2c/ov13858.c
drivers/media/i2c/ov13b10.c
drivers/media/i2c/ov2640.c
drivers/media/i2c/ov2659.c
drivers/media/i2c/ov2685.c
drivers/media/i2c/ov2740.c
drivers/media/i2c/ov4689.c
drivers/media/i2c/ov5640.c
drivers/media/i2c/ov5647.c
drivers/media/i2c/ov5670.c
drivers/media/i2c/ov5675.c
drivers/media/i2c/ov5693.c
drivers/media/i2c/ov5695.c
drivers/media/i2c/ov7251.c
drivers/media/i2c/ov7670.c
drivers/media/i2c/ov772x.c
drivers/media/i2c/ov7740.c
drivers/media/i2c/ov8856.c
drivers/media/i2c/ov9282.c
drivers/media/i2c/ov9734.c
drivers/media/i2c/rdacm20.c
drivers/media/i2c/st-vgxy61.c
drivers/media/i2c/tc358746.c
drivers/media/i2c/tvp514x.c
drivers/media/i2c/video-i2c.c
drivers/media/mc/mc-entity.c
drivers/media/pci/Kconfig
drivers/media/pci/Makefile
drivers/media/pci/bt8xx/bttv-cards.c
drivers/media/pci/bt8xx/bttv-driver.c
drivers/media/pci/bt8xx/dvb-bt8xx.c
drivers/media/pci/cobalt/cobalt-driver.c
drivers/media/pci/cobalt/cobalt-v4l2.c
drivers/media/pci/cx18/cx18-driver.h
drivers/media/pci/cx18/cx18-mailbox.c
drivers/media/pci/intel/ivsc/Kconfig
drivers/media/pci/intel/ivsc/mei_ace.c
drivers/media/pci/intel/ivsc/mei_csi.c
drivers/media/pci/mgb4/Kconfig [new file with mode: 0644]
drivers/media/pci/mgb4/Makefile [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_cmt.c [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_cmt.h [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_core.c [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_core.h [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_dma.c [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_dma.h [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_i2c.c [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_i2c.h [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_io.h [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_regs.c [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_regs.h [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_sysfs.h [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_sysfs_in.c [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_sysfs_out.c [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_sysfs_pci.c [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_trigger.c [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_trigger.h [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_vin.c [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_vin.h [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_vout.c [new file with mode: 0644]
drivers/media/pci/mgb4/mgb4_vout.h [new file with mode: 0644]
drivers/media/pci/zoran/zoran.h
drivers/media/platform/Kconfig
drivers/media/platform/Makefile
drivers/media/platform/allegro-dvt/allegro-mail.c
drivers/media/platform/allegro-dvt/allegro-mail.h
drivers/media/platform/amphion/vpu_defs.h
drivers/media/platform/amphion/vpu_helpers.c
drivers/media/platform/amphion/vpu_malone.c
drivers/media/platform/amphion/vpu_msgs.c
drivers/media/platform/aspeed/aspeed-video.c
drivers/media/platform/cadence/Kconfig
drivers/media/platform/cadence/cdns-csi2rx.c
drivers/media/platform/cadence/cdns-csi2tx.c
drivers/media/platform/mediatek/jpeg/mtk_jpeg_enc_hw.c
drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_scp.c
drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_fw_vpu.c
drivers/media/platform/mediatek/vcodec/common/mtk_vcodec_util.c
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc.c
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.c
drivers/media/platform/mediatek/vcodec/encoder/mtk_vcodec_enc_pm.h
drivers/media/platform/mediatek/vcodec/encoder/venc_drv_if.c
drivers/media/platform/mediatek/vcodec/encoder/venc_vpu_if.c
drivers/media/platform/microchip/microchip-isc-base.c
drivers/media/platform/nuvoton/Kconfig [new file with mode: 0644]
drivers/media/platform/nuvoton/Makefile [new file with mode: 0644]
drivers/media/platform/nuvoton/npcm-regs.h [new file with mode: 0644]
drivers/media/platform/nuvoton/npcm-video.c [new file with mode: 0644]
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg-hw.h
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.c
drivers/media/platform/nxp/imx-jpeg/mxc-jpeg.h
drivers/media/platform/nxp/imx-mipi-csis.c
drivers/media/platform/nxp/imx8-isi/imx8-isi-core.c
drivers/media/platform/qcom/camss/camss-csid-4-1.c
drivers/media/platform/qcom/camss/camss-csid-4-7.c
drivers/media/platform/qcom/camss/camss-csid-gen2.c
drivers/media/platform/qcom/camss/camss-csid.c
drivers/media/platform/qcom/camss/camss-csid.h
drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
drivers/media/platform/qcom/camss/camss-csiphy.c
drivers/media/platform/qcom/camss/camss-csiphy.h
drivers/media/platform/qcom/camss/camss-ispif.c
drivers/media/platform/qcom/camss/camss-ispif.h
drivers/media/platform/qcom/camss/camss-vfe-170.c
drivers/media/platform/qcom/camss/camss-vfe-4-1.c
drivers/media/platform/qcom/camss/camss-vfe-4-7.c
drivers/media/platform/qcom/camss/camss-vfe-4-8.c
drivers/media/platform/qcom/camss/camss-vfe-480.c
drivers/media/platform/qcom/camss/camss-vfe.c
drivers/media/platform/qcom/camss/camss-vfe.h
drivers/media/platform/qcom/camss/camss-video.c
drivers/media/platform/qcom/camss/camss.c
drivers/media/platform/qcom/camss/camss.h
drivers/media/platform/qcom/venus/core.c
drivers/media/platform/qcom/venus/hfi_cmds.h
drivers/media/platform/qcom/venus/hfi_msgs.c
drivers/media/platform/qcom/venus/hfi_parser.c
drivers/media/platform/qcom/venus/hfi_venus.c
drivers/media/platform/qcom/venus/pm_helpers.c
drivers/media/platform/renesas/rcar-isp.c
drivers/media/platform/renesas/rcar-vin/rcar-csi2.c
drivers/media/platform/renesas/rcar_drif.c
drivers/media/platform/renesas/renesas-ceu.c
drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
drivers/media/platform/rockchip/rkisp1/rkisp1-csi.c
drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c
drivers/media/platform/samsung/exynos4-is/fimc-is.c
drivers/media/platform/samsung/s3c-camif/camif-capture.c
drivers/media/platform/samsung/s5p-mfc/s5p_mfc_enc.c
drivers/media/platform/st/sti/c8sectpfe/c8sectpfe-core.c
drivers/media/platform/ti/Kconfig
drivers/media/platform/ti/Makefile
drivers/media/platform/ti/am437x/am437x-vpfe.c
drivers/media/platform/ti/j721e-csi2rx/Makefile [new file with mode: 0644]
drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c [new file with mode: 0644]
drivers/media/platform/ti/omap3isp/ispstat.c
drivers/media/platform/verisilicon/hantro_drv.c
drivers/media/platform/verisilicon/hantro_postproc.c
drivers/media/platform/verisilicon/rockchip_av1_filmgrain.c
drivers/media/platform/verisilicon/rockchip_vpu_hw.c
drivers/media/platform/xilinx/xilinx-dma.c
drivers/media/radio/radio-isa.c
drivers/media/radio/radio-miropcm20.c
drivers/media/radio/radio-si476x.c
drivers/media/radio/radio-wl1273.c
drivers/media/rc/Kconfig
drivers/media/rc/Makefile
drivers/media/rc/ati_remote.c
drivers/media/rc/imon.c
drivers/media/rc/ir-rx51.c [deleted file]
drivers/media/rc/ir-sharp-decoder.c
drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c
drivers/media/rc/keymaps/rc-alink-dtu-m.c
drivers/media/rc/keymaps/rc-anysee.c
drivers/media/rc/keymaps/rc-apac-viewcomp.c
drivers/media/rc/keymaps/rc-astrometa-t2hybrid.c
drivers/media/rc/keymaps/rc-asus-pc39.c
drivers/media/rc/keymaps/rc-asus-ps3-100.c
drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c
drivers/media/rc/keymaps/rc-ati-x10.c
drivers/media/rc/keymaps/rc-avermedia-a16d.c
drivers/media/rc/keymaps/rc-avermedia-cardbus.c
drivers/media/rc/keymaps/rc-avermedia-dvbt.c
drivers/media/rc/keymaps/rc-avermedia-m135a.c
drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c
drivers/media/rc/keymaps/rc-avermedia-rm-ks.c
drivers/media/rc/keymaps/rc-avermedia.c
drivers/media/rc/keymaps/rc-avertv-303.c
drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c
drivers/media/rc/keymaps/rc-beelink-gs1.c
drivers/media/rc/keymaps/rc-beelink-mxiii.c
drivers/media/rc/keymaps/rc-behold-columbus.c
drivers/media/rc/keymaps/rc-behold.c
drivers/media/rc/keymaps/rc-budget-ci-old.c
drivers/media/rc/keymaps/rc-cinergy-1400.c
drivers/media/rc/keymaps/rc-cinergy.c
drivers/media/rc/keymaps/rc-ct-90405.c
drivers/media/rc/keymaps/rc-d680-dmb.c
drivers/media/rc/keymaps/rc-dib0700-nec.c
drivers/media/rc/keymaps/rc-dib0700-rc5.c
drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c
drivers/media/rc/keymaps/rc-digittrade.c
drivers/media/rc/keymaps/rc-dm1105-nec.c
drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c
drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c
drivers/media/rc/keymaps/rc-dreambox.c
drivers/media/rc/keymaps/rc-dtt200u.c
drivers/media/rc/keymaps/rc-dvbsky.c
drivers/media/rc/keymaps/rc-dvico-mce.c
drivers/media/rc/keymaps/rc-dvico-portable.c
drivers/media/rc/keymaps/rc-em-terratec.c
drivers/media/rc/keymaps/rc-encore-enltv-fm53.c
drivers/media/rc/keymaps/rc-encore-enltv.c
drivers/media/rc/keymaps/rc-encore-enltv2.c
drivers/media/rc/keymaps/rc-evga-indtube.c
drivers/media/rc/keymaps/rc-eztv.c
drivers/media/rc/keymaps/rc-flydvb.c
drivers/media/rc/keymaps/rc-flyvideo.c
drivers/media/rc/keymaps/rc-fusionhdtv-mce.c
drivers/media/rc/keymaps/rc-gadmei-rm008z.c
drivers/media/rc/keymaps/rc-geekbox.c
drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c
drivers/media/rc/keymaps/rc-gotview7135.c
drivers/media/rc/keymaps/rc-hauppauge.c
drivers/media/rc/keymaps/rc-hisi-poplar.c
drivers/media/rc/keymaps/rc-hisi-tv-demo.c
drivers/media/rc/keymaps/rc-imon-mce.c
drivers/media/rc/keymaps/rc-imon-pad.c
drivers/media/rc/keymaps/rc-imon-rsc.c
drivers/media/rc/keymaps/rc-iodata-bctv7e.c
drivers/media/rc/keymaps/rc-it913x-v1.c
drivers/media/rc/keymaps/rc-it913x-v2.c
drivers/media/rc/keymaps/rc-kaiomy.c
drivers/media/rc/keymaps/rc-khadas.c
drivers/media/rc/keymaps/rc-khamsin.c
drivers/media/rc/keymaps/rc-kworld-315u.c
drivers/media/rc/keymaps/rc-kworld-pc150u.c
drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c
drivers/media/rc/keymaps/rc-leadtek-y04g0051.c
drivers/media/rc/keymaps/rc-lme2510.c
drivers/media/rc/keymaps/rc-manli.c
drivers/media/rc/keymaps/rc-mecool-kii-pro.c
drivers/media/rc/keymaps/rc-mecool-kiii-pro.c
drivers/media/rc/keymaps/rc-medion-x10.c
drivers/media/rc/keymaps/rc-minix-neo.c
drivers/media/rc/keymaps/rc-msi-digivox-ii.c
drivers/media/rc/keymaps/rc-msi-digivox-iii.c
drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c
drivers/media/rc/keymaps/rc-msi-tvanywhere.c
drivers/media/rc/keymaps/rc-nebula.c
drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c
drivers/media/rc/keymaps/rc-norwood.c
drivers/media/rc/keymaps/rc-npgtech.c
drivers/media/rc/keymaps/rc-odroid.c
drivers/media/rc/keymaps/rc-pctv-sedna.c
drivers/media/rc/keymaps/rc-pine64.c
drivers/media/rc/keymaps/rc-pinnacle-color.c
drivers/media/rc/keymaps/rc-pinnacle-grey.c
drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
drivers/media/rc/keymaps/rc-pixelview-002t.c
drivers/media/rc/keymaps/rc-pixelview-mk12.c
drivers/media/rc/keymaps/rc-pixelview-new.c
drivers/media/rc/keymaps/rc-pixelview.c
drivers/media/rc/keymaps/rc-powercolor-real-angel.c
drivers/media/rc/keymaps/rc-proteus-2309.c
drivers/media/rc/keymaps/rc-purpletv.c
drivers/media/rc/keymaps/rc-pv951.c
drivers/media/rc/keymaps/rc-rc6-mce.c
drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c
drivers/media/rc/keymaps/rc-reddo.c
drivers/media/rc/keymaps/rc-snapstream-firefly.c
drivers/media/rc/keymaps/rc-streamzap.c
drivers/media/rc/keymaps/rc-su3000.c
drivers/media/rc/keymaps/rc-tanix-tx3mini.c
drivers/media/rc/keymaps/rc-tanix-tx5max.c
drivers/media/rc/keymaps/rc-tbs-nec.c
drivers/media/rc/keymaps/rc-technisat-ts35.c
drivers/media/rc/keymaps/rc-technisat-usb2.c
drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c
drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c
drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c
drivers/media/rc/keymaps/rc-terratec-slim-2.c
drivers/media/rc/keymaps/rc-terratec-slim.c
drivers/media/rc/keymaps/rc-tevii-nec.c
drivers/media/rc/keymaps/rc-tivo.c
drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
drivers/media/rc/keymaps/rc-total-media-in-hand.c
drivers/media/rc/keymaps/rc-trekstor.c
drivers/media/rc/keymaps/rc-tt-1500.c
drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c
drivers/media/rc/keymaps/rc-twinhan1027.c
drivers/media/rc/keymaps/rc-vega-s9x.c
drivers/media/rc/keymaps/rc-videomate-m1f.c
drivers/media/rc/keymaps/rc-videomate-s350.c
drivers/media/rc/keymaps/rc-videomate-tv-pvr.c
drivers/media/rc/keymaps/rc-videostrong-kii-pro.c
drivers/media/rc/keymaps/rc-wetek-hub.c
drivers/media/rc/keymaps/rc-wetek-play2.c
drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c
drivers/media/rc/keymaps/rc-winfast.c
drivers/media/rc/keymaps/rc-x96max.c
drivers/media/rc/keymaps/rc-xbox-360.c
drivers/media/rc/keymaps/rc-xbox-dvd.c
drivers/media/rc/keymaps/rc-zx-irdec.c
drivers/media/rc/lirc_dev.c
drivers/media/rc/meson-ir.c
drivers/media/rc/pwm-ir-tx.c
drivers/media/test-drivers/vidtv/vidtv_mux.c
drivers/media/test-drivers/vidtv/vidtv_psi.c
drivers/media/test-drivers/vivid/vivid-core.c
drivers/media/test-drivers/vivid/vivid-rds-gen.c
drivers/media/usb/cx231xx/cx231xx-417.c
drivers/media/usb/cx231xx/cx231xx-core.c
drivers/media/usb/cx231xx/cx231xx.h
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/dvb-usb/gp8psk.c
drivers/media/usb/gspca/cpia1.c
drivers/media/usb/siano/smsusb.c
drivers/media/v4l2-core/Kconfig
drivers/media/v4l2-core/Makefile
drivers/media/v4l2-core/v4l2-event.c
drivers/media/v4l2-core/v4l2-ioctl.c
drivers/media/v4l2-core/v4l2-subdev.c
drivers/media/v4l2-core/videobuf-core.c [deleted file]
drivers/media/v4l2-core/videobuf-dma-contig.c [deleted file]
drivers/media/v4l2-core/videobuf-dma-sg.c [deleted file]
drivers/media/v4l2-core/videobuf-vmalloc.c [deleted file]
drivers/memory/tegra/mc.c
drivers/memory/tegra/tegra20.c
drivers/misc/eeprom/at24.c
drivers/misc/lkdtm/cfi.c
drivers/mmc/core/block.c
drivers/mmc/core/card.h
drivers/mmc/core/mmc.c
drivers/mmc/core/quirks.h
drivers/mmc/host/sdhci-pci-gli.c
drivers/mmc/host/sdhci_am654.c
drivers/mmc/host/vub300.c
drivers/mtd/chips/cfi_cmdset_0001.c
drivers/mtd/chips/map_ram.c
drivers/mtd/devices/bcm47xxsflash.c
drivers/mtd/devices/docg3.c
drivers/mtd/devices/phram.c
drivers/mtd/devices/powernv_flash.c
drivers/mtd/devices/spear_smi.c
drivers/mtd/devices/st_spi_fsm.c
drivers/mtd/hyperbus/hbmc-am654.c
drivers/mtd/hyperbus/rpc-if.c
drivers/mtd/lpddr/lpddr2_nvm.c
drivers/mtd/lpddr/lpddr_cmds.c
drivers/mtd/maps/lantiq-flash.c
drivers/mtd/maps/physmap-core.c
drivers/mtd/maps/plat-ram.c
drivers/mtd/maps/pxa2xx-flash.c
drivers/mtd/maps/sa1100-flash.c
drivers/mtd/maps/sun_uflash.c
drivers/mtd/mtdcore.c
drivers/mtd/mtdpart.c
drivers/mtd/nand/raw/arasan-nand-controller.c
drivers/mtd/nand/raw/atmel/nand-controller.c
drivers/mtd/nand/raw/cadence-nand-controller.c
drivers/mtd/nand/raw/denali.h
drivers/mtd/nand/raw/ingenic/ingenic_nand_drv.c
drivers/mtd/nand/raw/intel-nand-controller.c
drivers/mtd/nand/raw/internals.h
drivers/mtd/nand/raw/marvell_nand.c
drivers/mtd/nand/raw/meson_nand.c
drivers/mtd/nand/raw/mtk_nand.c
drivers/mtd/nand/raw/nand_base.c
drivers/mtd/nand/raw/omap2.c
drivers/mtd/nand/raw/renesas-nand-controller.c
drivers/mtd/nand/raw/rockchip-nand-controller.c
drivers/mtd/nand/raw/sh_flctl.c
drivers/mtd/nand/raw/sunxi_nand.c
drivers/mtd/nand/raw/tegra_nand.c
drivers/mtd/nand/raw/vf610_nfc.c
drivers/mtd/nand/raw/xway_nand.c
drivers/mtd/nand/spi/Makefile
drivers/mtd/nand/spi/core.c
drivers/mtd/nand/spi/foresee.c [new file with mode: 0644]
drivers/mtd/nand/spi/winbond.c
drivers/mtd/nand/spi/xtx.c
drivers/mtd/parsers/Kconfig
drivers/mtd/parsers/Makefile
drivers/mtd/parsers/ar7part.c [deleted file]
drivers/mtd/spi-nor/Makefile
drivers/mtd/spi-nor/atmel.c
drivers/mtd/spi-nor/catalyst.c [deleted file]
drivers/mtd/spi-nor/controllers/hisi-sfc.c
drivers/mtd/spi-nor/controllers/nxp-spifi.c
drivers/mtd/spi-nor/core.c
drivers/mtd/spi-nor/core.h
drivers/mtd/spi-nor/eon.c
drivers/mtd/spi-nor/esmt.c
drivers/mtd/spi-nor/everspin.c
drivers/mtd/spi-nor/fujitsu.c [deleted file]
drivers/mtd/spi-nor/gigadevice.c
drivers/mtd/spi-nor/intel.c
drivers/mtd/spi-nor/issi.c
drivers/mtd/spi-nor/macronix.c
drivers/mtd/spi-nor/micron-st.c
drivers/mtd/spi-nor/spansion.c
drivers/mtd/spi-nor/sst.c
drivers/mtd/spi-nor/swp.c
drivers/mtd/spi-nor/sysfs.c
drivers/mtd/spi-nor/winbond.c
drivers/mtd/spi-nor/xilinx.c
drivers/mtd/spi-nor/xmc.c
drivers/mtd/ubi/block.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/cdev.c
drivers/mtd/ubi/eba.c
drivers/mtd/ubi/fastmap-wl.c
drivers/mtd/ubi/fastmap.c
drivers/mtd/ubi/ubi.h
drivers/mtd/ubi/wl.c
drivers/mtd/ubi/wl.h
drivers/net/dsa/lan9303_mdio.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/intel/i40e/i40e_devlink.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/ice/ice_lag.c
drivers/net/ethernet/intel/ice/ice_tc_lib.c
drivers/net/ethernet/intel/idpf/idpf_txrx.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
drivers/net/ethernet/ti/am65-cpsw-nuss.c
drivers/net/ethernet/ti/icssg/icss_iep.c
drivers/net/ethernet/xscale/ixp4xx_eth.c
drivers/net/mdio/acpi_mdio.c
drivers/net/mdio/fwnode_mdio.c
drivers/net/mdio/mdio-aspeed.c
drivers/net/mdio/mdio-bitbang.c
drivers/net/mdio/of_mdio.c
drivers/net/phy/bcm-phy-ptp.c
drivers/net/phy/bcm87xx.c
drivers/net/phy/phylink.c
drivers/net/phy/sfp.c
drivers/net/ppp/ppp_generic.c
drivers/nvme/Makefile
drivers/nvme/common/Kconfig
drivers/nvme/common/Makefile
drivers/nvme/common/auth.c
drivers/nvme/common/keyring.c
drivers/nvme/host/Kconfig
drivers/nvme/host/auth.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/tcp.c
drivers/nvme/target/Kconfig
drivers/nvme/target/fabrics-cmd-auth.c
drivers/nvme/target/loop.c
drivers/of/property.c
drivers/pci/pcie/Kconfig
drivers/pci/pcie/aer.c
drivers/pcmcia/cs.c
drivers/pcmcia/ds.c
drivers/pcmcia/pcmcia_resource.c
drivers/pcmcia/tcic.c
drivers/perf/arm_cspmu/arm_cspmu.c
drivers/perf/arm_pmuv3.c
drivers/perf/riscv_pmu_sbi.c
drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
drivers/pinctrl/cirrus/pinctrl-cs42l43.c
drivers/pinctrl/cirrus/pinctrl-lochnagar.c
drivers/pinctrl/core.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/intel/pinctrl-lynxpoint.c
drivers/pinctrl/mediatek/pinctrl-moore.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/mediatek/pinctrl-paris.c
drivers/pinctrl/nuvoton/pinctrl-npcm7xx.c
drivers/pinctrl/nuvoton/pinctrl-npcm8xx.c
drivers/pinctrl/pinctrl-as3722.c
drivers/pinctrl/pinctrl-axp209.c
drivers/pinctrl/pinctrl-cy8c95x0.c
drivers/pinctrl/pinctrl-ingenic.c
drivers/pinctrl/pinctrl-ocelot.c
drivers/pinctrl/pinctrl-rk805.c
drivers/pinctrl/pinctrl-st.c
drivers/pinctrl/renesas/gpio.c
drivers/pinctrl/renesas/pinctrl-rzg2l.c
drivers/pinctrl/renesas/pinctrl-rzv2m.c
drivers/pinctrl/spear/pinctrl-plgpio.c
drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c
drivers/pinctrl/starfive/pinctrl-starfive-jh7110.c
drivers/pinctrl/stm32/pinctrl-stm32.c
drivers/pinctrl/vt8500/pinctrl-wmt.c
drivers/ptp/ptp_chardev.c
drivers/ptp/ptp_clock.c
drivers/ptp/ptp_private.h
drivers/pwm/Kconfig
drivers/pwm/Makefile
drivers/pwm/core.c
drivers/pwm/pwm-ab8500.c
drivers/pwm/pwm-apple.c
drivers/pwm/pwm-atmel-hlcdc.c
drivers/pwm/pwm-atmel-tcb.c
drivers/pwm/pwm-atmel.c
drivers/pwm/pwm-bcm-iproc.c
drivers/pwm/pwm-bcm-kona.c
drivers/pwm/pwm-bcm2835.c
drivers/pwm/pwm-berlin.c
drivers/pwm/pwm-brcmstb.c
drivers/pwm/pwm-clk.c
drivers/pwm/pwm-clps711x.c
drivers/pwm/pwm-crc.c
drivers/pwm/pwm-cros-ec.c
drivers/pwm/pwm-dwc-core.c [new file with mode: 0644]
drivers/pwm/pwm-dwc.c
drivers/pwm/pwm-dwc.h [new file with mode: 0644]
drivers/pwm/pwm-ep93xx.c
drivers/pwm/pwm-fsl-ftm.c
drivers/pwm/pwm-hibvt.c
drivers/pwm/pwm-img.c
drivers/pwm/pwm-imx-tpm.c
drivers/pwm/pwm-imx1.c
drivers/pwm/pwm-imx27.c
drivers/pwm/pwm-intel-lgm.c
drivers/pwm/pwm-iqs620a.c
drivers/pwm/pwm-jz4740.c
drivers/pwm/pwm-keembay.c
drivers/pwm/pwm-lp3943.c
drivers/pwm/pwm-lpc18xx-sct.c
drivers/pwm/pwm-lpc32xx.c
drivers/pwm/pwm-lpss.c
drivers/pwm/pwm-mediatek.c
drivers/pwm/pwm-meson.c
drivers/pwm/pwm-microchip-core.c
drivers/pwm/pwm-mtk-disp.c
drivers/pwm/pwm-mxs.c
drivers/pwm/pwm-ntxec.c
drivers/pwm/pwm-omap-dmtimer.c
drivers/pwm/pwm-pca9685.c
drivers/pwm/pwm-pxa.c
drivers/pwm/pwm-raspberrypi-poe.c
drivers/pwm/pwm-rcar.c
drivers/pwm/pwm-renesas-tpu.c
drivers/pwm/pwm-rockchip.c
drivers/pwm/pwm-rz-mtu3.c
drivers/pwm/pwm-samsung.c
drivers/pwm/pwm-sifive.c
drivers/pwm/pwm-sl28cpld.c
drivers/pwm/pwm-spear.c
drivers/pwm/pwm-sprd.c
drivers/pwm/pwm-sti.c
drivers/pwm/pwm-stm32-lp.c
drivers/pwm/pwm-stm32.c
drivers/pwm/pwm-stmpe.c
drivers/pwm/pwm-sun4i.c
drivers/pwm/pwm-sunplus.c
drivers/pwm/pwm-tegra.c
drivers/pwm/pwm-tiecap.c
drivers/pwm/pwm-tiehrpwm.c
drivers/pwm/pwm-twl-led.c
drivers/pwm/pwm-twl.c
drivers/pwm/pwm-visconti.c
drivers/pwm/pwm-vt8500.c
drivers/pwm/pwm-xilinx.c
drivers/remoteproc/mtk_common.h
drivers/remoteproc/mtk_scp.c
drivers/remoteproc/mtk_scp_ipi.c
drivers/remoteproc/qcom_q6v5_mss.c
drivers/remoteproc/qcom_q6v5_pas.c
drivers/remoteproc/st_remoteproc.c
drivers/remoteproc/stm32_rproc.c
drivers/remoteproc/xlnx_r5_remoteproc.c
drivers/rpmsg/rpmsg_core.c
drivers/rpmsg/rpmsg_ns.c
drivers/rpmsg/virtio_rpmsg_bus.c
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/rtc-at91rm9200.c
drivers/rtc/rtc-brcmstb-waketimer.c
drivers/rtc/rtc-efi.c
drivers/rtc/rtc-ep93xx.c
drivers/rtc/rtc-imxdi.c
drivers/rtc/rtc-mv.c
drivers/rtc/rtc-omap.c
drivers/rtc/rtc-pcap.c
drivers/rtc/rtc-pcf85363.c
drivers/rtc/rtc-pxa.c
drivers/rtc/rtc-r7301.c
drivers/rtc/rtc-sh.c
drivers/rtc/rtc-ssd202d.c [new file with mode: 0644]
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/zcrypt_card.c
drivers/s390/crypto/zcrypt_queue.c
drivers/s390/net/qeth_core_main.c
drivers/spi/Kconfig
drivers/spi/spi.c
drivers/staging/greybus/pwm.c
drivers/staging/media/atomisp/Kconfig
drivers/staging/media/atomisp/Makefile
drivers/staging/media/atomisp/TODO
drivers/staging/media/atomisp/i2c/Kconfig
drivers/staging/media/atomisp/i2c/Makefile
drivers/staging/media/atomisp/i2c/atomisp-gc0310.c
drivers/staging/media/atomisp/i2c/ov5693/Makefile [deleted file]
drivers/staging/media/atomisp/i2c/ov5693/ad5823.h [deleted file]
drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c [deleted file]
drivers/staging/media/atomisp/i2c/ov5693/ov5693.h [deleted file]
drivers/staging/media/atomisp/include/linux/atomisp.h
drivers/staging/media/atomisp/pci/atomisp_cmd.c
drivers/staging/media/atomisp/pci/atomisp_cmd.h
drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
drivers/staging/media/atomisp/pci/atomisp_csi2_bridge.c
drivers/staging/media/atomisp/pci/atomisp_fops.c
drivers/staging/media/atomisp/pci/atomisp_ioctl.c
drivers/staging/media/atomisp/pci/atomisp_subdev.c
drivers/staging/media/atomisp/pci/atomisp_subdev.h
drivers/staging/media/atomisp/pci/atomisp_v4l2.c
drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_local.h
drivers/staging/media/atomisp/pci/css_2401_system/host/isys_irq_private.h
drivers/staging/media/atomisp/pci/css_2401_system/isys_irq_global.h
drivers/staging/media/atomisp/pci/hive_isp_css_common/debug_global.h
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma.c
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/dma_local.h
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_formatter.c
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/input_system.c
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq.c
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/irq_local.h
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp.c
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/isp_local.h
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp.c
drivers/staging/media/atomisp/pci/hive_isp_css_common/host/sp_local.h
drivers/staging/media/atomisp/pci/hive_isp_css_include/host/csi_rx_public.h
drivers/staging/media/atomisp/pci/hive_isp_css_include/host/dma_public.h
drivers/staging/media/atomisp/pci/hive_isp_css_include/host/irq_public.h
drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isp_public.h
drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_dma_public.h
drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_irq_public.h
drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h [deleted file]
drivers/staging/media/atomisp/pci/hive_isp_css_include/host/pixelgen_public.h
drivers/staging/media/atomisp/pci/hive_isp_css_include/host/sp_public.h
drivers/staging/media/atomisp/pci/hive_isp_css_include/isys_irq.h
drivers/staging/media/atomisp/pci/ia_css_acc_types.h
drivers/staging/media/atomisp/pci/ia_css_mipi.h
drivers/staging/media/atomisp/pci/input_system_local.h
drivers/staging/media/atomisp/pci/input_system_private.h
drivers/staging/media/atomisp/pci/input_system_public.h
drivers/staging/media/atomisp/pci/isp/kernels/raw/raw_1.0/ia_css_raw.host.c
drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
drivers/staging/media/atomisp/pci/isp2400_input_system_private.h
drivers/staging/media/atomisp/pci/isp2400_input_system_public.h
drivers/staging/media/atomisp/pci/isp2401_input_system_local.h
drivers/staging/media/atomisp/pci/isp2401_input_system_private.h
drivers/staging/media/atomisp/pci/runtime/binary/src/binary.c
drivers/staging/media/atomisp/pci/runtime/debug/interface/ia_css_debug.h
drivers/staging/media/atomisp/pci/runtime/debug/src/ia_css_debug.c
drivers/staging/media/atomisp/pci/runtime/ifmtr/src/ifmtr.c
drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys.h
drivers/staging/media/atomisp/pci/runtime/isys/interface/ia_css_isys_comm.h
drivers/staging/media/atomisp/pci/runtime/isys/src/csi_rx_rmgr.c
drivers/staging/media/atomisp/pci/runtime/isys/src/isys_dma_rmgr.c
drivers/staging/media/atomisp/pci/runtime/isys/src/isys_init.c
drivers/staging/media/atomisp/pci/runtime/isys/src/isys_stream2mmio_rmgr.c
drivers/staging/media/atomisp/pci/runtime/isys/src/rx.c
drivers/staging/media/atomisp/pci/runtime/isys/src/virtual_isys.c
drivers/staging/media/atomisp/pci/runtime/pipeline/interface/ia_css_pipeline.h
drivers/staging/media/atomisp/pci/runtime/pipeline/src/pipeline.c
drivers/staging/media/atomisp/pci/sh_css.c
drivers/staging/media/atomisp/pci/sh_css_internal.h
drivers/staging/media/atomisp/pci/sh_css_mipi.c
drivers/staging/media/atomisp/pci/sh_css_params.c
drivers/staging/media/atomisp/pci/sh_css_sp.c
drivers/staging/media/atomisp/pci/sh_css_sp.h
drivers/staging/media/deprecated/atmel/atmel-isc-base.c
drivers/staging/media/ipu3/ipu3-css-params.c
drivers/staging/media/ipu3/ipu3.c
drivers/staging/media/omap4iss/iss_csi2.c
drivers/staging/media/sunxi/cedrus/cedrus.c
drivers/staging/media/sunxi/cedrus/cedrus_hw.c
drivers/staging/media/tegra-video/csi.c
drivers/staging/media/tegra-video/vip.c
drivers/tty/serial/8250/8250_port.c
drivers/vdpa/mlx5/core/mlx5_vdpa.h
drivers/vdpa/mlx5/core/mr.c
drivers/vdpa/mlx5/core/resources.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vdpa/vdpa_user/vduse_dev.c
drivers/vhost/scsi.c
drivers/vhost/vdpa.c
drivers/video/fbdev/amifb.c
drivers/video/fbdev/atmel_lcdfb.c
drivers/video/fbdev/fsl-diu-fb.c
drivers/video/fbdev/hyperv_fb.c
drivers/video/fbdev/imsttfb.c
drivers/video/fbdev/offb.c
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
drivers/video/fbdev/omap2/omapfb/vrfb.c
drivers/video/fbdev/via/viafbdev.c
drivers/virt/Kconfig
drivers/virt/Makefile
drivers/virt/coco/Kconfig [new file with mode: 0644]
drivers/virt/coco/Makefile [new file with mode: 0644]
drivers/virt/coco/sev-guest/Kconfig
drivers/virt/coco/sev-guest/sev-guest.c
drivers/virt/coco/tdx-guest/Kconfig
drivers/virt/coco/tdx-guest/tdx-guest.c
drivers/virt/coco/tsm.c [new file with mode: 0644]
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_pci_modern.c
drivers/virtio/virtio_pci_modern_dev.c
drivers/virtio/virtio_vdpa.c
drivers/vlynq/Kconfig [deleted file]
drivers/vlynq/Makefile [deleted file]
drivers/vlynq/vlynq.c [deleted file]
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/apple_wdt.c
drivers/watchdog/ar7_wdt.c [deleted file]
drivers/watchdog/aspeed_wdt.c
drivers/watchdog/at91sam9_wdt.c
drivers/watchdog/ath79_wdt.c
drivers/watchdog/gpio_wdt.c
drivers/watchdog/imx7ulp_wdt.c
drivers/watchdog/imx_sc_wdt.c
drivers/watchdog/it87_wdt.c
drivers/watchdog/ixp4xx_wdt.c
drivers/watchdog/marvell_gti_wdt.c
drivers/watchdog/mlx_wdt.c
drivers/watchdog/of_xilinx_wdt.c
drivers/watchdog/sbsa_gwdt.c
drivers/watchdog/st_lpc_wdt.c
drivers/watchdog/sunplus_wdt.c
drivers/watchdog/wdat_wdt.c
fs/9p/v9fs.c
fs/9p/v9fs_vfs.h
fs/9p/xattr.c
fs/affs/namei.c
fs/bcachefs/Kconfig
fs/bcachefs/Makefile
fs/bcachefs/alloc_background.c
fs/bcachefs/alloc_background.h
fs/bcachefs/alloc_foreground.c
fs/bcachefs/backpointers.c
fs/bcachefs/backpointers.h
fs/bcachefs/bbpos.h
fs/bcachefs/bbpos_types.h [new file with mode: 0644]
fs/bcachefs/bcachefs.h
fs/bcachefs/bcachefs_format.h
fs/bcachefs/bkey.h
fs/bcachefs/bkey_methods.c
fs/bcachefs/bkey_methods.h
fs/bcachefs/bkey_sort.c
fs/bcachefs/btree_cache.c
fs/bcachefs/btree_cache.h
fs/bcachefs/btree_gc.c
fs/bcachefs/btree_io.c
fs/bcachefs/btree_iter.c
fs/bcachefs/btree_iter.h
fs/bcachefs/btree_key_cache.c
fs/bcachefs/btree_locking.c
fs/bcachefs/btree_locking.h
fs/bcachefs/btree_trans_commit.c
fs/bcachefs/btree_types.h
fs/bcachefs/btree_update_interior.c
fs/bcachefs/btree_update_interior.h
fs/bcachefs/buckets.c
fs/bcachefs/buckets.h
fs/bcachefs/chardev.c
fs/bcachefs/compress.c
fs/bcachefs/compress.h
fs/bcachefs/darray.h
fs/bcachefs/data_update.c
fs/bcachefs/data_update.h
fs/bcachefs/debug.c
fs/bcachefs/dirent.c
fs/bcachefs/dirent.h
fs/bcachefs/disk_groups.c
fs/bcachefs/disk_groups.h
fs/bcachefs/disk_groups_types.h [new file with mode: 0644]
fs/bcachefs/ec.c
fs/bcachefs/ec.h
fs/bcachefs/errcode.h
fs/bcachefs/error.c
fs/bcachefs/error.h
fs/bcachefs/extents.c
fs/bcachefs/extents.h
fs/bcachefs/fs-common.c
fs/bcachefs/fs-io-buffered.c
fs/bcachefs/fs-io-direct.c
fs/bcachefs/fs-ioctl.c
fs/bcachefs/fs-ioctl.h
fs/bcachefs/fs.c
fs/bcachefs/fsck.c
fs/bcachefs/fsck.h
fs/bcachefs/inode.c
fs/bcachefs/inode.h
fs/bcachefs/io_misc.c
fs/bcachefs/io_misc.h
fs/bcachefs/io_read.c
fs/bcachefs/io_write.c
fs/bcachefs/journal.c
fs/bcachefs/journal.h
fs/bcachefs/journal_io.c
fs/bcachefs/lru.c
fs/bcachefs/lru.h
fs/bcachefs/move.c
fs/bcachefs/move.h
fs/bcachefs/move_types.h
fs/bcachefs/movinggc.c
fs/bcachefs/opts.c
fs/bcachefs/opts.h
fs/bcachefs/printbuf.c
fs/bcachefs/quota.c
fs/bcachefs/quota.h
fs/bcachefs/rebalance.c
fs/bcachefs/rebalance.h
fs/bcachefs/rebalance_types.h
fs/bcachefs/recovery.c
fs/bcachefs/recovery_types.h
fs/bcachefs/reflink.c
fs/bcachefs/reflink.h
fs/bcachefs/replicas.c
fs/bcachefs/sb-clean.c
fs/bcachefs/sb-errors.c [new file with mode: 0644]
fs/bcachefs/sb-errors.h [new file with mode: 0644]
fs/bcachefs/sb-errors_types.h [new file with mode: 0644]
fs/bcachefs/sb-members.c
fs/bcachefs/sb-members.h
fs/bcachefs/six.c
fs/bcachefs/snapshot.c
fs/bcachefs/snapshot.h
fs/bcachefs/subvolume.c
fs/bcachefs/subvolume.h
fs/bcachefs/super-io.c
fs/bcachefs/super-io.h
fs/bcachefs/super.c
fs/bcachefs/super_types.h
fs/bcachefs/sysfs.c
fs/bcachefs/trace.c
fs/bcachefs/trace.h
fs/bcachefs/util.c
fs/bcachefs/util.h
fs/bcachefs/xattr.c
fs/bcachefs/xattr.h
fs/befs/linuxvfs.c
fs/ceph/acl.c
fs/ceph/addr.c
fs/ceph/cache.c
fs/ceph/caps.c
fs/ceph/crypto.c
fs/ceph/debugfs.c
fs/ceph/dir.c
fs/ceph/export.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/locks.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/mdsmap.c
fs/ceph/mdsmap.h [new file with mode: 0644]
fs/ceph/metric.c
fs/ceph/quota.c
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/super.h
fs/ceph/xattr.c
fs/efivarfs/super.c
fs/efs/super.c
fs/erofs/super.c
fs/exfat/file.c
fs/exfat/inode.c
fs/exportfs/expfs.c
fs/ext2/super.c
fs/ext4/super.c
fs/f2fs/compress.c
fs/f2fs/data.c
fs/f2fs/extent_cache.c
fs/f2fs/file.c
fs/f2fs/inode.c
fs/f2fs/node.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/xattr.c
fs/fat/nfs.c
fs/fhandle.c
fs/freevxfs/vxfs_super.c
fs/fuse/inode.c
fs/gfs2/acl.h
fs/gfs2/aops.c
fs/gfs2/aops.h
fs/gfs2/bmap.c
fs/gfs2/bmap.h
fs/gfs2/dir.c
fs/gfs2/dir.h
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glock.h
fs/gfs2/glops.c
fs/gfs2/glops.h
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/inode.h
fs/gfs2/log.h
fs/gfs2/lops.h
fs/gfs2/meta_io.h
fs/gfs2/ops_fstype.c
fs/gfs2/quota.c
fs/gfs2/quota.h
fs/gfs2/recovery.h
fs/gfs2/rgrp.c
fs/gfs2/rgrp.h
fs/gfs2/super.c
fs/gfs2/super.h
fs/gfs2/trans.h
fs/gfs2/util.h
fs/gfs2/xattr.c
fs/gfs2/xattr.h
fs/hugetlbfs/inode.c
fs/jffs2/super.c
fs/jfs/super.c
fs/libfs.c
fs/mnt_idmapping.c
fs/nfs/Kconfig
fs/nfs/delegation.c
fs/nfs/delegation.h
fs/nfs/dir.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4proc.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfs/proc.c
fs/nfs/super.c
fs/nfs/write.c
fs/nfsd/export.c
fs/notify/fanotify/fanotify_user.c
fs/ntfs/namei.c
fs/ntfs3/super.c
fs/overlayfs/Makefile
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/export.c
fs/overlayfs/file.c
fs/overlayfs/inode.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/params.c
fs/overlayfs/params.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/overlayfs/xattrs.c [new file with mode: 0644]
fs/smb/client/export.c
fs/smb/server/smb_common.c
fs/smb/server/smbacl.c
fs/smb/server/vfs.c
fs/squashfs/export.c
fs/super.c
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/journal.c
fs/ubifs/super.c
fs/ubifs/tnc.c
fs/ufs/super.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_format.h
fs/xfs/libxfs/xfs_rtbitmap.c
fs/xfs/libxfs/xfs_rtbitmap.h [new file with mode: 0644]
fs/xfs/libxfs/xfs_sb.c
fs/xfs/libxfs/xfs_sb.h
fs/xfs/libxfs/xfs_trans_resv.c
fs/xfs/libxfs/xfs_types.c
fs/xfs/libxfs/xfs_types.h
fs/xfs/scrub/bmap.c
fs/xfs/scrub/fscounters.c
fs/xfs/scrub/inode.c
fs/xfs/scrub/rtbitmap.c
fs/xfs/scrub/rtsummary.c
fs/xfs/scrub/trace.c
fs/xfs/scrub/trace.h
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_file.c
fs/xfs/xfs_fsmap.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_linux.h
fs/xfs/xfs_mount.h
fs/xfs/xfs_ondisk.h
fs/xfs/xfs_reflink.c
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_rtalloc.h
fs/xfs/xfs_super.c
fs/xfs/xfs_trans.c
include/drm/amd_asic_type.h
include/dt-bindings/watchdog/aspeed-wdt.h [new file with mode: 0644]
include/linux/acpi.h
include/linux/aer.h
include/linux/amd-iommu.h
include/linux/btf.h
include/linux/ceph/ceph_debug.h
include/linux/ceph/ceph_fs.h
include/linux/ceph/mdsmap.h [deleted file]
include/linux/closure.h
include/linux/dma-mapping.h
include/linux/ethtool.h
include/linux/exportfs.h
include/linux/f2fs_fs.h
include/linux/fw_table.h [new file with mode: 0644]
include/linux/gpio/driver.h
include/linux/i3c/device.h
include/linux/i3c/master.h
include/linux/idr.h
include/linux/input.h
include/linux/input/mt.h
include/linux/iommu.h
include/linux/kprobes.h
include/linux/lsm_hook_defs.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/mlx5_ifc_vdpa.h
include/linux/mmc/card.h
include/linux/mnt_idmapping.h
include/linux/mtd/cfi.h
include/linux/mtd/qinfo.h
include/linux/mtd/spinand.h
include/linux/nfs_fs_sb.h
include/linux/nfs_xdr.h
include/linux/nvme-keyring.h
include/linux/nvme.h
include/linux/pinctrl/consumer.h
include/linux/platform_data/cros_ec_commands.h
include/linux/platform_data/shmob_drm.h
include/linux/pwm.h
include/linux/slab.h
include/linux/spi/spi.h
include/linux/sunrpc/clnt.h
include/linux/tcp.h
include/linux/tsm.h [new file with mode: 0644]
include/linux/vdpa.h
include/linux/virtio_pci_modern.h
include/linux/vlynq.h [deleted file]
include/media/cec.h
include/media/ipu-bridge.h
include/media/mipi-csi2.h
include/media/v4l2-dev.h
include/media/v4l2-device.h
include/media/v4l2-event.h
include/media/v4l2-mc.h
include/media/v4l2-subdev.h
include/media/videobuf-core.h [deleted file]
include/media/videobuf-dma-contig.h [deleted file]
include/media/videobuf-dma-sg.h [deleted file]
include/media/videobuf-vmalloc.h [deleted file]
include/net/flow.h
include/net/netfilter/nf_conntrack_act_ct.h
include/net/tcp_ao.h
include/soc/tegra/mc.h
include/uapi/linux/media-bus-format.h
include/uapi/linux/nfsd_netlink.h
include/uapi/linux/npcm-video.h [new file with mode: 0644]
include/uapi/linux/psp-sev.h
include/uapi/linux/sev-guest.h
include/uapi/linux/v4l2-controls.h
include/uapi/linux/vhost.h
include/uapi/linux/vhost_types.h
include/uapi/linux/videodev2.h
include/uapi/linux/virtio_config.h
include/uapi/mtd/ubi-user.h
io_uring/kbuf.c
io_uring/kbuf.h
io_uring/net.c
io_uring/opdef.c
io_uring/rw.c
io_uring/rw.h
kernel/bpf/bpf_iter.c
kernel/bpf/cgroup_iter.c
kernel/bpf/cpumask.c
kernel/bpf/helpers.c
kernel/bpf/map_iter.c
kernel/bpf/task_iter.c
kernel/bpf/verifier.c
kernel/cgroup/rstat.c
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_main.c
kernel/dma/direct.c
kernel/dma/direct.h
kernel/dma/mapping.c
kernel/dma/swiotlb.c
kernel/rcu/rcu.h
kernel/rcu/tasks.h
kernel/rcu/tree.c
kernel/trace/bpf_trace.c
kernel/trace/trace_fprobe.c
kernel/trace/trace_kprobe.c
lib/Kconfig
lib/Makefile
lib/closure.c
lib/fw_table.c [new file with mode: 0644]
lib/test_objpool.c
mm/memblock.c
net/9p/client.c
net/9p/trans_fd.c
net/9p/trans_xen.c
net/bpf/test_run.c
net/bridge/netfilter/ebtable_broute.c
net/bridge/netfilter/ebtable_filter.c
net/bridge/netfilter/ebtable_nat.c
net/bridge/netfilter/ebtables.c
net/bridge/netfilter/nf_conntrack_bridge.c
net/core/filter.c
net/core/page_pool.c
net/core/xdp.c
net/dccp/ipv4.c
net/dccp/ipv6.c
net/devlink/netlink_gen.c
net/hsr/hsr_forward.c
net/ipv4/fou_bpf.c
net/ipv4/netfilter/iptable_nat.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_reject_ipv4.c
net/ipv4/syncookies.c
net/ipv4/tcp_ao.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/tcp_sigpool.c
net/ipv6/netfilter/ip6table_nat.c
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/nf_defrag_ipv6_hooks.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/syncookies.c
net/kcm/kcmsock.c
net/llc/llc_input.c
net/llc/llc_s_ac.c
net/llc/llc_station.c
net/netfilter/ipvs/ip_vs_core.c
net/netfilter/ipvs/ip_vs_dh.c
net/netfilter/ipvs/ip_vs_fo.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/ipvs/ip_vs_lblc.c
net/netfilter/ipvs/ip_vs_lblcr.c
net/netfilter/ipvs/ip_vs_lc.c
net/netfilter/ipvs/ip_vs_nq.c
net/netfilter/ipvs/ip_vs_ovf.c
net/netfilter/ipvs/ip_vs_pe_sip.c
net/netfilter/ipvs/ip_vs_rr.c
net/netfilter/ipvs/ip_vs_sed.c
net/netfilter/ipvs/ip_vs_sh.c
net/netfilter/ipvs/ip_vs_twos.c
net/netfilter/ipvs/ip_vs_wlc.c
net/netfilter/ipvs/ip_vs_wrr.c
net/netfilter/nf_conntrack_bpf.c
net/netfilter/nf_conntrack_broadcast.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_nat_bpf.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_redirect.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_osf.c
net/netfilter/nft_chain_nat.c
net/netfilter/nft_fib.c
net/netfilter/nft_fwd_netdev.c
net/netfilter/xt_recent.c
net/netlink/diag.c
net/openvswitch/conntrack.c
net/rxrpc/conn_object.c
net/rxrpc/local_object.c
net/sched/act_api.c
net/sched/act_ct.c
net/sched/act_gate.c
net/sched/cls_api.c
net/sched/cls_basic.c
net/sched/cls_cgroup.c
net/sched/cls_fw.c
net/sched/cls_route.c
net/sched/cls_u32.c
net/sched/sch_cbs.c
net/sched/sch_choke.c
net/sched/sch_drr.c
net/sched/sch_etf.c
net/sched/sch_ets.c
net/sched/sch_fifo.c
net/sched/sch_fq.c
net/sched/sch_gred.c
net/sched/sch_hfsc.c
net/sched/sch_htb.c
net/sched/sch_ingress.c
net/sched/sch_mqprio.c
net/sched/sch_mqprio_lib.c
net/sched/sch_multiq.c
net/sched/sch_netem.c
net/sched/sch_plug.c
net/sched/sch_prio.c
net/sched/sch_qfq.c
net/sched/sch_red.c
net/sched/sch_sfq.c
net/sched/sch_skbprio.c
net/sched/sch_taprio.c
net/sched/sch_tbf.c
net/sched/sch_teql.c
net/smc/af_smc.c
net/smc/smc.h
net/smc/smc_cdc.c
net/smc/smc_close.c
net/socket.c
net/sunrpc/clnt.c
net/sunrpc/rpcb_clnt.c
net/sunrpc/xprt.c
net/sunrpc/xprtsock.c
net/tipc/netlink.c
net/vmw_vsock/virtio_transport_common.c
net/xfrm/xfrm_interface_bpf.c
sound/core/info.c
sound/oss/dmasound/dmasound_paula.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/acp/acp-i2s.c
sound/soc/codecs/aw88395/aw88395.c
sound/soc/codecs/aw88399.c
sound/soc/codecs/aw88399.h
sound/soc/codecs/da7219-aad.c
sound/soc/codecs/hdmi-codec.c
sound/soc/codecs/nau8540.c
sound/soc/codecs/nau8540.h
sound/soc/codecs/rt712-sdca.c
sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
sound/soc/rockchip/rockchip_i2s_tdm.c
sound/soc/soc-dapm.c
sound/soc/sof/sof-client.c
sound/soc/ti/omap-mcbsp.c
tools/net/ynl/generated/devlink-user.c
tools/net/ynl/generated/nfsd-user.c
tools/net/ynl/generated/nfsd-user.h
tools/net/ynl/ynl-gen-c.py
tools/power/cpupower/man/cpupower-powercap-info.1
tools/testing/cxl/test/cxl.c
tools/testing/cxl/test/mem.c
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
tools/testing/selftests/bpf/map_tests/map_percpu_stats.c
tools/testing/selftests/bpf/prog_tests/cgroup_iter.c
tools/testing/selftests/bpf/prog_tests/iters.c
tools/testing/selftests/bpf/prog_tests/test_bpffs.c
tools/testing/selftests/bpf/prog_tests/verifier.c
tools/testing/selftests/bpf/progs/iters_css_task.c
tools/testing/selftests/bpf/progs/iters_task_failure.c
tools/testing/selftests/bpf/progs/verifier_precision.c [new file with mode: 0644]
tools/testing/selftests/bpf/verifier/bpf_st_mem.c
tools/testing/selftests/bpf/xdp_hw_metadata.c
tools/testing/selftests/net/pmtu.sh
tools/testing/selftests/riscv/hwprobe/Makefile
tools/testing/selftests/riscv/hwprobe/cbo.c [new file with mode: 0644]
tools/testing/selftests/riscv/hwprobe/hwprobe.c
tools/testing/selftests/riscv/hwprobe/hwprobe.h [new file with mode: 0644]
tools/testing/vsock/util.c
tools/testing/vsock/util.h
tools/testing/vsock/vsock_test.c

diff --git a/Documentation/ABI/testing/configfs-tsm b/Documentation/ABI/testing/configfs-tsm
new file mode 100644 (file)
index 0000000..dd24202
--- /dev/null
@@ -0,0 +1,82 @@
+What:          /sys/kernel/config/tsm/report/$name/inblob
+Date:          September, 2023
+KernelVersion: v6.7
+Contact:       linux-coco@lists.linux.dev
+Description:
+               (WO) Up to 64 bytes of user specified binary data. For replay
+               protection this should include a nonce, but the kernel does not
+               place any restrictions on the content.
+
+What:          /sys/kernel/config/tsm/report/$name/outblob
+Date:          September, 2023
+KernelVersion: v6.7
+Contact:       linux-coco@lists.linux.dev
+Description:
+               (RO) Binary attestation report generated from @inblob and other
+               options. The format of the report is implementation specific
+               where the implementation is conveyed via the @provider
+               attribute.
+
+What:          /sys/kernel/config/tsm/report/$name/auxblob
+Date:          October, 2023
+KernelVersion: v6.7
+Contact:       linux-coco@lists.linux.dev
+Description:
+               (RO) Optional supplemental data that a TSM may emit, visibility
+               of this attribute depends on TSM, and may be empty if no
+               auxiliary data is available.
+
+               When @provider is "sev_guest" this file contains the
+               "cert_table" from SEV-ES Guest-Hypervisor Communication Block
+               Standardization v2.03 Section 4.1.8.1 MSG_REPORT_REQ.
+               https://www.amd.com/content/dam/amd/en/documents/epyc-technical-docs/specifications/56421.pdf
+
+What:          /sys/kernel/config/tsm/report/$name/provider
+Date:          September, 2023
+KernelVersion: v6.7
+Contact:       linux-coco@lists.linux.dev
+Description:
+               (RO) A name for the format-specification of @outblob like
+               "sev_guest" [1] or "tdx_guest" [2] in the near term, or a
+               common standard format in the future.
+
+               [1]: SEV Secure Nested Paging Firmware ABI Specification
+               Revision 1.55 Table 22
+               https://www.amd.com/content/dam/amd/en/documents/epyc-technical-docs/specifications/56860.pdf
+
+               [2]: Intel® Trust Domain Extensions Data Center Attestation
+               Primitives : Quote Generation Library and Quote Verification
+               Library Revision 0.8 Appendix 4,5
+               https://download.01.org/intel-sgx/latest/dcap-latest/linux/docs/Intel_TDX_DCAP_Quoting_Library_API.pdf
+
+What:          /sys/kernel/config/tsm/report/$name/generation
+Date:          September, 2023
+KernelVersion: v6.7
+Contact:       linux-coco@lists.linux.dev
+Description:
+               (RO) The value in this attribute increments each time @inblob or
+               any option is written. Userspace can detect conflicts by
+               checking generation before writing to any attribute and making
+               sure the number of writes matches expectations after reading
+               @outblob, or it can prevent conflicts by creating a report
+               instance per requesting context.
+
+What:          /sys/kernel/config/tsm/report/$name/privlevel
+Date:          September, 2023
+KernelVersion: v6.7
+Contact:       linux-coco@lists.linux.dev
+Description:
+               (WO) Attribute is visible if a TSM implementation provider
+               supports the concept of attestation reports for TVMs running at
+               different privilege levels, like SEV-SNP "VMPL", specify the
+               privilege level via this attribute.  The minimum acceptable
+               value is conveyed via @privlevel_floor and the maximum
+               acceptable value is TSM_PRIVLEVEL_MAX (3).
+
+What:          /sys/kernel/config/tsm/report/$name/privlevel_floor
+Date:          September, 2023
+KernelVersion: v6.7
+Contact:       linux-coco@lists.linux.dev
+Description:
+               (RO) Indicates the minimum permissible value that can be written
+               to @privlevel.
index 087f762ebfd534e171434833fda4d772bd04ec1f..e76c3600607f8cc697e0ec2eb6753fffa6eb1647 100644 (file)
@@ -178,6 +178,21 @@ Description:
                hardware decoder target list.
 
 
+What:          /sys/bus/cxl/devices/portX/decoders_committed
+Date:          October, 2023
+KernelVersion: v6.7
+Contact:       linux-cxl@vger.kernel.org
+Description:
+               (RO) A memory device is considered active when any of its
+               decoders are in the "committed" state (See CXL 3.0 8.2.4.19.7
+               CXL HDM Decoder n Control Register). Hotplug and destructive
+               operations like "sanitize" are blocked while the device is actively
+               decoding a Host Physical Address range. Note that this number
+               may be elevated without any regionX objects active or even
+               enumerated, as this may be due to decoders established by
+               platform firmware or a previous kernel (kexec).
+
+
 What:          /sys/bus/cxl/devices/decoderX.Y
 Date:          June, 2021
 KernelVersion: v5.14
@@ -369,6 +384,21 @@ Description:
                provided it is currently idle / not bound to a driver.
 
 
+What:          /sys/bus/cxl/devices/decoderX.Y/qos_class
+Date:          May, 2023
+KernelVersion: v6.5
+Contact:       linux-cxl@vger.kernel.org
+Description:
+               (RO) For CXL host platforms that support "QoS Telemetry" this
+               root-decoder-only attribute conveys a platform specific cookie
+               that identifies a QoS performance class for the CXL Window.
+               This class-id can be compared against a similar "qos_class"
+               published for each memory-type that an endpoint supports. While
+               it is not required that endpoints map their local memory-class
+               to a matching platform class, mismatches are not recommended and
+               there are platform specific side-effects that may result.
+
+
 What:          /sys/bus/cxl/devices/regionZ/uuid
 Date:          May, 2022
 KernelVersion: v6.0
index 1f4a2662335bb1a68cd401c1a6b85056994eb94b..e5248fd67a563b489d8c4a65a7b32ddd2605d952 100644 (file)
@@ -67,7 +67,7 @@ What:         /sys/bus/i3c/devices/i3c-<bus-id>/pid
 KernelVersion:  5.0
 Contact:       linux-i3c@vger.kernel.org
 Description:
-               PID stands for Provisional ID and is used to uniquely identify
+               PID stands for Provisioned ID and is used to uniquely identify
                a device on a bus. This PID contains information about the
                vendor, the part and an instance ID so that several devices of
                the same type can be connected on the same bus.
@@ -123,7 +123,7 @@ What:               /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/pid
 KernelVersion:  5.0
 Contact:       linux-i3c@vger.kernel.org
 Description:
-               PID stands for Provisional ID and is used to uniquely identify
+               PID stands for Provisioned ID and is used to uniquely identify
                a device on a bus. This PID contains information about the
                vendor, the part and an instance ID so that several devices of
                the same type can be connected on the same bus.
index 28a6111202ba15d28b94ef6777ac3a77a5bf9540..4da53878bff6b80456da7761c357ac985b7fe6b3 100644 (file)
@@ -1,4 +1,4 @@
-What:          /sys/bus/vdpa/driver_autoprobe
+What:          /sys/bus/vdpa/drivers_autoprobe
 Date:          March 2020
 Contact:       virtualization@lists.linux-foundation.org
 Description:
@@ -17,7 +17,7 @@ Description:
                Writing a device name to this file will cause the kernel binds
                devices to a compatible driver.
 
-               This can be useful when /sys/bus/vdpa/driver_autoprobe is
+               This can be useful when /sys/bus/vdpa/drivers_autoprobe is
                disabled.
 
 What:          /sys/bus/vdpa/drivers/.../bind
index 2a4bc78c27ecb77419ecd9d853280b7ab633e1a0..65731b060e3fef98cb97f03695ca8757ee197700 100644 (file)
                          forcing Dual Address Cycle for PCI cards supporting
                          greater than 32-bit addressing.
 
-       iommu.strict=   [ARM64, X86] Configure TLB invalidation behaviour
+       iommu.strict=   [ARM64, X86, S390] Configure TLB invalidation behaviour
                        Format: { "0" | "1" }
                        0 - Lazy mode.
                          Request that DMA unmap operations use deferred
                        [NFS] set the TCP port on which the NFSv4 callback
                        channel should listen.
 
+       nfs.delay_retrans=
+                       [NFS] specifies the number of times the NFSv4 client
+                       retries the request before returning an EAGAIN error,
+                       after a reply of NFS4ERR_DELAY from the server.
+                       Only applies if the softerr mount option is enabled,
+                       and the specified value is >= 0.
+
        nfs.enable_ino64=
                        [NFS] enable 64-bit inode numbers.
                        If zero, the NFS client will fake up a 32-bit inode
        s390_iommu=     [HW,S390]
                        Set s390 IOTLB flushing mode
                strict
-                       With strict flushing every unmap operation will result in
-                       an IOTLB flush. Default is lazy flushing before reuse,
-                       which is faster.
+                       With strict flushing every unmap operation will result
+                       in an IOTLB flush. Default is lazy flushing before
+                       reuse, which is faster. Deprecated, equivalent to
+                       iommu.strict=1.
 
        s390_iommu_aperture=    [KNL,S390]
                        Specifies the size of the per device DMA address space
diff --git a/Documentation/admin-guide/media/mgb4.rst b/Documentation/admin-guide/media/mgb4.rst
new file mode 100644 (file)
index 0000000..2977f74
--- /dev/null
@@ -0,0 +1,374 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+mgb4 sysfs interface
+====================
+
+The mgb4 driver provides a sysfs interface that is used to configure video
+stream related parameters (some of them must be set properly before the v4l2
+device can be opened) and obtain the video device/stream status.
+
+There are two types of parameters - global / PCI card related, found under
+``/sys/class/video4linux/videoX/device`` and module specific found under
+``/sys/class/video4linux/videoX``.
+
+
+Global (PCI card) parameters
+============================
+
+**module_type** (R):
+    Module type.
+
+    | 0 - No module present
+    | 1 - FPDL3
+    | 2 - GMSL
+
+**module_version** (R):
+    Module version number. Zero in case of a missing module.
+
+**fw_type** (R):
+    Firmware type.
+
+    | 1 - FPDL3
+    | 2 - GMSL
+
+**fw_version** (R):
+    Firmware version number.
+
+**serial_number** (R):
+    Card serial number. The format is::
+
+        PRODUCT-REVISION-SERIES-SERIAL
+
+    where each component is an 8b number.
+
+
+Common FPDL3/GMSL input parameters
+==================================
+
+**input_id** (R):
+    Input number ID, zero based.
+
+**oldi_lane_width** (RW):
+    Number of deserializer output lanes.
+
+    | 0 - single
+    | 1 - dual (default)
+
+**color_mapping** (RW):
+    Mapping of the incoming bits in the signal to the colour bits of the pixels.
+
+    | 0 - OLDI/JEIDA
+    | 1 - SPWG/VESA (default)
+
+**link_status** (R):
+    Video link status. If the link is locked, chips are properly connected and
+    communicating at the same speed and protocol. The link can be locked without
+    an active video stream.
+
+    A value of 0 is equivalent to the V4L2_IN_ST_NO_SYNC flag of the V4L2
+    VIDIOC_ENUMINPUT status bits.
+
+    | 0 - unlocked
+    | 1 - locked
+
+**stream_status** (R):
+    Video stream status. A stream is detected if the link is locked, the input
+    pixel clock is running and the DE signal is moving.
+
+    A value of 0 is equivalent to the V4L2_IN_ST_NO_SIGNAL flag of the V4L2
+    VIDIOC_ENUMINPUT status bits.
+
+    | 0 - not detected
+    | 1 - detected
+
+**video_width** (R):
+    Video stream width. This is the actual width as detected by the HW.
+
+    The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in the width
+    field of the v4l2_bt_timings struct.
+
+**video_height** (R):
+    Video stream height. This is the actual height as detected by the HW.
+
+    The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in the height
+    field of the v4l2_bt_timings struct.
+
+**vsync_status** (R):
+    The type of VSYNC pulses as detected by the video format detector.
+
+    The value is equivalent to the flags returned by VIDIOC_QUERY_DV_TIMINGS in
+    the polarities field of the v4l2_bt_timings struct.
+
+    | 0 - active low
+    | 1 - active high
+    | 2 - not available
+
+**hsync_status** (R):
+    The type of HSYNC pulses as detected by the video format detector.
+
+    The value is equivalent to the flags returned by VIDIOC_QUERY_DV_TIMINGS in
+    the polarities field of the v4l2_bt_timings struct.
+
+    | 0 - active low
+    | 1 - active high
+    | 2 - not available
+
+**vsync_gap_length** (RW):
+    If the incoming video signal does not contain synchronization VSYNC and
+    HSYNC pulses, these must be generated internally in the FPGA to achieve
+    the correct frame ordering. This value indicates how many "empty" pixels
+    (pixels with deasserted Data Enable signal) are necessary to generate the
+    internal VSYNC pulse.
+
+**hsync_gap_length** (RW):
+    If the incoming video signal does not contain synchronization VSYNC and
+    HSYNC pulses, these must be generated internally in the FPGA to achieve
+    the correct frame ordering. This value indicates how many "empty" pixels
+    (pixels with deasserted Data Enable signal) are necessary to generate the
+    internal HSYNC pulse. The value must be greater than 1 and smaller than
+    vsync_gap_length.
+
+**pclk_frequency** (R):
+    Input pixel clock frequency in kHz.
+
+    The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+    the pixelclock field of the v4l2_bt_timings struct.
+
+    *Note: The frequency_range parameter must be set properly first to get
+    a valid frequency here.*
+
+**hsync_width** (R):
+    Width of the HSYNC signal in PCLK clock ticks.
+
+    The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+    the hsync field of the v4l2_bt_timings struct.
+
+**vsync_width** (R):
+    Width of the VSYNC signal in PCLK clock ticks.
+
+    The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+    the vsync field of the v4l2_bt_timings struct.
+
+**hback_porch** (R):
+    Number of PCLK pulses between deassertion of the HSYNC signal and the first
+    valid pixel in the video line (marked by DE=1).
+
+    The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+    the hbackporch field of the v4l2_bt_timings struct.
+
+**hfront_porch** (R):
+    Number of PCLK pulses between the end of the last valid pixel in the video
+    line (marked by DE=1) and assertion of the HSYNC signal.
+
+    The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+    the hfrontporch field of the v4l2_bt_timings struct.
+
+**vback_porch** (R):
+    Number of video lines between deassertion of the VSYNC signal and the video
+    line with the first valid pixel (marked by DE=1).
+
+    The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+    the vbackporch field of the v4l2_bt_timings struct.
+
+**vfront_porch** (R):
+    Number of video lines between the end of the last valid pixel line (marked
+    by DE=1) and assertion of the VSYNC signal.
+
+    The value is identical to what VIDIOC_QUERY_DV_TIMINGS returns in
+    the vfrontporch field of the v4l2_bt_timings struct.
+
+**frequency_range** (RW)
+    PLL frequency range of the OLDI input clock generator. The PLL frequency is
+    derived from the Pixel Clock Frequency (PCLK) and is equal to PCLK if
+    oldi_lane_width is set to "single" and PCLK/2 if oldi_lane_width is set to
+    "dual".
+
+    | 0 - PLL < 50MHz (default)
+    | 1 - PLL >= 50MHz
+
+    *Note: This parameter can not be changed while the input v4l2 device is
+    open.*
+
+
+Common FPDL3/GMSL output parameters
+===================================
+
+**output_id** (R):
+    Output number ID, zero based.
+
+**video_source** (RW):
+    Output video source. If set to 0 or 1, the source is the corresponding card
+    input and the v4l2 output devices are disabled. If set to 2 or 3, the source
+    is the corresponding v4l2 video output device. The default is
+    the corresponding v4l2 output, i.e. 2 for OUT1 and 3 for OUT2.
+
+    | 0 - input 0
+    | 1 - input 1
+    | 2 - v4l2 output 0
+    | 3 - v4l2 output 1
+
+    *Note: This parameter can not be changed while ANY of the input/output v4l2
+    devices is open.*
+
+**display_width** (RW):
+    Display width. There is no autodetection of the connected display, so the
+    proper value must be set before the start of streaming. The default width
+    is 1280.
+
+    *Note: This parameter can not be changed while the output v4l2 device is
+    open.*
+
+**display_height** (RW):
+    Display height. There is no autodetection of the connected display, so the
+    proper value must be set before the start of streaming. The default height
+    is 640.
+
+    *Note: This parameter can not be changed while the output v4l2 device is
+    open.*
+
+**frame_rate** (RW):
+    Output video frame rate in frames per second. The default frame rate is
+    60Hz.
+
+**hsync_polarity** (RW):
+    HSYNC signal polarity.
+
+    | 0 - active low (default)
+    | 1 - active high
+
+**vsync_polarity** (RW):
+    VSYNC signal polarity.
+
+    | 0 - active low (default)
+    | 1 - active high
+
+**de_polarity** (RW):
+    DE signal polarity.
+
+    | 0 - active low
+    | 1 - active high (default)
+
+**pclk_frequency** (RW):
+    Output pixel clock frequency. Allowed values are between 25000-190000(kHz)
+    and there is a non-linear stepping between two consecutive allowed
+    frequencies. The driver finds the nearest allowed frequency to the given
+    value and sets it. When reading this property, you get the exact
+    frequency set by the driver. The default frequency is 70000kHz.
+
+    *Note: This parameter can not be changed while the output v4l2 device is
+    open.*
+
+**hsync_width** (RW):
+    Width of the HSYNC signal in pixels. The default value is 16.
+
+**vsync_width** (RW):
+    Width of the VSYNC signal in video lines. The default value is 2.
+
+**hback_porch** (RW):
+    Number of PCLK pulses between deassertion of the HSYNC signal and the first
+    valid pixel in the video line (marked by DE=1). The default value is 32.
+
+**hfront_porch** (RW):
+    Number of PCLK pulses between the end of the last valid pixel in the video
+    line (marked by DE=1) and assertion of the HSYNC signal. The default value
+    is 32.
+
+**vback_porch** (RW):
+    Number of video lines between deassertion of the VSYNC signal and the video
+    line with the first valid pixel (marked by DE=1). The default value is 2.
+
+**vfront_porch** (RW):
+    Number of video lines between the end of the last valid pixel line (marked
+    by DE=1) and assertion of the VSYNC signal. The default value is 2.
+
+
+FPDL3 specific input parameters
+===============================
+
+**fpdl3_input_width** (RW):
+    Number of deserializer input lines.
+
+    | 0 - auto (default)
+    | 1 - single
+    | 2 - dual
+
+FPDL3 specific output parameters
+================================
+
+**fpdl3_output_width** (RW):
+    Number of serializer output lines.
+
+    | 0 - auto (default)
+    | 1 - single
+    | 2 - dual
+
+GMSL specific input parameters
+==============================
+
+**gmsl_mode** (RW):
+    GMSL speed mode.
+
+    | 0 - 12Gb/s (default)
+    | 1 - 6Gb/s
+    | 2 - 3Gb/s
+    | 3 - 1.5Gb/s
+
+**gmsl_stream_id** (RW):
+    The GMSL multi-stream contains up to four video streams. This parameter
+    selects which stream is captured by the video input. The value is the
+    zero-based index of the stream. The default stream id is 0.
+
+    *Note: This parameter can not be changed while the input v4l2 device is
+    open.*
+
+**gmsl_fec** (RW):
+    GMSL Forward Error Correction (FEC).
+
+    | 0 - disabled
+    | 1 - enabled (default)
+
+
+====================
+mgb4 mtd partitions
+====================
+
+The mgb4 driver creates a MTD device with two partitions:
+ - mgb4-fw.X - FPGA firmware.
+ - mgb4-data.X - Factory settings, e.g. card serial number.
+
+The *mgb4-fw* partition is writable and is used for FW updates, *mgb4-data* is
+read-only. The *X* attached to the partition name represents the card number.
+Depending on the CONFIG_MTD_PARTITIONED_MASTER kernel configuration, you may
+also have a third partition named *mgb4-flash* available in the system. This
+partition represents the card's whole, unpartitioned FLASH memory and one should
+not fiddle with it...
+
+====================
+mgb4 iio (triggers)
+====================
+
+The mgb4 driver creates an Industrial I/O (IIO) device that provides trigger and
+signal level status capability. The following scan elements are available:
+
+**activity**:
+       The trigger levels and pending status.
+
+       | bit 1 - trigger 1 pending
+       | bit 2 - trigger 2 pending
+       | bit 5 - trigger 1 level
+       | bit 6 - trigger 2 level
+
+**timestamp**:
+       The trigger event timestamp.
+
+The iio device can operate either in "raw" mode where you can fetch the signal
+levels (activity bits 5 and 6) using sysfs access or in triggered buffer mode.
+In the triggered buffer mode you can follow the signal level changes (activity
+bits 1 and 2) using the iio device in /dev. If you enable the timestamps, you
+will also get the exact trigger event time that can be matched to a video frame
+(every mgb4 video frame has a timestamp with the same clock source).
+
+*Note: although the activity sample always contains all the status bits, it makes
+no sense to get the pending bits in raw mode or the level bits in the triggered
+buffer mode - the values do not represent valid data in such case.*
index 42528795d4dad1af4de88c679f554150a2fc9efa..7d8e3c8987dba95f4713025dcbd8e0200ebf8561 100644 (file)
@@ -77,6 +77,7 @@ ipu3-cio2         Intel ipu3-cio2 driver
 ivtv              Conexant cx23416/cx23415 MPEG encoder/decoder
 ivtvfb            Conexant cx23415 framebuffer
 mantis            MANTIS based cards
+mgb4              Digiteq Automotive MGB4 frame grabber
 mxb               Siemens-Nixdorf 'Multimedia eXtension Board'
 netup-unidvb      NetUP Universal DVB card
 ngene             Micronas nGene
index 1c41f87c391717e770f7df4d0007427c9c13cf3e..61283d67ceefb90c332ecc3b519c2902255bf1cd 100644 (file)
@@ -17,6 +17,7 @@ Video4Linux (V4L) driver-specific documentation
        imx7
        ipu3
        ivtv
+       mgb4
        omap3isp
        omap4_camera
        philips
index 7d2dc78341c937937f76333b95e7fe38aad1d4d7..4328c6c72d305a546760bb89ed0a3ba1e3fa50c8 100644 (file)
@@ -78,7 +78,7 @@ The trace events are defined on a per-codec basis, e.g.:
 
 .. code-block:: bash
 
-        $ ls /sys/kernel/debug/tracing/events/ | grep visl
+        $ ls /sys/kernel/tracing/events/ | grep visl
         visl_fwht_controls
         visl_h264_controls
         visl_hevc_controls
@@ -90,13 +90,13 @@ For example, in order to dump HEVC SPS data:
 
 .. code-block:: bash
 
-        $ echo 1 >  /sys/kernel/debug/tracing/events/visl_hevc_controls/v4l2_ctrl_hevc_sps/enable
+        $ echo 1 >  /sys/kernel/tracing/events/visl_hevc_controls/v4l2_ctrl_hevc_sps/enable
 
 The SPS data will be dumped to the trace buffer, i.e.:
 
 .. code-block:: bash
 
-        $ cat /sys/kernel/debug/tracing/trace
+        $ cat /sys/kernel/tracing/trace
         video_parameter_set_id 0
         seq_parameter_set_id 0
         pic_width_in_luma_samples 1920
index 4b8399ac592ba383c12896358e4e2720442012c1..ced7b335e2e001c4c17c16631012c24eb1900011 100644 (file)
@@ -174,7 +174,7 @@ HWCAP2_DCPODP
     Functionality implied by ID_AA64ISAR1_EL1.DPB == 0b0010.
 
 HWCAP2_SVE2
-    Functionality implied by ID_AA64ZFR0_EL1.SVEVer == 0b0001.
+    Functionality implied by ID_AA64ZFR0_EL1.SVEver == 0b0001.
 
 HWCAP2_SVEAES
     Functionality implied by ID_AA64ZFR0_EL1.AES == 0b0001.
@@ -222,7 +222,7 @@ HWCAP2_RNG
     Functionality implied by ID_AA64ISAR0_EL1.RNDR == 0b0001.
 
 HWCAP2_BTI
-    Functionality implied by ID_AA64PFR0_EL1.BT == 0b0001.
+    Functionality implied by ID_AA64PFR1_EL1.BT == 0b0001.
 
 HWCAP2_MTE
     Functionality implied by ID_AA64PFR1_EL1.MTE == 0b0010, as described
@@ -232,7 +232,7 @@ HWCAP2_ECV
     Functionality implied by ID_AA64MMFR0_EL1.ECV == 0b0001.
 
 HWCAP2_AFP
-    Functionality implied by ID_AA64MFR1_EL1.AFP == 0b0001.
+    Functionality implied by ID_AA64MMFR1_EL1.AFP == 0b0001.
 
 HWCAP2_RPRES
     Functionality implied by ID_AA64ISAR2_EL1.RPRES == 0b0001.
index a52996b22f75d3e8fea1064313cf36abcd878cf3..7b2384de471f8fa5813271a010f8f5f011e7d8c3 100644 (file)
@@ -77,6 +77,9 @@ The following keys are defined:
   * :c:macro:`RISCV_HWPROBE_EXT_ZBS`: The Zbs extension is supported, as defined
        in version 1.0 of the Bit-Manipulation ISA extensions.
 
+  * :c:macro:`RISCV_HWPROBE_EXT_ZICBOZ`: The Zicboz extension is supported, as
+       ratified in commit 3dd606f ("Create cmobase-v1.0.pdf") of riscv-CMOs.
+
 * :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance
   information about the selected set of processors.
 
@@ -96,3 +99,6 @@ The following keys are defined:
 
   * :c:macro:`RISCV_HWPROBE_MISALIGNED_UNSUPPORTED`: Misaligned accesses are
     not supported at all and will generate a misaligned address fault.
+
+* :c:macro:`RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE`: An unsigned int which
+  represents the size of the Zicboz block in bytes.
index 8960fac42c40f3c7fd288f86f5f8d2233f422f09..54d199dce78bf50525b0430dcfc5a6a71429bf68 100644 (file)
@@ -42,6 +42,26 @@ An example string following the order is::
 
    rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
 
+"isa" and "hart isa" lines in /proc/cpuinfo
+-------------------------------------------
+
+The "isa" line in /proc/cpuinfo describes the lowest common denominator of
+RISC-V ISA extensions recognized by the kernel and implemented on all harts. The
+"hart isa" line, in contrast, describes the set of extensions recognized by the
+kernel on the particular hart being described, even if those extensions may not
+be present on all harts in the system.
+
+In both lines, the presence of an extension guarantees only that the hardware
+has the described capability. Additional kernel support or policy changes may be
+required before an extension's capability is fully usable by userspace programs.
+Similarly, for S-mode extensions, presence in one of these lines does not
+guarantee that the kernel is taking advantage of the extension, or that the
+feature will be visible in guest VMs managed by this kernel.
+
+Inversely, the absence of an extension in these lines does not necessarily mean
+the hardware does not support that feature. The running kernel may not recognize
+the extension, or may have deliberately removed it from the listing.
+
 Misaligned accesses
 -------------------
 
index 0d2647fb358d7ce1a4a85bd994d9b05e8ea0cec9..723408e399abd8ce83c8d9bc2c40dcf251c258b9 100644 (file)
@@ -37,16 +37,14 @@ prototype in a header for the wrapper kfunc.
 An example is given below::
 
         /* Disables missing prototype warnings */
-        __diag_push();
-        __diag_ignore_all("-Wmissing-prototypes",
-                          "Global kfuncs as their definitions will be in BTF");
+        __bpf_kfunc_start_defs();
 
         __bpf_kfunc struct task_struct *bpf_find_get_task_by_vpid(pid_t nr)
         {
                 return find_get_task_by_vpid(nr);
         }
 
-        __diag_pop();
+        __bpf_kfunc_end_defs();
 
 A wrapper kfunc is often needed when we need to annotate parameters of the
 kfunc. Otherwise one may directly make the kfunc visible to the BPF program by
diff --git a/Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml b/Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml
new file mode 100644 (file)
index 0000000..9816c4c
--- /dev/null
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/renesas,shmobile-lcdc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas SH-Mobile LCD Controller (LCDC)
+
+maintainers:
+  - Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
+  - Geert Uytterhoeven <geert+renesas@glider.be>
+
+properties:
+  compatible:
+    enum:
+      - renesas,r8a7740-lcdc # R-Mobile A1
+      - renesas,sh73a0-lcdc  # SH-Mobile AG5
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    maxItems: 5
+    description:
+      Only the functional clock is mandatory.
+      Some of the optional clocks are model-dependent (e.g. "video" (a.k.a.
+      "vou" or "dv_clk") is available on R-Mobile A1 only).
+
+  clock-names:
+    minItems: 1
+    items:
+      - const: fck
+      - enum: [ media, lclk, hdmi, video ]
+      - enum: [ media, lclk, hdmi, video ]
+      - enum: [ media, lclk, hdmi, video ]
+      - enum: [ media, lclk, hdmi, video ]
+
+  power-domains:
+    maxItems: 1
+
+  ports:
+    $ref: /schemas/graph.yaml#/properties/ports
+
+    properties:
+      port@0:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: LCD port (R-Mobile A1 and SH-Mobile AG5)
+        unevaluatedProperties: false
+
+      port@1:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: HDMI port (R-Mobile A1 LCDC1 and SH-Mobile AG5)
+        unevaluatedProperties: false
+
+      port@2:
+        $ref: /schemas/graph.yaml#/properties/port
+        description: MIPI-DSI port (SH-Mobile AG5)
+        unevaluatedProperties: false
+
+    required:
+      - port@0
+
+    unevaluatedProperties: false
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - clocks
+  - clock-names
+  - power-domains
+  - ports
+
+additionalProperties: false
+
+allOf:
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: renesas,r8a7740-lcdc
+    then:
+      properties:
+        ports:
+          properties:
+            port@2: false
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            const: renesas,sh73a0-lcdc
+    then:
+      properties:
+        ports:
+          required:
+            - port@1
+            - port@2
+
+examples:
+  - |
+    #include <dt-bindings/clock/r8a7740-clock.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+    lcd-controller@fe940000 {
+        compatible = "renesas,r8a7740-lcdc";
+        reg = <0xfe940000 0x4000>;
+        interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+        clocks = <&mstp1_clks R8A7740_CLK_LCDC0>,
+                 <&cpg_clocks R8A7740_CLK_M3>, <&lcdlclk0_clk>,
+                 <&vou_clk>;
+        clock-names = "fck", "media", "lclk", "video";
+        power-domains = <&pd_a4lc>;
+
+        ports {
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            port@0 {
+                reg = <0>;
+
+                lcdc0_rgb: endpoint {
+                };
+            };
+        };
+    };
index 0aa41bd9ddca433152b03979de6df95e046e8512..37975ee61c5ad48e822792ec4e37bad6f331a91c 100644 (file)
@@ -11,10 +11,10 @@ maintainers:
 
 properties:
   compatible:
-    enum:
-        - solomon,ssd1322
-        - solomon,ssd1325
-        - solomon,ssd1327
+    enum:
+      - solomon,ssd1322
+      - solomon,ssd1325
+      - solomon,ssd1327
 
 required:
   - compatible
index bfff71ff3d6a7366343ca4b47d514d09f3cb3528..b6864d0ee81e4bbf89fa75d9ee25771c234ddab6 100644 (file)
@@ -68,10 +68,14 @@ properties:
                   pattern: cs16$
               - items:
                   pattern: c32$
+              - items:
+                  pattern: c32d-wl$
               - items:
                   pattern: cs32$
               - items:
                   pattern: c64$
+              - items:
+                  pattern: c64d-wl$
               - items:
                   pattern: cs64$
               - items:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt
deleted file mode 100644 (file)
index 86b2e43..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-Pinctrl-based I2C Bus DeMux
-
-This binding describes an I2C bus demultiplexer that uses pin multiplexing to
-route the I2C signals, and represents the pin multiplexing configuration using
-the pinctrl device tree bindings. This may be used to select one I2C IP core at
-runtime which may have a better feature set for a given task than another I2C
-IP core on the SoC. The most simple example is to fall back to GPIO bitbanging
-if your current runtime configuration hits an errata of the internal IP core.
-
-    +-------------------------------+
-    | SoC                           |
-    |                               |   +-----+  +-----+
-    |   +------------+              |   | dev |  | dev |
-    |   |I2C IP Core1|--\           |   +-----+  +-----+
-    |   +------------+   \-------+  |      |        |
-    |                    |Pinctrl|--|------+--------+
-    |   +------------+   +-------+  |
-    |   |I2C IP Core2|--/           |
-    |   +------------+              |
-    |                               |
-    +-------------------------------+
-
-Required properties:
-- compatible: "i2c-demux-pinctrl"
-- i2c-parent: List of phandles of I2C masters available for selection. The first
-             one will be used as default.
-- i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C
-               parents.
-
-Furthermore, I2C mux properties and child nodes. See i2c-mux.yaml in this
-directory.
-
-Example:
-
-Here is a snipplet for a bus to be demuxed. It contains various i2c clients for
-HDMI, so the bus is named "i2c-hdmi":
-
-       i2chdmi: i2c@8 {
-
-               compatible = "i2c-demux-pinctrl";
-               i2c-parent = <&gpioi2c>, <&iic2>, <&i2c2>;
-               i2c-bus-name = "i2c-hdmi";
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               ak4643: sound-codec@12 {
-                       compatible = "asahi-kasei,ak4643";
-
-                       #sound-dai-cells = <0>;
-                       reg = <0x12>;
-               };
-
-               composite-in@20 {
-                       compatible = "adi,adv7180";
-                       reg = <0x20>;
-                       remote = <&vin1>;
-
-                       port {
-                               adv7180: endpoint {
-                                       bus-width = <8>;
-                                       remote-endpoint = <&vin1ep0>;
-                               };
-                       };
-               };
-
-               hdmi@39 {
-                       compatible = "adi,adv7511w";
-                       reg = <0x39>;
-                       interrupt-parent = <&gpio1>;
-                       interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
-
-                       adi,input-depth = <8>;
-                       adi,input-colorspace = "rgb";
-                       adi,input-clock = "1x";
-                       adi,input-style = <1>;
-                       adi,input-justification = "evenly";
-
-                       ports {
-                               #address-cells = <1>;
-                               #size-cells = <0>;
-
-                               port@0 {
-                                       reg = <0>;
-                                       adv7511_in: endpoint {
-                                               remote-endpoint = <&du_out_lvds0>;
-                                       };
-                               };
-
-                               port@1 {
-                                       reg = <1>;
-                                       adv7511_out: endpoint {
-                                               remote-endpoint = <&hdmi_con>;
-                                       };
-                               };
-                       };
-               };
-       };
-
-And for clarification, here are the snipplets for the i2c-parents:
-
-       gpioi2c: i2c@9 {
-               #address-cells = <1>;
-               #size-cells = <0>;
-               compatible = "i2c-gpio";
-               gpios = <&gpio5 6 GPIO_ACTIVE_HIGH /* sda */
-                        &gpio5 5 GPIO_ACTIVE_HIGH /* scl */
-                       >;
-               i2c-gpio,delay-us = <5>;
-       };
-
-...
-
-&i2c2  {
-       pinctrl-0 = <&i2c2_pins>;
-       pinctrl-names = "i2c-hdmi";
-
-       clock-frequency = <100000>;
-};
-
-...
-
-&iic2  {
-       pinctrl-0 = <&iic2_pins>;
-       pinctrl-names = "i2c-hdmi";
-
-       clock-frequency = <100000>;
-};
-
-Please note:
-
-- pinctrl properties for the parent I2C controllers need a pinctrl state
-  with the same name as i2c-bus-name, not "default"!
-
-- the i2c masters must have their status "disabled". This driver will
-  enable them at runtime when needed.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.yaml b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.yaml
new file mode 100644 (file)
index 0000000..2c08f2a
--- /dev/null
@@ -0,0 +1,172 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-demux-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Pinctrl-based I2C Bus Demultiplexer
+
+maintainers:
+  - Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+description: |
+  This binding describes an I2C bus demultiplexer that uses pin multiplexing to
+  route the I2C signals, and represents the pin multiplexing configuration
+  using the pinctrl device tree bindings.  This may be used to select one I2C
+  IP core at runtime which may have a better feature set for a given task than
+  another I2C IP core on the SoC.  The simplest example is falling back to
+  GPIO bitbanging if your current runtime configuration hits an erratum of the
+  internal IP core.
+
+      +-------------------------------+
+      | SoC                           |
+      |                               |   +-----+  +-----+
+      |   +------------+              |   | dev |  | dev |
+      |   |I2C IP Core1|--\           |   +-----+  +-----+
+      |   +------------+   \-------+  |      |        |
+      |                    |Pinctrl|--|------+--------+
+      |   +------------+   +-------+  |
+      |   |I2C IP Core2|--/           |
+      |   +------------+              |
+      |                               |
+      +-------------------------------+
+
+allOf:
+  - $ref: i2c-mux.yaml
+  - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+  compatible:
+    const: i2c-demux-pinctrl
+
+  i2c-parent:
+    $ref: /schemas/types.yaml#/definitions/phandle-array
+    description:
+      List of phandles of I2C masters available for selection.  The first one
+      will be used as default.
+
+  i2c-bus-name:
+    $ref: /schemas/types.yaml#/definitions/string
+    description:
+      The name of this bus.  Also needed as pinctrl-name for the I2C parents.
+
+required:
+  - compatible
+  - i2c-parent
+  - i2c-bus-name
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/interrupt-controller/irq.h>
+
+    gpioi2c2: i2c-9 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+        compatible = "i2c-gpio";
+        scl-gpios = <&gpio5 5 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+        sda-gpios = <&gpio5 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
+        i2c-gpio,delay-us = <5>;
+
+        // The I2C controller must have its status "disabled".  The I2C bus
+        // demultiplexer will enable it at runtime when needed.
+        status = "disabled";
+    };
+
+    iic2: i2c@e6520000 {
+        reg = <0xe6520000 0x425>;
+        pinctrl-0 = <&iic2_pins>;
+        // The pinctrl property for the parent I2C controller needs a pinctrl
+        // state with the same name as i2c-bus-name in the I2C bus demultiplexer
+        // node, not "default"!
+        pinctrl-names = "i2c-hdmi";
+
+        clock-frequency = <100000>;
+
+        // The I2C controller must have its status "disabled".  The I2C bus
+        // demultiplexer will enable it at runtime when needed.
+        status = "disabled";
+    };
+
+    i2c2: i2c@e6530000 {
+        reg = <0 0xe6530000 0 0x40>;
+        pinctrl-0 = <&i2c2_pins>;
+        // The pinctrl property for the parent I2C controller needs a pinctrl
+        // state with the same name as i2c-bus-name in the I2C bus demultiplexer
+        // node, not "default"!
+        pinctrl-names = "i2c-hdmi";
+
+        clock-frequency = <100000>;
+
+        // The I2C controller must have its status "disabled".  The I2C bus
+        // demultiplexer will enable it at runtime when needed.
+        status = "disabled";
+    };
+
+    // Example for a bus to be demuxed.  It contains various I2C clients for
+    // HDMI, so the bus is named "i2c-hdmi":
+    i2chdmi: i2c-mux3 {
+            compatible = "i2c-demux-pinctrl";
+            i2c-parent = <&iic2>, <&i2c2>, <&gpioi2c2>;
+            i2c-bus-name = "i2c-hdmi";
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            ak4643: codec@12 {
+                    compatible = "asahi-kasei,ak4643";
+                    #sound-dai-cells = <0>;
+                    reg = <0x12>;
+            };
+
+            composite-in@20 {
+                    compatible = "adi,adv7180";
+                    reg = <0x20>;
+
+                    port {
+                            adv7180: endpoint {
+                                    bus-width = <8>;
+                                    remote-endpoint = <&vin1ep0>;
+                            };
+                    };
+            };
+
+            hdmi@39 {
+                    compatible = "adi,adv7511w";
+                    reg = <0x39>;
+                    interrupt-parent = <&gpio1>;
+                    interrupts = <15 IRQ_TYPE_LEVEL_LOW>;
+                    clocks = <&cec_clock>;
+                    clock-names = "cec";
+
+                    avdd-supply = <&fixedregulator1v8>;
+                    dvdd-supply = <&fixedregulator1v8>;
+                    pvdd-supply = <&fixedregulator1v8>;
+                    dvdd-3v-supply = <&fixedregulator3v3>;
+                    bgvdd-supply = <&fixedregulator1v8>;
+
+                    adi,input-depth = <8>;
+                    adi,input-colorspace = "rgb";
+                    adi,input-clock = "1x";
+
+                    ports {
+                            #address-cells = <1>;
+                            #size-cells = <0>;
+
+                            port@0 {
+                                    reg = <0>;
+                                    adv7511_in: endpoint {
+                                            remote-endpoint = <&lvds0_out>;
+                                    };
+                            };
+
+                            port@1 {
+                                    reg = <1>;
+                                    adv7511_out: endpoint {
+                                            remote-endpoint = <&hdmi_con_out>;
+                                    };
+                            };
+                    };
+            };
+    };
index 042d4dc636eed0300d15f05bc7cbc74bd8341337..8386cfe21532e5bba9984fac2b7de37c277aa275 100644 (file)
@@ -25,6 +25,7 @@ properties:
 
       - items:
           - enum:
+              - qcom,sc7280-cci
               - qcom,sdm845-cci
               - qcom,sm6350-cci
               - qcom,sm8250-cci
@@ -159,6 +160,7 @@ allOf:
         compatible:
           contains:
             enum:
+              - qcom,sc7280-cci
               - qcom,sm8250-cci
               - qcom,sm8450-cci
     then:
index d9483fbd24543925b0d1eeec4809bbc6de94fc7b..c816e295d5651f372c4884751c9954a953d8838a 100644 (file)
@@ -125,12 +125,12 @@ patternProperties:
                 minimum: 0
                 maximum: 0x7f
               - description: |
-                  First half of the Provisional ID (following the PID
+                  First half of the Provisioned ID (following the PID
                   definition provided by the I3C specification).
 
                   Contains the manufacturer ID left-shifted by 1.
               - description: |
-                  Second half of the Provisional ID (following the PID
+                  Second half of the Provisioned ID (following the PID
                   definition provided by the I3C specification).
 
                   Contains the ORing of the part ID left-shifted by 16,
index e5a3c355ee1f05a62faa181b45f94146d0d26c98..29921aab9d9713c475654fd1e1e1ebe26a5e08d1 100644 (file)
@@ -24,6 +24,8 @@ properties:
   linux,keycodes:
     maxItems: 1
 
+  wakeup-source: true
+
 required:
   - compatible
   - linux,keycodes
index 4080422a9eb5c2fe26116217e346480323a784ee..037e5d3c447f8a0d0c4585d2a9f2a5a3805ceca6 100644 (file)
@@ -34,6 +34,9 @@ properties:
   vdd-supply:
     description: Regulator for voltage.
 
+  vddio-supply:
+    description: Optional regulator for I/O voltage.
+
   reset-gpios:
     maxItems: 1
 
index b1b2cf81b42fc1cf2db849fabceb159e368c7ce5..aa9e1c0895a508a2e6eed5831a73e92de06df9d4 100644 (file)
@@ -110,6 +110,7 @@ properties:
               - qcom,sdm630-smmu-v2
               - qcom,sdm845-smmu-v2
               - qcom,sm6350-smmu-v2
+              - qcom,sm7150-smmu-v2
           - const: qcom,adreno-smmu
           - const: qcom,smmu-v2
       - description: Qcom Adreno GPUs on Google Cheza platform
@@ -409,6 +410,7 @@ allOf:
           contains:
             enum:
               - qcom,sm6350-smmu-v2
+              - qcom,sm7150-smmu-v2
               - qcom,sm8150-smmu-500
               - qcom,sm8250-smmu-500
     then:
index f2a6fa140f38c47f38f4ba6203b724d36a1235fd..7526e3149f728de29b100323969b7167b3deacee 100644 (file)
@@ -15,7 +15,10 @@ description:
 
 properties:
   compatible:
-    const: pwm-ir-tx
+    oneOf:
+      - const: pwm-ir-tx
+      - const: nokia,n900-ir
+        deprecated: true
 
   pwms:
     maxItems: 1
index 191c1ce150095e7860e2c0fc43a4d185482b54fb..12e7a7d536a38f87090f1bc6a85e50b1e3df0e7d 100644 (file)
@@ -72,9 +72,9 @@ properties:
       type      : Channel type
       channel   : Channel number
 
-      This MU support 5 type of unidirectional channels, each type
+      This MU supports 6 types of unidirectional channels, each type
       has 4 channels except RST channel which only has 1 channel.
-      A total of 17 channels.  Following types are
+      A total of 21 channels.  Following types are
       supported:
       0 - TX channel with 32bit transmit register and IRQ transmit
           acknowledgment support.
@@ -82,6 +82,7 @@ properties:
       2 - TX doorbell channel. Without own register and no ACK support.
       3 - RX doorbell channel.
       4 - RST channel
+      5 - TX doorbell channel. With S/W ACK from the other side.
     const: 2
 
   clocks:
index d2e25ff6db7f66af7b509afb75d510ce0bff1980..a38413f8d1321b0e1a8f4cade8341d011048a003 100644 (file)
@@ -125,10 +125,12 @@ allOf:
           items:
             - description: primary pll parent of the clock driver
             - description: XO clock
+            - description: GCC GPLL0 clock source
         clock-names:
           items:
             - const: pll
             - const: xo
+            - const: gpll0
 
   - if:
       properties:
index cc6f66eccc8456b7f986465c9ed2b1bb39848417..a35f9483dc716ee4fe45f2f96b59103c974387aa 100644 (file)
@@ -34,6 +34,7 @@ properties:
           - qcom,sm8350-ipcc
           - qcom,sm8450-ipcc
           - qcom,sm8550-ipcc
+          - qcom,sm8650-ipcc
       - const: qcom,ipcc
 
   reg:
index aeaddbf574b0b1207a1caadfa8dd46c134cb8350..8b15a0532120f7bbd9610df84397d7d9fc14059e 100644 (file)
@@ -74,6 +74,10 @@ patternProperties:
     type: object  # DT nodes are json objects
     additionalProperties: false
     properties:
+
+      compatible:
+        const: xlnx,zynqmp-ipi-dest-mailbox
+
       xlnx,ipi-id:
         description:
           Remote Xilinx IPI agent ID of which the mailbox is connected to.
@@ -95,6 +99,7 @@ patternProperties:
           - const: remote_response_region
 
     required:
+      - compatible
       - reg
       - reg-names
       - "#mbox-cells"
@@ -124,6 +129,7 @@ examples:
         ranges;
 
         mailbox: mailbox@ff9905c0 {
+          compatible = "xlnx,zynqmp-ipi-dest-mailbox";
           reg = <0x0 0xff9905c0 0x0 0x20>,
                 <0x0 0xff9905e0 0x0 0x20>,
                 <0x0 0xff990e80 0x0 0x20>,
index 3f9fa92703bbbfcb9cac985e711d58f30d433c37..0f95fe8dd9ac78455a3182e3c7860a029620912d 100644 (file)
@@ -19,6 +19,7 @@ properties:
           - amlogic,meson6-ir
           - amlogic,meson8b-ir
           - amlogic,meson-gxbb-ir
+          - amlogic,meson-s4-ir
       - items:
           - const: amlogic,meson-gx-ir
           - const: amlogic,meson-gxbb-ir
index 30a335b10762170e6ae1cc98866fd3ca275127d7..2008a47c0580632e698a361c01e8e1a974ef9f2e 100644 (file)
@@ -18,6 +18,7 @@ properties:
     items:
       - enum:
           - starfive,jh7110-csi2rx
+          - ti,j721e-csi2rx
       - const: cdns,csi2rx
 
   reg:
index 1e2df8cf2937b84233a727582aa67b49a1920cdd..60f19e1152b33128cf3baa15b8c70a874ca6d52e 100644 (file)
@@ -14,6 +14,9 @@ description: |-
   interface and CCI (I2C compatible) control bus. The output format
   is raw Bayer.
 
+allOf:
+  - $ref: /schemas/media/video-interface-devices.yaml#
+
 properties:
   compatible:
     const: hynix,hi846
@@ -86,7 +89,7 @@ required:
   - vddd-supply
   - port
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
@@ -109,6 +112,8 @@ examples:
             vddio-supply = <&reg_camera_vddio>;
             reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
             shutdown-gpios = <&gpio5 4 GPIO_ACTIVE_LOW>;
+            orientation = <0>;
+            rotation = <0>;
 
             port {
                 camera_out: endpoint {
diff --git a/Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml b/Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml
new file mode 100644 (file)
index 0000000..f6b8789
--- /dev/null
@@ -0,0 +1,114 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/onnn,mt9m114.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: onsemi 1/6-inch 720p CMOS Digital Image Sensor
+
+maintainers:
+  - Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+
+description: |-
+  The onsemi MT9M114 is a 1/6-inch 720p (1.26 Mp) CMOS digital image sensor
+  with an active pixel-array size of 1296H x 976V. It is programmable through
+  an I2C interface and outputs image data over a 8-bit parallel or 1-lane MIPI
+  CSI-2 connection.
+
+properties:
+  compatible:
+    const: onnn,mt9m114
+
+  reg:
+    description: I2C device address
+    enum:
+      - 0x48
+      - 0x5d
+
+  clocks:
+    description: EXTCLK clock signal
+    maxItems: 1
+
+  vdd-supply:
+    description:
+      Core digital voltage supply, 1.8V
+
+  vddio-supply:
+    description:
+      I/O digital voltage supply, 1.8V or 2.8V
+
+  vaa-supply:
+    description:
+      Analog voltage supply, 2.8V
+
+  reset-gpios:
+    description: |-
+      Reference to the GPIO connected to the RESET_BAR pin, if any (active
+      low).
+
+  port:
+    $ref: /schemas/graph.yaml#/$defs/port-base
+    additionalProperties: false
+
+    properties:
+      endpoint:
+        $ref: /schemas/media/video-interfaces.yaml#
+        additionalProperties: false
+
+        properties:
+          bus-type:
+            enum: [4, 5, 6]
+
+          link-frequencies: true
+          remote-endpoint: true
+
+          # The number and mapping of lanes (for CSI-2), and the bus width and
+          # signal polarities (for parallel and BT.656) are fixed and must not
+          # be specified.
+
+        required:
+          - bus-type
+          - link-frequencies
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - vdd-supply
+  - vddio-supply
+  - vaa-supply
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/media/video-interfaces.h>
+
+    i2c0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        sensor@48 {
+            compatible = "onnn,mt9m114";
+            reg = <0x48>;
+
+            clocks = <&clk24m 0>;
+
+            reset-gpios = <&gpio5 21 GPIO_ACTIVE_LOW>;
+
+            vddio-supply = <&reg_cam_1v8>;
+            vdd-supply = <&reg_cam_1v8>;
+            vaa-supply = <&reg_2p8v>;
+
+            port {
+                endpoint {
+                    bus-type = <MEDIA_BUS_TYPE_CSI2_DPHY>;
+                    link-frequencies = /bits/ 64 <384000000>;
+                    remote-endpoint = <&mipi_csi_in>;
+                };
+            };
+        };
+    };
+...
index 763cebe03dc20d772f61710421d9eed212788ea9..67c1c291327b7febb6a039bf6f28c8dc1f32ed7f 100644 (file)
@@ -68,12 +68,6 @@ properties:
       marked GPIO_ACTIVE_LOW.
     maxItems: 1
 
-  rotation:
-    enum:
-      - 0    # Sensor Mounted Upright
-      - 180  # Sensor Mounted Upside Down
-    default: 0
-
   port:
     $ref: /schemas/graph.yaml#/$defs/port-base
     additionalProperties: false
@@ -114,7 +108,7 @@ required:
   - reset-gpios
   - port
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 50579c947f3c2308245fdbdd00bf966679513f23..d96199031b66c5c162a034824f195e277f2a1795 100644 (file)
@@ -52,10 +52,6 @@ properties:
     description:
       GPIO connected to the reset pin (active low)
 
-  orientation: true
-
-  rotation: true
-
   port:
     $ref: /schemas/graph.yaml#/$defs/port-base
     additionalProperties: false
@@ -95,7 +91,7 @@ required:
   - dvdd-supply
   - port
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index a621032f9bd078cc06dad6014f10fa96da0f9f1b..2c5e69356658ec67364c52cceebc56ebd1760e74 100644 (file)
@@ -44,11 +44,6 @@ properties:
     description: >
       Reference to the GPIO connected to the reset pin, if any.
 
-  rotation:
-    enum:
-      - 0
-      - 180
-
   port:
     description: Digital Output Port
     $ref: /schemas/graph.yaml#/$defs/port-base
@@ -85,7 +80,7 @@ required:
   - DOVDD-supply
   - port
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
diff --git a/Documentation/devicetree/bindings/media/i2c/ovti,ov5642.yaml b/Documentation/devicetree/bindings/media/i2c/ovti,ov5642.yaml
new file mode 100644 (file)
index 0000000..01f8b2b
--- /dev/null
@@ -0,0 +1,141 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/ovti,ov5642.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OmniVision OV5642 Image Sensor
+
+maintainers:
+  - Fabio Estevam <festevam@gmail.com>
+
+allOf:
+  - $ref: /schemas/media/video-interface-devices.yaml#
+
+properties:
+  compatible:
+    const: ovti,ov5642
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    description: XCLK Input Clock
+
+  AVDD-supply:
+    description: Analog voltage supply, 2.8V.
+
+  DVDD-supply:
+    description: Digital core voltage supply, 1.5V.
+
+  DOVDD-supply:
+    description: Digital I/O voltage supply, 1.8V.
+
+  powerdown-gpios:
+    maxItems: 1
+    description: Reference to the GPIO connected to the powerdown pin, if any.
+
+  reset-gpios:
+    maxItems: 1
+    description: Reference to the GPIO connected to the reset pin, if any.
+
+  port:
+    $ref: /schemas/graph.yaml#/$defs/port-base
+    description: |
+      Video output port.
+
+    properties:
+      endpoint:
+        $ref: /schemas/media/video-interfaces.yaml#
+        unevaluatedProperties: false
+
+        properties:
+          bus-type:
+            enum: [5, 6]
+
+          bus-width:
+            enum: [8, 10]
+            default: 10
+
+          data-shift:
+            enum: [0, 2]
+            default: 0
+
+          hsync-active:
+            enum: [0, 1]
+            default: 1
+
+          vsync-active:
+            enum: [0, 1]
+            default: 1
+
+          pclk-sample:
+            enum: [0, 1]
+            default: 1
+
+        allOf:
+          - if:
+              properties:
+                bus-type:
+                  const: 6
+            then:
+              properties:
+                hsync-active: false
+                vsync-active: false
+
+          - if:
+              properties:
+                bus-width:
+                  const: 10
+            then:
+              properties:
+                data-shift:
+                  const: 0
+
+        required:
+          - bus-type
+
+    additionalProperties: false
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - port
+
+additionalProperties: false
+
+examples:
+  - |
+      #include <dt-bindings/gpio/gpio.h>
+      #include <dt-bindings/media/video-interfaces.h>
+
+      i2c {
+          #address-cells = <1>;
+          #size-cells = <0>;
+
+          camera@3c {
+              compatible = "ovti,ov5642";
+              reg = <0x3c>;
+              pinctrl-names = "default";
+              pinctrl-0 = <&pinctrl_ov5642>;
+              clocks = <&clk_ext_camera>;
+              DOVDD-supply = <&vgen4_reg>;
+              AVDD-supply = <&vgen3_reg>;
+              DVDD-supply = <&vgen2_reg>;
+              powerdown-gpios = <&gpio1 19 GPIO_ACTIVE_HIGH>;
+              reset-gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
+
+              port {
+                  ov5642_to_parallel: endpoint {
+                      bus-type = <MEDIA_BUS_TYPE_PARALLEL>;
+                      remote-endpoint = <&parallel_from_ov5642>;
+                      bus-width = <8>;
+                      data-shift = <2>; /* lines 9:2 are used */
+                      hsync-active = <0>;
+                      vsync-active = <0>;
+                      pclk-sample = <1>;
+                  };
+              };
+          };
+      };
index 6829a4aadd22e31bea67a8f61ebb5c2fe8278745..3368b3bd8ef2f0b648b239a8ec7d948d4d795bd6 100644 (file)
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Omnivision OV5693/OV5695 CMOS Sensors
 
 maintainers:
-  - Tommaso Merciai <tommaso.merciai@amarulasolutions.com>
+  - Tommaso Merciai <tomm.merciai@gmail.com>
 
 description: |
   The Omnivision OV5693/OV5695 are high performance, 1/4-inch, 5 megapixel, CMOS
index e2470dd5920c7960a5d591b5a96f3ab273d09790..60903da84e1f3281e9faeaf43c057a07462cc277 100644 (file)
@@ -91,7 +91,7 @@ required:
   - vddd-supply
   - port
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
index 642f9b15d3597738e33fd7e5a51636e69d5fe8ae..9a00dab2e8a3f008857a9dcc493f103e497fbe7e 100644 (file)
@@ -44,14 +44,6 @@ properties:
     description: Sensor reset (XCLR) GPIO
     maxItems: 1
 
-  flash-leds: true
-
-  lens-focus: true
-
-  orientation: true
-
-  rotation: true
-
   port:
     $ref: /schemas/graph.yaml#/$defs/port-base
     unevaluatedProperties: false
@@ -89,7 +81,7 @@ required:
   - ovdd-supply
   - port
 
-additionalProperties: false
+unevaluatedProperties: false
 
 examples:
   - |
diff --git a/Documentation/devicetree/bindings/media/nokia,n900-ir b/Documentation/devicetree/bindings/media/nokia,n900-ir
deleted file mode 100644 (file)
index 13a18ce..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-Device-Tree bindings for LIRC TX driver for Nokia N900(RX51)
-
-Required properties:
-       - compatible: should be "nokia,n900-ir".
-       - pwms: specifies PWM used for IR signal transmission.
-
-Example node:
-
-       pwm9: dmtimer-pwm@9 {
-               compatible = "ti,omap-dmtimer-pwm";
-               ti,timers = <&timer9>;
-               ti,clock-source = <0x00>; /* timer_sys_ck */
-               #pwm-cells = <3>;
-       };
-
-       ir: n900-ir {
-               compatible = "nokia,n900-ir";
-
-               pwms = <&pwm9 0 26316 0>; /* 38000 Hz */
-       };
diff --git a/Documentation/devicetree/bindings/media/nuvoton,npcm-ece.yaml b/Documentation/devicetree/bindings/media/nuvoton,npcm-ece.yaml
new file mode 100644 (file)
index 0000000..b47468e
--- /dev/null
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/nuvoton,npcm-ece.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton NPCM Encoding Compression Engine
+
+maintainers:
+  - Joseph Liu <kwliu@nuvoton.com>
+  - Marvin Lin <kflin@nuvoton.com>
+
+description: |
+  Video Encoding Compression Engine (ECE) present on Nuvoton NPCM SoCs.
+
+properties:
+  compatible:
+    enum:
+      - nuvoton,npcm750-ece
+      - nuvoton,npcm845-ece
+
+  reg:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - resets
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/reset/nuvoton,npcm7xx-reset.h>
+
+    ece: video-codec@f0820000 {
+        compatible = "nuvoton,npcm750-ece";
+        reg = <0xf0820000 0x2000>;
+        resets = <&rstc NPCM7XX_RESET_IPSRST2 NPCM7XX_RESET_ECE>;
+    };
diff --git a/Documentation/devicetree/bindings/media/nuvoton,npcm-vcd.yaml b/Documentation/devicetree/bindings/media/nuvoton,npcm-vcd.yaml
new file mode 100644 (file)
index 0000000..c885f55
--- /dev/null
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/nuvoton,npcm-vcd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton NPCM Video Capture/Differentiation Engine
+
+maintainers:
+  - Joseph Liu <kwliu@nuvoton.com>
+  - Marvin Lin <kflin@nuvoton.com>
+
+description: |
+  Video Capture/Differentiation Engine (VCD) present on Nuvoton NPCM SoCs.
+
+properties:
+  compatible:
+    enum:
+      - nuvoton,npcm750-vcd
+      - nuvoton,npcm845-vcd
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  resets:
+    maxItems: 1
+
+  nuvoton,sysgcr:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: phandle to access GCR (Global Control Register) registers.
+
+  nuvoton,sysgfxi:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: phandle to access GFXI (Graphics Core Information) registers.
+
+  nuvoton,ece:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: phandle to access ECE (Encoding Compression Engine) registers.
+
+  memory-region:
+    maxItems: 1
+    description:
+      CMA pool to use for buffers allocation instead of the default CMA pool.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - resets
+  - nuvoton,sysgcr
+  - nuvoton,sysgfxi
+  - nuvoton,ece
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/reset/nuvoton,npcm7xx-reset.h>
+
+    vcd: vcd@f0810000 {
+        compatible = "nuvoton,npcm750-vcd";
+        reg = <0xf0810000 0x10000>;
+        interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+        resets = <&rstc NPCM7XX_RESET_IPSRST2 NPCM7XX_RESET_VCD>;
+        nuvoton,sysgcr = <&gcr>;
+        nuvoton,sysgfxi = <&gfxi>;
+        nuvoton,ece = <&ece>;
+    };
index d5f80976f4cfec7236bd329aba80b10c80cd0b04..6228fd2b324631f3138e128c918266da58f6b544 100644 (file)
@@ -48,6 +48,14 @@ properties:
   iommus:
     maxItems: 2
 
+  interconnects:
+    maxItems: 2
+
+  interconnect-names:
+    items:
+      - const: video-mem
+      - const: cpu-cfg
+
   operating-points-v2: true
   opp-table:
     type: object
index 772ec3283bc6ce29dd77aa0bcff5dc5c8ed4cf32..c57e1f488895b3b99a6c2596f81c0a3c49c6c1cf 100644 (file)
@@ -68,6 +68,13 @@ properties:
   iommus:
     maxItems: 1
 
+  resets:
+    items:
+      - description: AXI reset line
+      - description: AXI bus interface unit reset line
+      - description: APB reset line
+      - description: APB bus interface unit reset line
+
 required:
   - compatible
   - reg
index 3691cd4962b269f396023347a115845353f1d93f..3a5ff3f470603b402721a455039a5175bcdd8b6f 100644 (file)
@@ -75,13 +75,20 @@ properties:
   power-domains:
     maxItems: 1
 
+  samsung,pmu-syscon:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      Power Management Unit (PMU) system controller interface, used to
+      power/start the ISP.
+
 patternProperties:
   "^pmu@[0-9a-f]+$":
     type: object
     additionalProperties: false
+    deprecated: true
     description:
       Node representing the SoC's Power Management Unit (duplicated with the
-      correct PMU node in the SoC).
+      correct PMU node in the SoC). Deprecated, use samsung,pmu-syscon.
 
     properties:
       reg:
@@ -131,6 +138,7 @@ required:
   - clock-names
   - interrupts
   - ranges
+  - samsung,pmu-syscon
   - '#size-cells'
 
 additionalProperties: false
@@ -179,15 +187,12 @@ examples:
                  <&sysmmu_fimc_fd>, <&sysmmu_fimc_mcuctl>;
         iommu-names = "isp", "drc", "fd", "mcuctl";
         power-domains = <&pd_isp>;
+        samsung,pmu-syscon = <&pmu_system_controller>;
 
         #address-cells = <1>;
         #size-cells = <1>;
         ranges;
 
-        pmu@10020000 {
-            reg = <0x10020000 0x3000>;
-        };
-
         i2c-isp@12140000 {
             compatible = "samsung,exynos4212-i2c-isp";
             reg = <0x12140000 0x100>;
index b3486c38a05b9ef7cbbcd485fed90f9fd0741670..7808d61f1fa380c7419718d418616bc7d8a728b2 100644 (file)
@@ -118,7 +118,7 @@ examples:
         #clock-cells = <1>;
         #address-cells = <1>;
         #size-cells = <1>;
-        ranges = <0x0 0x0 0x18000000>;
+        ranges = <0x0 0x0 0xba1000>;
 
         clocks = <&clock CLK_SCLK_CAM0>, <&clock CLK_SCLK_CAM1>,
                  <&clock CLK_PIXELASYNCM0>, <&clock CLK_PIXELASYNCM1>;
@@ -133,9 +133,9 @@ examples:
         pinctrl-0 = <&cam_port_a_clk_active &cam_port_b_clk_active>;
         pinctrl-names = "default";
 
-        fimc@11800000 {
+        fimc@0 {
             compatible = "samsung,exynos4212-fimc";
-            reg = <0x11800000 0x1000>;
+            reg = <0x00000000 0x1000>;
             interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
             clocks = <&clock CLK_FIMC0>,
                      <&clock CLK_SCLK_FIMC0>;
@@ -152,9 +152,9 @@ examples:
 
         /* ... FIMC 1-3 */
 
-        csis@11880000 {
+        csis@80000 {
             compatible = "samsung,exynos4210-csis";
-            reg = <0x11880000 0x4000>;
+            reg = <0x00080000 0x4000>;
             interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
             clocks = <&clock CLK_CSIS0>,
                      <&clock CLK_SCLK_CSIS0>;
@@ -187,9 +187,9 @@ examples:
 
         /* ... CSIS 1 */
 
-        fimc-lite@12390000 {
+        fimc-lite@b90000 {
               compatible = "samsung,exynos4212-fimc-lite";
-              reg = <0x12390000 0x1000>;
+              reg = <0xb90000 0x1000>;
               interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
               power-domains = <&pd_isp>;
               clocks = <&isp_clock CLK_ISP_FIMC_LITE0>;
@@ -199,9 +199,9 @@ examples:
 
         /* ... FIMC-LITE 1 */
 
-        fimc-is@12000000 {
+        fimc-is@800000 {
             compatible = "samsung,exynos4212-fimc-is";
-            reg = <0x12000000 0x260000>;
+            reg = <0x00800000 0x260000>;
             interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
                          <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
             clocks = <&isp_clock CLK_ISP_FIMC_LITE0>,
@@ -237,18 +237,15 @@ examples:
                      <&sysmmu_fimc_fd>, <&sysmmu_fimc_mcuctl>;
             iommu-names = "isp", "drc", "fd", "mcuctl";
             power-domains = <&pd_isp>;
+            samsung,pmu-syscon = <&pmu_system_controller>;
 
             #address-cells = <1>;
             #size-cells = <1>;
             ranges;
 
-            pmu@10020000 {
-                reg = <0x10020000 0x3000>;
-            };
-
-            i2c-isp@12140000 {
+            i2c-isp@940000 {
                 compatible = "samsung,exynos4212-i2c-isp";
-                reg = <0x12140000 0x100>;
+                reg = <0x00940000 0x100>;
                 clocks = <&isp_clock CLK_ISP_I2C1_ISP>;
                 clock-names = "i2c_isp";
                 pinctrl-0 = <&fimc_is_i2c1>;
diff --git a/Documentation/devicetree/bindings/media/ti,j721e-csi2rx-shim.yaml b/Documentation/devicetree/bindings/media/ti,j721e-csi2rx-shim.yaml
new file mode 100644 (file)
index 0000000..f762fdc
--- /dev/null
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/ti,j721e-csi2rx-shim.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TI J721E CSI2RX Shim
+
+description: |
+  The TI J721E CSI2RX Shim is a wrapper around Cadence CSI2RX bridge that
+  enables sending captured frames to memory over PSI-L DMA. In the J721E
+  Technical Reference Manual (SPRUIL1B) it is referred to as "SHIM" under the
+  CSI_RX_IF section.
+
+maintainers:
+  - Jai Luthra <j-luthra@ti.com>
+
+properties:
+  compatible:
+    const: ti,j721e-csi2rx-shim
+
+  dmas:
+    maxItems: 1
+
+  dma-names:
+    items:
+      - const: rx0
+
+  reg:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+  ranges: true
+
+  "#address-cells": true
+
+  "#size-cells": true
+
+patternProperties:
+  "^csi-bridge@":
+    type: object
+    description: CSI2 bridge node.
+    $ref: cdns,csi2rx.yaml#
+
+required:
+  - compatible
+  - reg
+  - dmas
+  - dma-names
+  - power-domains
+  - ranges
+  - "#address-cells"
+  - "#size-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+    ti_csi2rx0: ticsi2rx@4500000 {
+        compatible = "ti,j721e-csi2rx-shim";
+        dmas = <&main_udmap 0x4940>;
+        dma-names = "rx0";
+        reg = <0x4500000 0x1000>;
+        power-domains = <&k3_pds 26 TI_SCI_PD_EXCLUSIVE>;
+        #address-cells = <1>;
+        #size-cells = <1>;
+        ranges;
+
+        cdns_csi2rx: csi-bridge@4504000 {
+            compatible = "ti,j721e-csi2rx", "cdns,csi2rx";
+            reg = <0x4504000 0x1000>;
+            clocks = <&k3_clks 26 2>, <&k3_clks 26 0>, <&k3_clks 26 2>,
+              <&k3_clks 26 2>, <&k3_clks 26 3>, <&k3_clks 26 3>;
+            clock-names = "sys_clk", "p_clk", "pixel_if0_clk",
+              "pixel_if1_clk", "pixel_if2_clk", "pixel_if3_clk";
+            phys = <&dphy0>;
+            phy-names = "dphy";
+
+            ports {
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                csi2_0: port@0 {
+
+                    reg = <0>;
+
+                    csi2rx0_in_sensor: endpoint {
+                        remote-endpoint = <&csi2_cam0>;
+                        bus-type = <4>; /* CSI2 DPHY. */
+                        clock-lanes = <0>;
+                        data-lanes = <1 2>;
+                    };
+                };
+            };
+        };
+    };
index a211d49dc2ac348f59eefca821838cd5489b00e4..26e3e7d7c67baba30a81feee5cdd343e8aa190d4 100644 (file)
@@ -160,6 +160,7 @@ properties:
     $ref: /schemas/types.yaml#/definitions/uint32-array
     minItems: 1
     maxItems: 8
+    uniqueItems: true
     items:
       # Assume up to 9 physical lane indices
       maximum: 8
index 331e564f29dce9f0d6dc94e66151b204452ac0c1..058253d6d889c2c421a354418aac6d51c27a048f 100644 (file)
@@ -29,6 +29,24 @@ properties:
 
   "#size-cells": true
 
+  compression:
+    $ref: /schemas/types.yaml#/definitions/string
+    description: |
+      Compression algorithm used to store the data in this partition, chosen
+      from a list of well-known algorithms.
+
+      The contents are compressed using this algorithm.
+
+    enum:
+      - none
+      - bzip2
+      - gzip
+      - lzop
+      - lz4
+      - lzma
+      - xz
+      - zstd
+
 patternProperties:
   "@[0-9a-f]+$":
     $ref: partition.yaml#
@@ -64,6 +82,7 @@ examples:
 
         uimage@100000 {
             reg = <0x0100000 0x200000>;
+            compression = "lzma";
         };
     };
 
index 6ffbed204c259def8204c7c7a7a837c99190b3b6..8f50e23ca8c9b3a3ead1bf9e1d554b8a4277b33e 100644 (file)
@@ -15,12 +15,19 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - fsl,imx23-pwm
+    oneOf:
+      - const: fsl,imx23-pwm
+      - items:
+          - enum:
+              - fsl,imx28-pwm
+          - const: fsl,imx23-pwm
 
   reg:
     maxItems: 1
 
+  clocks:
+    maxItems: 1
+
   "#pwm-cells":
     const: 3
 
@@ -31,6 +38,7 @@ properties:
 required:
   - compatible
   - reg
+  - clocks
   - fsl,pwm-number
 
 additionalProperties: false
@@ -40,6 +48,7 @@ examples:
     pwm@80064000 {
         compatible = "fsl,imx23-pwm";
         reg = <0x80064000 0x2000>;
+        clocks = <&clks 30>;
         #pwm-cells = <3>;
         fsl,pwm-number = <8>;
     };
index 895415772d1d6b9872f5bcba62f8376c72953719..09102dda4942c14121191c455e8edb6b3a6e3d42 100644 (file)
@@ -21,6 +21,7 @@ properties:
       - mediatek,mt8188-scp
       - mediatek,mt8192-scp
       - mediatek,mt8195-scp
+      - mediatek,mt8195-scp-dual
 
   reg:
     description:
@@ -31,10 +32,7 @@ properties:
 
   reg-names:
     minItems: 2
-    items:
-      - const: sram
-      - const: cfg
-      - const: l1tcm
+    maxItems: 3
 
   clocks:
     description:
@@ -58,6 +56,93 @@ properties:
   memory-region:
     maxItems: 1
 
+  cros-ec-rpmsg:
+    $ref: /schemas/mfd/google,cros-ec.yaml
+    description:
+      This subnode represents the rpmsg device. The properties
+      of this node are defined by the individual bindings for
+      the rpmsg devices.
+
+    required:
+      - mediatek,rpmsg-name
+
+    unevaluatedProperties: false
+
+  '#address-cells':
+    const: 1
+
+  '#size-cells':
+    const: 1
+
+  ranges:
+    description:
+      Standard ranges definition providing address translations for
+      local SCP SRAM address spaces to bus addresses.
+
+patternProperties:
+  "^scp@[a-f0-9]+$":
+    type: object
+    description:
+      The MediaTek SCP integrated to SoC might be a multi-core version.
+      The other cores are represented as child nodes of the boot core.
+      There are some integration differences for the IP like the usage of
+      address translator for translating SoC bus addresses into address space
+      for the processor.
+
+      Each SCP core has its own cache memory. The SRAM and L1TCM are shared by
+      the cores. The power to the cache, SRAM, and L1TCM should be enabled
+      before booting the SCP cores. The sizes of the cache, SRAM, and L1TCM
+      vary between different SoCs.
+
+      The SCP cores do not use an MMU, but have a set of registers to
+      control the translation of 32-bit CPU addresses into system bus
+      addresses. Cache and memory access settings are provided through a
+      Memory Protection Unit (MPU), programmable only from the SCP.
+
+    properties:
+      compatible:
+        enum:
+          - mediatek,scp-core
+
+      reg:
+        description: The base address and size of SRAM.
+        maxItems: 1
+
+      reg-names:
+        const: sram
+
+      interrupts:
+        maxItems: 1
+
+      firmware-name:
+        $ref: /schemas/types.yaml#/definitions/string
+        description:
+          If present, name (or relative path) of the file within the
+          firmware search path containing the firmware image used when
+          initializing sub cores of multi-core SCP.
+
+      memory-region:
+        maxItems: 1
+
+      cros-ec-rpmsg:
+        $ref: /schemas/mfd/google,cros-ec.yaml
+        description:
+          This subnode represents the rpmsg device. The properties
+          of this node are defined by the individual bindings for
+          the rpmsg devices.
+
+        required:
+          - mediatek,rpmsg-name
+
+        unevaluatedProperties: false
+
+    required:
+      - compatible
+      - reg
+      - reg-names
+
+    additionalProperties: false
+
 required:
   - compatible
   - reg
@@ -87,23 +172,39 @@ allOf:
         reg:
           maxItems: 2
         reg-names:
+          items:
+            - const: sram
+            - const: cfg
+  - if:
+      properties:
+        compatible:
+          enum:
+            - mediatek,mt8192-scp
+            - mediatek,mt8195-scp
+    then:
+      properties:
+        reg:
+          maxItems: 3
+        reg-names:
+          items:
+            - const: sram
+            - const: cfg
+            - const: l1tcm
+  - if:
+      properties:
+        compatible:
+          enum:
+            - mediatek,mt8195-scp-dual
+    then:
+      properties:
+        reg:
           maxItems: 2
+        reg-names:
+          items:
+            - const: cfg
+            - const: l1tcm
 
-additionalProperties:
-  type: object
-  description:
-    Subnodes of the SCP represent rpmsg devices. The names of the devices
-    are not important. The properties of these nodes are defined by the
-    individual bindings for the rpmsg devices.
-  properties:
-    mediatek,rpmsg-name:
-      $ref: /schemas/types.yaml#/definitions/string-array
-      description:
-        Contains the name for the rpmsg device. Used to match
-        the subnode to rpmsg device announced by SCP.
-
-  required:
-    - mediatek,rpmsg-name
+additionalProperties: false
 
 examples:
   - |
@@ -118,7 +219,42 @@ examples:
         clocks = <&infracfg CLK_INFRA_SCPSYS>;
         clock-names = "main";
 
-        cros_ec {
+        cros-ec-rpmsg {
+            compatible = "google,cros-ec-rpmsg";
             mediatek,rpmsg-name = "cros-ec-rpmsg";
         };
     };
+
+  - |
+    scp@10500000 {
+        compatible = "mediatek,mt8195-scp-dual";
+        reg = <0x10720000 0xe0000>,
+              <0x10700000 0x8000>;
+        reg-names = "cfg", "l1tcm";
+
+        #address-cells = <1>;
+        #size-cells = <1>;
+        ranges = <0 0x10500000 0x100000>;
+
+        scp@0 {
+            compatible = "mediatek,scp-core";
+            reg = <0x0 0xa0000>;
+            reg-names = "sram";
+
+            cros-ec-rpmsg {
+                compatible = "google,cros-ec-rpmsg";
+                mediatek,rpmsg-name = "cros-ec-rpmsg";
+            };
+        };
+
+        scp@a0000 {
+            compatible = "mediatek,scp-core";
+            reg = <0xa0000 0x20000>;
+            reg-names = "sram";
+
+            cros-ec-rpmsg {
+                compatible = "google,cros-ec-rpmsg";
+                mediatek,rpmsg-name = "cros-ec-rpmsg";
+            };
+        };
+    };
index a2b0079de0390605b1892c9ab8e9d12ef80543cb..661c2b425da35c3756965fd1c47a485fe15a3f83 100644 (file)
@@ -66,7 +66,9 @@ allOf:
               - qcom,msm8953-adsp-pil
               - qcom,msm8974-adsp-pil
               - qcom,msm8996-adsp-pil
+              - qcom,msm8996-slpi-pil
               - qcom,msm8998-adsp-pas
+              - qcom,msm8998-slpi-pas
               - qcom,sdm845-adsp-pas
               - qcom,sdm845-cdsp-pas
               - qcom,sdm845-slpi-pas
@@ -79,24 +81,6 @@ allOf:
           items:
             - const: xo
 
-  - if:
-      properties:
-        compatible:
-          contains:
-            enum:
-              - qcom,msm8996-slpi-pil
-              - qcom,msm8998-slpi-pas
-    then:
-      properties:
-        clocks:
-          items:
-            - description: XO clock
-            - description: AGGRE2 clock
-        clock-names:
-          items:
-            - const: xo
-            - const: aggre2
-
   - if:
       properties:
         compatible:
index 0643faae2c394f436a44087e0efe5764c20f25b2..971734085d512ec8bc7171b401e96198bee107d5 100644 (file)
@@ -220,7 +220,6 @@ allOf:
             - description: GCC MSS GPLL0 clock
             - description: GCC MSS SNOC_AXI clock
             - description: GCC MSS MNOC_AXI clock
-            - description: RPM PNOC clock
             - description: RPM QDSS clock
         clock-names:
           items:
@@ -231,7 +230,6 @@ allOf:
             - const: gpll0_mss
             - const: snoc_axi
             - const: mnoc_axi
-            - const: pnoc
             - const: qdss
         glink-edge: false
       required:
index 689d5d535331894a5aeadfa26bffc350258bb097..f10f329677d84d06baa5d7fc6f0f68b559b7bcf0 100644 (file)
@@ -16,6 +16,7 @@ description:
 properties:
   compatible:
     enum:
+      - qcom,sc7180-adsp-pas
       - qcom,sc7180-mpss-pas
       - qcom,sc7280-mpss-pas
 
@@ -30,26 +31,6 @@ properties:
     items:
       - const: xo
 
-  interrupts:
-    minItems: 6
-
-  interrupt-names:
-    minItems: 6
-
-  power-domains:
-    minItems: 2
-    items:
-      - description: CX power domain
-      - description: MX power domain
-      - description: MSS power domain
-
-  power-domain-names:
-    minItems: 2
-    items:
-      - const: cx
-      - const: mx
-      - const: mss
-
   memory-region:
     maxItems: 1
     description: Reference to the reserved-memory for the Hexagon core
@@ -71,6 +52,40 @@ required:
 
 allOf:
   - $ref: /schemas/remoteproc/qcom,pas-common.yaml#
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sc7180-adsp-pas
+    then:
+      properties:
+        interrupts:
+          maxItems: 5
+        interrupt-names:
+          maxItems: 5
+    else:
+      properties:
+        interrupts:
+          minItems: 6
+        interrupt-names:
+          minItems: 6
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sc7180-adsp-pas
+    then:
+      properties:
+        power-domains:
+          items:
+            - description: LCX power domain
+            - description: LMX power domain
+        power-domain-names:
+          items:
+            - const: lcx
+            - const: lmx
+
   - if:
       properties:
         compatible:
@@ -79,15 +94,31 @@ allOf:
     then:
       properties:
         power-domains:
-          minItems: 3
+          items:
+            - description: CX power domain
+            - description: MX power domain
+            - description: MSS power domain
         power-domain-names:
-          minItems: 3
-    else:
+          items:
+            - const: cx
+            - const: mx
+            - const: mss
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sc7280-mpss-pas
+    then:
       properties:
         power-domains:
-          maxItems: 2
+          items:
+            - description: CX power domain
+            - description: MX power domain
         power-domain-names:
-          maxItems: 2
+          items:
+            - const: cx
+            - const: mx
 
 unevaluatedProperties: false
 
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,sm6375-pas.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,sm6375-pas.yaml
new file mode 100644 (file)
index 0000000..3e4a03e
--- /dev/null
@@ -0,0 +1,145 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/remoteproc/qcom,sm6375-pas.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm SM6375 Peripheral Authentication Service
+
+maintainers:
+  - Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+
+description:
+  Qualcomm SM6375 SoC Peripheral Authentication Service loads and boots
+  firmware on the Qualcomm DSP Hexagon cores.
+
+properties:
+  compatible:
+    enum:
+      - qcom,sm6375-adsp-pas
+      - qcom,sm6375-cdsp-pas
+      - qcom,sm6375-mpss-pas
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    items:
+      - description: XO clock
+
+  clock-names:
+    items:
+      - const: xo
+
+  memory-region:
+    maxItems: 1
+    description: Reference to the reserved-memory for the Hexagon core
+
+  firmware-name:
+    $ref: /schemas/types.yaml#/definitions/string
+    description: Firmware name for the Hexagon core
+
+  smd-edge: false
+
+required:
+  - compatible
+  - reg
+
+allOf:
+  - $ref: /schemas/remoteproc/qcom,pas-common.yaml#
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sm6375-adsp-pas
+            - qcom,sm6375-cdsp-pas
+    then:
+      properties:
+        interrupts:
+          maxItems: 5
+        interrupt-names:
+          maxItems: 5
+    else:
+      properties:
+        interrupts:
+          minItems: 6
+        interrupt-names:
+          minItems: 6
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sm6375-adsp-pas
+    then:
+      properties:
+        power-domains:
+          items:
+            - description: LCX power domain
+            - description: LMX power domain
+        power-domain-names:
+          items:
+            - const: lcx
+            - const: lmx
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sm6375-cdsp-pas
+            - qcom,sm6375-mpss-pas
+    then:
+      properties:
+        power-domains:
+          items:
+            - description: CX power domain
+        power-domain-names:
+          items:
+            - const: cx
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/clock/qcom,rpmcc.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/mailbox/qcom-ipcc.h>
+    #include <dt-bindings/power/qcom-rpmpd.h>
+
+    remoteproc_adsp: remoteproc@a400000 {
+        compatible = "qcom,sm6375-adsp-pas";
+        reg = <0x0a400000 0x100>;
+
+        interrupts-extended = <&intc GIC_SPI 282 IRQ_TYPE_LEVEL_HIGH>,
+                              <&smp2p_adsp_in 0 IRQ_TYPE_EDGE_RISING>,
+                              <&smp2p_adsp_in 1 IRQ_TYPE_EDGE_RISING>,
+                              <&smp2p_adsp_in 2 IRQ_TYPE_EDGE_RISING>,
+                              <&smp2p_adsp_in 3 IRQ_TYPE_EDGE_RISING>;
+        interrupt-names = "wdog", "fatal", "ready",
+                          "handover", "stop-ack";
+
+        clocks = <&rpmcc RPM_SMD_XO_CLK_SRC>;
+        clock-names = "xo";
+
+        power-domains = <&rpmpd SM6375_VDD_LPI_CX>,
+                        <&rpmpd SM6375_VDD_LPI_MX>;
+        power-domain-names = "lcx", "lmx";
+
+        memory-region = <&pil_adsp_mem>;
+
+        qcom,smem-states = <&smp2p_adsp_out 0>;
+        qcom,smem-state-names = "stop";
+
+        glink-edge {
+            interrupts-extended = <&ipcc IPCC_CLIENT_LPASS
+                                         IPCC_MPROC_SIGNAL_GLINK_QMP
+                                         IRQ_TYPE_EDGE_RISING>;
+            mboxes = <&ipcc IPCC_CLIENT_LPASS
+                            IPCC_MPROC_SIGNAL_GLINK_QMP>;
+
+            label = "lpass";
+            qcom,remote-pid = <2>;
+
+            /* ... */
+        };
+    };
index baccd98754a90b4bcdcaf9bd4e699fe882344a3c..faf16cf140851d801dfebfaeaa5654b044efdd9d 100644 (file)
@@ -66,6 +66,17 @@ properties:
       Should contain the name of the default firmware image
       file located on the firmware search path.
 
+  interrupts:
+    maxItems: 1
+    description:
+      Interrupt specifiers enable the virtio/rpmsg communication between MPU
+      and the PRU/RTU cores. For the values of the interrupt cells please refer
+      to interrupt-controller/ti,pruss-intc.yaml schema.
+
+  interrupt-names:
+    items:
+      - const: vring
+
 if:
   properties:
     compatible:
@@ -171,6 +182,9 @@ examples:
               <0x22400 0x100>;
         reg-names = "iram", "control", "debug";
         firmware-name = "am65x-pru0_0-fw";
+        interrupt-parent = <&icssg0_intc>;
+        interrupts = <16 2 2>;
+        interrupt-names = "vring";
       };
 
       rtu0_0: rtu@4000 {
@@ -180,6 +194,9 @@ examples:
               <0x23400 0x100>;
         reg-names = "iram", "control", "debug";
         firmware-name = "am65x-rtu0_0-fw";
+        interrupt-parent = <&icssg0_intc>;
+        interrupts = <20 4 4>;
+        interrupt-names = "vring";
       };
 
       tx_pru0_0: txpru@a000 {
@@ -198,6 +215,9 @@ examples:
               <0x24400 0x100>;
         reg-names = "iram", "control", "debug";
         firmware-name = "am65x-pru0_1-fw";
+        interrupt-parent = <&icssg0_intc>;
+        interrupts = <18 3 3>;
+        interrupt-names = "vring";
       };
 
       rtu0_1: rtu@6000 {
@@ -207,6 +227,9 @@ examples:
               <0x23c00 0x100>;
         reg-names = "iram", "control", "debug";
         firmware-name = "am65x-rtu0_1-fw";
+        interrupt-parent = <&icssg0_intc>;
+        interrupts = <22 5 5>;
+        interrupt-names = "vring";
       };
 
       tx_pru0_1: txpru@c000 {
diff --git a/Documentation/devicetree/bindings/rtc/cirrus,ep9301-rtc.yaml b/Documentation/devicetree/bindings/rtc/cirrus,ep9301-rtc.yaml
new file mode 100644 (file)
index 0000000..a95f6af
--- /dev/null
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/cirrus,ep9301-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus EP93xx Real Time Clock controller
+
+maintainers:
+  - Hartley Sweeten <hsweeten@visionengravers.com>
+  - Alexander Sverdlin <alexander.sverdlin@gmail.com>
+
+allOf:
+  - $ref: rtc.yaml#
+
+properties:
+  compatible:
+    oneOf:
+      - const: cirrus,ep9301-rtc
+      - items:
+          - enum:
+              - cirrus,ep9302-rtc
+              - cirrus,ep9307-rtc
+              - cirrus,ep9312-rtc
+              - cirrus,ep9315-rtc
+          - const: cirrus,ep9301-rtc
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    rtc@80920000 {
+        compatible = "cirrus,ep9301-rtc";
+        reg = <0x80920000 0x100>;
+    };
diff --git a/Documentation/devicetree/bindings/rtc/epson,rtc7301.txt b/Documentation/devicetree/bindings/rtc/epson,rtc7301.txt
deleted file mode 100644 (file)
index 5f9df3f..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-EPSON TOYOCOM RTC-7301SF/DG
-
-Required properties:
-
-- compatible: Should be "epson,rtc7301sf" or "epson,rtc7301dg"
-- reg: Specifies base physical address and size of the registers.
-- interrupts: A single interrupt specifier.
-
-Example:
-
-rtc: rtc@44a00000 {
-       compatible = "epson,rtc7301dg";
-       reg = <0x44a00000 0x10000>;
-       interrupt-parent = <&axi_intc_0>;
-       interrupts = <3 2>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/epson,rtc7301.yaml b/Documentation/devicetree/bindings/rtc/epson,rtc7301.yaml
new file mode 100644 (file)
index 0000000..bdb5cad
--- /dev/null
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/epson,rtc7301.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Epson Toyocom RTC-7301SF/DG
+
+description:
+  The only difference between the two variants is the packaging.
+  The DG variant is a DIL package, and the SF variant is a flat
+  package.
+
+maintainers:
+  - Akinobu Mita <akinobu.mita@gmail.com>
+
+properties:
+  compatible:
+    enum:
+      - epson,rtc7301dg
+      - epson,rtc7301sf
+
+  reg:
+    maxItems: 1
+
+  reg-io-width:
+    description:
+      The size (in bytes) of the IO accesses that should be performed
+      on the device.
+    enum: [1, 4]
+    default: 4
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    rtc: rtc@44a00000 {
+        compatible = "epson,rtc7301dg";
+        reg = <0x44a00000 0x10000>;
+        reg-io-width = <4>;
+        interrupt-parent = <&axi_intc_0>;
+        interrupts = <3 2>;
+    };
diff --git a/Documentation/devicetree/bindings/rtc/maxim,mcp795.txt b/Documentation/devicetree/bindings/rtc/maxim,mcp795.txt
deleted file mode 100644 (file)
index a59fdd8..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-* Maxim MCP795         SPI Serial Real-Time Clock
-
-Required properties:
-- compatible: Should contain "maxim,mcp795".
-- reg: SPI address for chip
-
-Example:
-       mcp795: rtc@0 {
-               compatible = "maxim,mcp795";
-               reg = <0>;
-       };
index 27a9de10f0af39c1ec1fe96fd565585eab2ae33b..7680089d1d9261499cd1341663ffdac96ac9628e 100644 (file)
@@ -38,6 +38,8 @@ properties:
       - 3000
       - 4400
 
+  wakeup-source: true
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/rtc/mstar,ssd202d-rtc.yaml b/Documentation/devicetree/bindings/rtc/mstar,ssd202d-rtc.yaml
new file mode 100644 (file)
index 0000000..4c1f22e
--- /dev/null
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/mstar,ssd202d-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mstar SSD202D Real Time Clock
+
+maintainers:
+  - Daniel Palmer <daniel@0x0f.com>
+  - Romain Perier <romain.perier@gmail.com>
+
+allOf:
+  - $ref: rtc.yaml#
+
+properties:
+  compatible:
+    enum:
+      - mstar,ssd202d-rtc
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    rtc@6800 {
+        compatible = "mstar,ssd202d-rtc";
+        reg = <0x6800 0x200>;
+    };
+...
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf2123.yaml b/Documentation/devicetree/bindings/rtc/nxp,pcf2123.yaml
new file mode 100644 (file)
index 0000000..96e377a
--- /dev/null
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/nxp,pcf2123.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP PCF2123 SPI Real Time Clock
+
+maintainers:
+  - Javier Carrasco <javier.carrasco.cruz@gmail.com>
+
+allOf:
+  - $ref: /schemas/spi/spi-peripheral-props.yaml#
+  - $ref: rtc.yaml#
+
+properties:
+  compatible:
+    enum:
+      - nxp,pcf2123
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    spi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        rtc@3 {
+            compatible = "nxp,pcf2123";
+            reg = <3>;
+            interrupts = <GIC_SPI 130 IRQ_TYPE_LEVEL_LOW>;
+            spi-cs-high;
+        };
+    };
+...
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf8523.txt b/Documentation/devicetree/bindings/rtc/nxp,pcf8523.txt
deleted file mode 100644 (file)
index 0b1080c..0000000
+++ /dev/null
@@ -1,18 +0,0 @@
-* NXP PCF8523 Real Time Clock
-
-Required properties:
-- compatible: Should contain "nxp,pcf8523".
-- reg: I2C address for chip.
-
-Optional property:
-- quartz-load-femtofarads: The capacitive load of the quartz(x-tal),
-  expressed in femto Farad (fF). Valid values are 7000 and 12500.
-  Default value (if no value is specified) is 12500fF.
-
-Example:
-
-pcf8523: rtc@68 {
-       compatible = "nxp,pcf8523";
-       reg = <0x68>;
-       quartz-load-femtofarads = <7000>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/nxp,pcf8523.yaml b/Documentation/devicetree/bindings/rtc/nxp,pcf8523.yaml
new file mode 100644 (file)
index 0000000..d11c8bc
--- /dev/null
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/nxp,pcf8523.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP PCF8523 Real Time Clock
+
+maintainers:
+  - Sam Ravnborg <sam@ravnborg.org>
+
+allOf:
+  - $ref: rtc.yaml#
+
+properties:
+  compatible:
+    const: nxp,pcf8523
+
+  reg:
+    maxItems: 1
+
+  quartz-load-femtofarads:
+    description:
+      The capacitive load of the crystal, expressed in femto Farad (fF).
+    enum: [ 7000, 12500 ]
+    default: 12500
+
+required:
+  - compatible
+  - reg
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    i2c {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        rtc@68 {
+            compatible = "nxp,pcf8523";
+            reg = <0x68>;
+            quartz-load-femtofarads = <7000>;
+        };
+    };
diff --git a/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt b/Documentation/devicetree/bindings/rtc/nxp,rtc-2123.txt
deleted file mode 100644 (file)
index 7371f52..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-NXP PCF2123 SPI Real Time Clock
-
-Required properties:
-- compatible: should be: "nxp,pcf2123"
-                      or "microcrystal,rv2123"
-- reg: should be the SPI slave chipselect address
-
-Optional properties:
-- spi-cs-high: PCF2123 needs chipselect high
-
-Example:
-
-pcf2123: rtc@3 {
-       compatible = "nxp,pcf2123"
-       reg = <3>
-       spi-cs-high;
-};
index 2a65f31ac5a058a53970d6f31514ca0e71e35600..c9e3c5262c21a243f51b319f1a366ce8c0627f71 100644 (file)
@@ -45,6 +45,8 @@ properties:
       - isil,isl1208
       # Intersil ISL1218 Low Power RTC with Battery Backed SRAM
       - isil,isl1218
+      # SPI-BUS INTERFACE REAL TIME CLOCK MODULE
+      - maxim,mcp795
       # Real Time Clock Module with I2C-Bus
       - microcrystal,rv3029
       # Real Time Clock
diff --git a/Documentation/devicetree/bindings/soc/nuvoton/nuvoton,gfxi.yaml b/Documentation/devicetree/bindings/soc/nuvoton/nuvoton,gfxi.yaml
new file mode 100644 (file)
index 0000000..0222a43
--- /dev/null
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/soc/nuvoton/nuvoton,gfxi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Graphics Core Information block in Nuvoton SoCs
+
+maintainers:
+  - Joseph Liu <kwliu@nuvoton.com>
+  - Marvin Lin <kflin@nuvoton.com>
+
+description:
+  The Graphics Core Information (GFXI) are a block of registers in Nuvoton SoCs
+  that analyzes Graphics core behavior and provides information in registers.
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - nuvoton,npcm750-gfxi
+          - nuvoton,npcm845-gfxi
+      - const: syscon
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    gfxi: gfxi@e000 {
+      compatible = "nuvoton,npcm750-gfxi", "syscon";
+      reg = <0xe000 0x100>;
+    };
index 64b2ef083fdf0b2053c030323377c2881199e655..c3190f2a168a22b1d8cf9c79b0b1de45d7029a09 100644 (file)
@@ -309,8 +309,6 @@ properties:
           - nuvoton,w83773g
             # OKI ML86V7667 video decoder
           - oki,ml86v7667
-            # OV5642: Color CMOS QSXGA (5-megapixel) Image Sensor with OmniBSI and Embedded TrueFocus
-          - ovti,ov5642
             # 48-Lane, 12-Port PCI Express Gen 2 (5.0 GT/s) Switch
           - plx,pex8648
             # Pulsedlight LIDAR range-finding sensor
index 443e2e7ab4676edef94c94dce01f570a6eadb7aa..69845ec32e818bbd9ba43048c7a84fcafcbd49e0 100644 (file)
@@ -15,9 +15,15 @@ allOf:
 
 properties:
   compatible:
-    enum:
-      - amlogic,meson-gxbb-wdt
-      - amlogic,t7-wdt
+    oneOf:
+      - enum:
+          - amlogic,meson-gxbb-wdt
+          - amlogic,t7-wdt
+      - items:
+          - enum:
+              - amlogic,c3-wdt
+              - amlogic,s4-wdt
+          - const: amlogic,t7-wdt
 
   reg:
     maxItems: 1
index a8197632d6d2eb82f46c4cd0a5d3376560333afe..3208adb3e52e821d05098be576aa4ac857cf1888 100644 (file)
@@ -47,7 +47,15 @@ Optional properties for AST2500-compatible watchdogs:
                           is configured as push-pull, then set the pulse
                           polarity to active-high. The default is active-low.
 
-Example:
+Optional properties for AST2500- and AST2600-compatible watchdogs:
+ - aspeed,reset-mask: A bitmask indicating which peripherals will be reset if
+                     the watchdog timer expires.  On AST2500 this should be a
+                     single word defined using the AST2500_WDT_RESET_* macros;
+                     on AST2600 this should be a two-word array with the first
+                     word defined using the AST2600_WDT_RESET1_* macros and the
+                     second word defined using the AST2600_WDT_RESET2_* macros.
+
+Examples:
 
        wdt1: watchdog@1e785000 {
                compatible = "aspeed,ast2400-wdt";
@@ -55,3 +63,11 @@ Example:
                aspeed,reset-type = "system";
                aspeed,external-signal;
        };
+
+       #include <dt-bindings/watchdog/aspeed-wdt.h>
+       wdt2: watchdog@1e785040 {
+               compatible = "aspeed,ast2600-wdt";
+               reg = <0x1e785040 0x40>;
+               aspeed,reset-mask = <AST2600_WDT_RESET1_DEFAULT
+                                    (AST2600_WDT_RESET2_DEFAULT & ~AST2600_WDT_RESET2_LPC)>;
+       };
index 4b7ed135570155790ae181243fa9972946d07b4d..9c50766bf690fd0f6e4a5517edbd319502f10fad 100644 (file)
@@ -30,6 +30,11 @@ properties:
   clocks:
     maxItems: 1
 
+  fsl,ext-reset-output:
+    description:
+      When set, wdog can generate external reset from the wdog_any pin.
+    type: boolean
+
 required:
   - compatible
   - interrupts
index 5046dfa55f135b569f4ba70b7e36e372904191b3..c12bc852aedc4ea49007344c4bdc40535b4a9302 100644 (file)
@@ -21,6 +21,8 @@ properties:
               - qcom,apss-wdt-ipq5018
               - qcom,apss-wdt-ipq5332
               - qcom,apss-wdt-ipq9574
+              - qcom,apss-wdt-msm8226
+              - qcom,apss-wdt-msm8974
               - qcom,apss-wdt-msm8994
               - qcom,apss-wdt-qcm2290
               - qcom,apss-wdt-qcs404
index 02653defa011d3173001583c6eea300dfb0a85c2..23a0b93c62b1e1765d3e71fc43280c98acf75126 100644 (file)
@@ -71,8 +71,8 @@ During DAA, each I3C device reports 3 important things:
   related capabilities
 * DCR: Device Characteristic Register. This 8-bit register describes the
   functionalities provided by the device
-* Provisional ID: A 48-bit unique identifier. On a given bus there should be no
-  Provisional ID collision, otherwise the discovery mechanism may fail.
+* Provisioned ID: A 48-bit unique identifier. On a given bus there should be no
+  Provisioned ID collision, otherwise the discovery mechanism may fail.
 
 I3C slave events
 ================
index 93f4f2536c250d543a24bf14095ba3d7c9d818c5..6456145f96ed00cce5f5d55c64644bae93ce0b39 100644 (file)
@@ -1,8 +1,14 @@
 .. SPDX-License-Identifier: GPL-2.0
 
+.. _media_writing_camera_sensor_drivers:
+
 Writing camera sensor drivers
 =============================
 
+This document covers the in-kernel APIs only. For the best practices on
+userspace API implementation in camera sensor drivers, please see
+:ref:`media_using_camera_sensor_drivers`.
+
 CSI-2 and parallel (BT.601 and BT.656) busses
 ---------------------------------------------
 
@@ -13,7 +19,7 @@ Handling clocks
 
 Camera sensors have an internal clock tree including a PLL and a number of
 divisors. The clock tree is generally configured by the driver based on a few
-input parameters that are specific to the hardware:: the external clock frequency
+input parameters that are specific to the hardware: the external clock frequency
 and the link frequency. The two parameters generally are obtained from system
 firmware. **No other frequencies should be used in any circumstances.**
 
@@ -32,110 +38,61 @@ can rely on this frequency being used.
 Devicetree
 ~~~~~~~~~~
 
-The currently preferred way to achieve this is using ``assigned-clocks``,
-``assigned-clock-parents`` and ``assigned-clock-rates`` properties. See
-``Documentation/devicetree/bindings/clock/clock-bindings.txt`` for more
-information. The driver then gets the frequency using ``clk_get_rate()``.
+The preferred way to achieve this is using ``assigned-clocks``,
+``assigned-clock-parents`` and ``assigned-clock-rates`` properties. See the
+`clock device tree bindings
+<https://github.com/devicetree-org/dt-schema/blob/main/dtschema/schemas/clock/clock.yaml>`_
+for more information. The driver then gets the frequency using
+``clk_get_rate()``.
 
 This approach has the drawback that there's no guarantee that the frequency
 hasn't been modified directly or indirectly by another driver, or supported by
 the board's clock tree to begin with. Changes to the Common Clock Framework API
 are required to ensure reliability.
 
-Frame size
-----------
-
-There are two distinct ways to configure the frame size produced by camera
-sensors.
-
-Freely configurable camera sensor drivers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Freely configurable camera sensor drivers expose the device's internal
-processing pipeline as one or more sub-devices with different cropping and
-scaling configurations. The output size of the device is the result of a series
-of cropping and scaling operations from the device's pixel array's size.
-
-An example of such a driver is the CCS driver (see ``drivers/media/i2c/ccs``).
-
-Register list based drivers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Register list based drivers generally, instead of able to configure the device
-they control based on user requests, are limited to a number of preset
-configurations that combine a number of different parameters that on hardware
-level are independent. How a driver picks such configuration is based on the
-format set on a source pad at the end of the device's internal pipeline.
-
-Most sensor drivers are implemented this way, see e.g.
-``drivers/media/i2c/imx319.c`` for an example.
-
-Frame interval configuration
-----------------------------
-
-There are two different methods for obtaining possibilities for different frame
-intervals as well as configuring the frame interval. Which one to implement
-depends on the type of the device.
-
-Raw camera sensors
-~~~~~~~~~~~~~~~~~~
-
-Instead of a high level parameter such as frame interval, the frame interval is
-a result of the configuration of a number of camera sensor implementation
-specific parameters. Luckily, these parameters tend to be the same for more or
-less all modern raw camera sensors.
-
-The frame interval is calculated using the following equation::
-
-       frame interval = (analogue crop width + horizontal blanking) *
-                        (analogue crop height + vertical blanking) / pixel rate
-
-The formula is bus independent and is applicable for raw timing parameters on
-large variety of devices beyond camera sensors. Devices that have no analogue
-crop, use the full source image size, i.e. pixel array size.
-
-Horizontal and vertical blanking are specified by ``V4L2_CID_HBLANK`` and
-``V4L2_CID_VBLANK``, respectively. The unit of the ``V4L2_CID_HBLANK`` control
-is pixels and the unit of the ``V4L2_CID_VBLANK`` is lines. The pixel rate in
-the sensor's **pixel array** is specified by ``V4L2_CID_PIXEL_RATE`` in the same
-sub-device. The unit of that control is pixels per second.
-
-Register list based drivers need to implement read-only sub-device nodes for the
-purpose. Devices that are not register list based need these to configure the
-device's internal processing pipeline.
-
-The first entity in the linear pipeline is the pixel array. The pixel array may
-be followed by other entities that are there to allow configuring binning,
-skipping, scaling or digital crop :ref:`v4l2-subdev-selections`.
-
-USB cameras etc. devices
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-USB video class hardware, as well as many cameras offering a similar higher
-level interface natively, generally use the concept of frame interval (or frame
-rate) on device level in firmware or hardware. This means lower level controls
-implemented by raw cameras may not be used on uAPI (or even kAPI) to control the
-frame interval on these devices.
-
 Power management
 ----------------
 
-Always use runtime PM to manage the power states of your device. Camera sensor
-drivers are in no way special in this respect: they are responsible for
-controlling the power state of the device they otherwise control as well. In
-general, the device must be powered on at least when its registers are being
-accessed and when it is streaming.
-
-Existing camera sensor drivers may rely on the old
-struct v4l2_subdev_core_ops->s_power() callback for bridge or ISP drivers to
-manage their power state. This is however **deprecated**. If you feel you need
-to begin calling an s_power from an ISP or a bridge driver, instead please add
-runtime PM support to the sensor driver you are using. Likewise, new drivers
-should not use s_power.
-
-Please see examples in e.g. ``drivers/media/i2c/ov8856.c`` and
-``drivers/media/i2c/ccs/ccs-core.c``. The two drivers work in both ACPI
-and DT based systems.
+Camera sensors are used in conjunction with other devices to form a camera
+pipeline. They must obey the rules listed herein to ensure coherent power
+management over the pipeline.
+
+Camera sensor drivers are responsible for controlling the power state of the
+device they otherwise control as well. They shall use runtime PM to manage
+power states. Runtime PM shall be enabled at probe time and disabled at remove
+time. Drivers should enable runtime PM autosuspend.
+
+The runtime PM handlers shall handle clocks, regulators, GPIOs, and other
+system resources required to power the sensor up and down. For drivers that
+don't use any of those resources (such as drivers that support ACPI systems
+only), the runtime PM handlers may be left unimplemented.
+
+In general, the device shall be powered on at least when its registers are
+being accessed and when it is streaming. Drivers should use
+``pm_runtime_resume_and_get()`` when starting streaming and
+``pm_runtime_put()`` or ``pm_runtime_put_autosuspend()`` when stopping
+streaming. They may power the device up at probe time (for example to read
+identification registers), but should not keep it powered unconditionally after
+probe.
+
+At system suspend time, the whole camera pipeline must stop streaming, and
+restart when the system is resumed. This requires coordination between the
+camera sensor and the rest of the camera pipeline. Bridge drivers are
+responsible for this coordination, and instruct camera sensors to stop and
+restart streaming by calling the appropriate subdev operations
+(``.s_stream()``, ``.enable_streams()`` or ``.disable_streams()``). Camera
+sensor drivers shall therefore **not** keep track of the streaming state to
+stop streaming in the PM suspend handler and restart it in the resume handler.
+Drivers should in general not implement the system PM handlers.
+
+Camera sensor drivers shall **not** implement the subdev ``.s_power()``
+operation, as it is deprecated. While this operation is implemented in some
+existing drivers as they predate the deprecation, new drivers shall use runtime
+PM instead. If you feel you need to begin calling ``.s_power()`` from an ISP or
+a bridge driver, instead add runtime PM support to the sensor driver you are
+using and drop its ``.s_power()`` handler.
+
+Please also see :ref:`examples <media-camera-sensor-examples>`.
 
 Control framework
 ~~~~~~~~~~~~~~~~~
@@ -155,21 +112,36 @@ access the device.
 Rotation, orientation and flipping
 ----------------------------------
 
-Some systems have the camera sensor mounted upside down compared to its natural
-mounting rotation. In such cases, drivers shall expose the information to
-userspace with the :ref:`V4L2_CID_CAMERA_SENSOR_ROTATION
-<v4l2-camera-sensor-rotation>` control.
-
-Sensor drivers shall also report the sensor's mounting orientation with the
-:ref:`V4L2_CID_CAMERA_SENSOR_ORIENTATION <v4l2-camera-sensor-orientation>`.
-
 Use ``v4l2_fwnode_device_parse()`` to obtain rotation and orientation
 information from system firmware and ``v4l2_ctrl_new_fwnode_properties()`` to
 register the appropriate controls.
 
-Sensor drivers that have any vertical or horizontal flips embedded in the
-register programming sequences shall initialize the V4L2_CID_HFLIP and
-V4L2_CID_VFLIP controls with the values programmed by the register sequences.
-The default values of these controls shall be 0 (disabled). Especially these
-controls shall not be inverted, independently of the sensor's mounting
-rotation.
+.. _media-camera-sensor-examples:
+
+Example drivers
+---------------
+
+Features implemented by sensor drivers vary, and depending on the set of
+supported features and other qualities, some drivers serve better than
+others as examples. The following drivers are known to be good examples:
+
+.. flat-table:: Example sensor drivers
+    :header-rows: 0
+    :widths:      1 1 1 2
+
+    * - Driver name
+      - File(s)
+      - Driver type
+      - Example topic
+    * - CCS
+      - ``drivers/media/i2c/ccs/``
+      - Freely configurable
+      - Power management (ACPI and DT), UAPI
+    * - imx219
+      - ``drivers/media/i2c/imx219.c``
+      - Register list based
+      - Power management (DT), UAPI, mode selection
+    * - imx319
+      - ``drivers/media/i2c/imx319.c``
+      - Register list based
+      - Power management (ACPI and DT)
index 7389204afcb8235abf733026f09ea447891f9069..776eec72bc80a1176663daf2b76502163e230604 100644 (file)
@@ -30,7 +30,7 @@ that purpose, selection target ``V4L2_SEL_TGT_COMPOSE`` is supported on the
 sink pad (0).
 
 Additionally, if a device has no scaler or digital crop functionality, the
-source pad (1) expses another digital crop selection rectangle that can only
+source pad (1) exposes another digital crop selection rectangle that can only
 crop at the end of the lines and frames.
 
 Scaler
@@ -78,6 +78,14 @@ For SMIA (non-++) compliant devices the static data file name is
 vvvv or vv denotes MIPI and SMIA manufacturer IDs respectively, mmmm model ID
 and rrrr or rr revision number.
 
+CCS tools
+~~~~~~~~~
+
+`CCS tools <https://github.com/MIPI-Alliance/ccs-tools/>`_ is a set of
+tools for working with CCS static data files. CCS tools includes a
+definition of the human-readable CCS static data YAML format and a
+program to convert it to binary.
+
 Register definition generator
 -----------------------------
 
index 239045ecc8f420d8256de7016357536a13d9b0eb..58cba831ade5a03614fb55d9cd26a0103e6ec377 100644 (file)
@@ -13,7 +13,6 @@ Video4Linux devices
     v4l2-subdev
     v4l2-event
     v4l2-controls
-    v4l2-videobuf
     v4l2-videobuf2
     v4l2-dv-timings
     v4l2-flash-led-class
index 99e3b5fa7444743042b579c2811a518385cd31ee..d5cb19b21a9f79a9ef854daedc4d5a15b50d9560 100644 (file)
@@ -157,14 +157,6 @@ changing the e.g. exposure of the webcam.
 Of course, you can always do all the locking yourself by leaving both lock
 pointers at ``NULL``.
 
-If you use the old :ref:`videobuf framework <vb_framework>` then you must
-pass the :c:type:`video_device`->lock to the videobuf queue initialize
-function: if videobuf has to wait for a frame to arrive, then it will
-temporarily unlock the lock and relock it afterwards. If your driver also
-waits in the code, then you should do the same to allow other
-processes to access the device node while the first process is waiting for
-something.
-
 In the case of :ref:`videobuf2 <vb2_framework>` you will need to implement the
 ``wait_prepare()`` and ``wait_finish()`` callbacks to unlock/lock if applicable.
 If you use the ``queue->lock`` pointer, then you can use the helper functions
diff --git a/Documentation/driver-api/media/v4l2-videobuf.rst b/Documentation/driver-api/media/v4l2-videobuf.rst
deleted file mode 100644 (file)
index 4b1d84e..0000000
+++ /dev/null
@@ -1,403 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-.. _vb_framework:
-
-Videobuf Framework
-==================
-
-Author: Jonathan Corbet <corbet@lwn.net>
-
-Current as of 2.6.33
-
-.. note::
-
-   The videobuf framework was deprecated in favor of videobuf2. Shouldn't
-   be used on new drivers.
-
-Introduction
-------------
-
-The videobuf layer functions as a sort of glue layer between a V4L2 driver
-and user space.  It handles the allocation and management of buffers for
-the storage of video frames.  There is a set of functions which can be used
-to implement many of the standard POSIX I/O system calls, including read(),
-poll(), and, happily, mmap().  Another set of functions can be used to
-implement the bulk of the V4L2 ioctl() calls related to streaming I/O,
-including buffer allocation, queueing and dequeueing, and streaming
-control.  Using videobuf imposes a few design decisions on the driver
-author, but the payback comes in the form of reduced code in the driver and
-a consistent implementation of the V4L2 user-space API.
-
-Buffer types
-------------
-
-Not all video devices use the same kind of buffers.  In fact, there are (at
-least) three common variations:
-
- - Buffers which are scattered in both the physical and (kernel) virtual
-   address spaces.  (Almost) all user-space buffers are like this, but it
-   makes great sense to allocate kernel-space buffers this way as well when
-   it is possible.  Unfortunately, it is not always possible; working with
-   this kind of buffer normally requires hardware which can do
-   scatter/gather DMA operations.
-
- - Buffers which are physically scattered, but which are virtually
-   contiguous; buffers allocated with vmalloc(), in other words.  These
-   buffers are just as hard to use for DMA operations, but they can be
-   useful in situations where DMA is not available but virtually-contiguous
-   buffers are convenient.
-
- - Buffers which are physically contiguous.  Allocation of this kind of
-   buffer can be unreliable on fragmented systems, but simpler DMA
-   controllers cannot deal with anything else.
-
-Videobuf can work with all three types of buffers, but the driver author
-must pick one at the outset and design the driver around that decision.
-
-[It's worth noting that there's a fourth kind of buffer: "overlay" buffers
-which are located within the system's video memory.  The overlay
-functionality is considered to be deprecated for most use, but it still
-shows up occasionally in system-on-chip drivers where the performance
-benefits merit the use of this technique.  Overlay buffers can be handled
-as a form of scattered buffer, but there are very few implementations in
-the kernel and a description of this technique is currently beyond the
-scope of this document.]
-
-Data structures, callbacks, and initialization
-----------------------------------------------
-
-Depending on which type of buffers are being used, the driver should
-include one of the following files:
-
-.. code-block:: none
-
-    <media/videobuf-dma-sg.h>          /* Physically scattered */
-    <media/videobuf-vmalloc.h>         /* vmalloc() buffers    */
-    <media/videobuf-dma-contig.h>      /* Physically contiguous */
-
-The driver's data structure describing a V4L2 device should include a
-struct videobuf_queue instance for the management of the buffer queue,
-along with a list_head for the queue of available buffers.  There will also
-need to be an interrupt-safe spinlock which is used to protect (at least)
-the queue.
-
-The next step is to write four simple callbacks to help videobuf deal with
-the management of buffers:
-
-.. code-block:: none
-
-    struct videobuf_queue_ops {
-       int (*buf_setup)(struct videobuf_queue *q,
-                        unsigned int *count, unsigned int *size);
-       int (*buf_prepare)(struct videobuf_queue *q,
-                          struct videobuf_buffer *vb,
-                          enum v4l2_field field);
-       void (*buf_queue)(struct videobuf_queue *q,
-                         struct videobuf_buffer *vb);
-       void (*buf_release)(struct videobuf_queue *q,
-                           struct videobuf_buffer *vb);
-    };
-
-buf_setup() is called early in the I/O process, when streaming is being
-initiated; its purpose is to tell videobuf about the I/O stream.  The count
-parameter will be a suggested number of buffers to use; the driver should
-check it for rationality and adjust it if need be.  As a practical rule, a
-minimum of two buffers are needed for proper streaming, and there is
-usually a maximum (which cannot exceed 32) which makes sense for each
-device.  The size parameter should be set to the expected (maximum) size
-for each frame of data.
-
-Each buffer (in the form of a struct videobuf_buffer pointer) will be
-passed to buf_prepare(), which should set the buffer's size, width, height,
-and field fields properly.  If the buffer's state field is
-VIDEOBUF_NEEDS_INIT, the driver should pass it to:
-
-.. code-block:: none
-
-    int videobuf_iolock(struct videobuf_queue* q, struct videobuf_buffer *vb,
-                       struct v4l2_framebuffer *fbuf);
-
-Among other things, this call will usually allocate memory for the buffer.
-Finally, the buf_prepare() function should set the buffer's state to
-VIDEOBUF_PREPARED.
-
-When a buffer is queued for I/O, it is passed to buf_queue(), which should
-put it onto the driver's list of available buffers and set its state to
-VIDEOBUF_QUEUED.  Note that this function is called with the queue spinlock
-held; if it tries to acquire it as well things will come to a screeching
-halt.  Yes, this is the voice of experience.  Note also that videobuf may
-wait on the first buffer in the queue; placing other buffers in front of it
-could again gum up the works.  So use list_add_tail() to enqueue buffers.
-
-Finally, buf_release() is called when a buffer is no longer intended to be
-used.  The driver should ensure that there is no I/O active on the buffer,
-then pass it to the appropriate free routine(s):
-
-.. code-block:: none
-
-    /* Scatter/gather drivers */
-    int videobuf_dma_unmap(struct videobuf_queue *q,
-                          struct videobuf_dmabuf *dma);
-    int videobuf_dma_free(struct videobuf_dmabuf *dma);
-
-    /* vmalloc drivers */
-    void videobuf_vmalloc_free (struct videobuf_buffer *buf);
-
-    /* Contiguous drivers */
-    void videobuf_dma_contig_free(struct videobuf_queue *q,
-                                 struct videobuf_buffer *buf);
-
-One way to ensure that a buffer is no longer under I/O is to pass it to:
-
-.. code-block:: none
-
-    int videobuf_waiton(struct videobuf_buffer *vb, int non_blocking, int intr);
-
-Here, vb is the buffer, non_blocking indicates whether non-blocking I/O
-should be used (it should be zero in the buf_release() case), and intr
-controls whether an interruptible wait is used.
-
-File operations
----------------
-
-At this point, much of the work is done; much of the rest is slipping
-videobuf calls into the implementation of the other driver callbacks.  The
-first step is in the open() function, which must initialize the
-videobuf queue.  The function to use depends on the type of buffer used:
-
-.. code-block:: none
-
-    void videobuf_queue_sg_init(struct videobuf_queue *q,
-                               struct videobuf_queue_ops *ops,
-                               struct device *dev,
-                               spinlock_t *irqlock,
-                               enum v4l2_buf_type type,
-                               enum v4l2_field field,
-                               unsigned int msize,
-                               void *priv);
-
-    void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
-                               struct videobuf_queue_ops *ops,
-                               struct device *dev,
-                               spinlock_t *irqlock,
-                               enum v4l2_buf_type type,
-                               enum v4l2_field field,
-                               unsigned int msize,
-                               void *priv);
-
-    void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
-                                      struct videobuf_queue_ops *ops,
-                                      struct device *dev,
-                                      spinlock_t *irqlock,
-                                      enum v4l2_buf_type type,
-                                      enum v4l2_field field,
-                                      unsigned int msize,
-                                      void *priv);
-
-In each case, the parameters are the same: q is the queue structure for the
-device, ops is the set of callbacks as described above, dev is the device
-structure for this video device, irqlock is an interrupt-safe spinlock to
-protect access to the data structures, type is the buffer type used by the
-device (cameras will use V4L2_BUF_TYPE_VIDEO_CAPTURE, for example), field
-describes which field is being captured (often V4L2_FIELD_NONE for
-progressive devices), msize is the size of any containing structure used
-around struct videobuf_buffer, and priv is a private data pointer which
-shows up in the priv_data field of struct videobuf_queue.  Note that these
-are void functions which, evidently, are immune to failure.
-
-V4L2 capture drivers can be written to support either of two APIs: the
-read() system call and the rather more complicated streaming mechanism.  As
-a general rule, it is necessary to support both to ensure that all
-applications have a chance of working with the device.  Videobuf makes it
-easy to do that with the same code.  To implement read(), the driver need
-only make a call to one of:
-
-.. code-block:: none
-
-    ssize_t videobuf_read_one(struct videobuf_queue *q,
-                             char __user *data, size_t count,
-                             loff_t *ppos, int nonblocking);
-
-    ssize_t videobuf_read_stream(struct videobuf_queue *q,
-                                char __user *data, size_t count,
-                                loff_t *ppos, int vbihack, int nonblocking);
-
-Either one of these functions will read frame data into data, returning the
-amount actually read; the difference is that videobuf_read_one() will only
-read a single frame, while videobuf_read_stream() will read multiple frames
-if they are needed to satisfy the count requested by the application.  A
-typical driver read() implementation will start the capture engine, call
-one of the above functions, then stop the engine before returning (though a
-smarter implementation might leave the engine running for a little while in
-anticipation of another read() call happening in the near future).
-
-The poll() function can usually be implemented with a direct call to:
-
-.. code-block:: none
-
-    unsigned int videobuf_poll_stream(struct file *file,
-                                     struct videobuf_queue *q,
-                                     poll_table *wait);
-
-Note that the actual wait queue eventually used will be the one associated
-with the first available buffer.
-
-When streaming I/O is done to kernel-space buffers, the driver must support
-the mmap() system call to enable user space to access the data.  In many
-V4L2 drivers, the often-complex mmap() implementation simplifies to a
-single call to:
-
-.. code-block:: none
-
-    int videobuf_mmap_mapper(struct videobuf_queue *q,
-                            struct vm_area_struct *vma);
-
-Everything else is handled by the videobuf code.
-
-The release() function requires two separate videobuf calls:
-
-.. code-block:: none
-
-    void videobuf_stop(struct videobuf_queue *q);
-    int videobuf_mmap_free(struct videobuf_queue *q);
-
-The call to videobuf_stop() terminates any I/O in progress - though it is
-still up to the driver to stop the capture engine.  The call to
-videobuf_mmap_free() will ensure that all buffers have been unmapped; if
-so, they will all be passed to the buf_release() callback.  If buffers
-remain mapped, videobuf_mmap_free() returns an error code instead.  The
-purpose is clearly to cause the closing of the file descriptor to fail if
-buffers are still mapped, but every driver in the 2.6.32 kernel cheerfully
-ignores its return value.
-
-ioctl() operations
-------------------
-
-The V4L2 API includes a very long list of driver callbacks to respond to
-the many ioctl() commands made available to user space.  A number of these
-- those associated with streaming I/O - turn almost directly into videobuf
-calls.  The relevant helper functions are:
-
-.. code-block:: none
-
-    int videobuf_reqbufs(struct videobuf_queue *q,
-                        struct v4l2_requestbuffers *req);
-    int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b);
-    int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b);
-    int videobuf_dqbuf(struct videobuf_queue *q, struct v4l2_buffer *b,
-                      int nonblocking);
-    int videobuf_streamon(struct videobuf_queue *q);
-    int videobuf_streamoff(struct videobuf_queue *q);
-
-So, for example, a VIDIOC_REQBUFS call turns into a call to the driver's
-vidioc_reqbufs() callback which, in turn, usually only needs to locate the
-proper struct videobuf_queue pointer and pass it to videobuf_reqbufs().
-These support functions can replace a great deal of buffer management
-boilerplate in a lot of V4L2 drivers.
-
-The vidioc_streamon() and vidioc_streamoff() functions will be a bit more
-complex, of course, since they will also need to deal with starting and
-stopping the capture engine.
-
-Buffer allocation
------------------
-
-Thus far, we have talked about buffers, but have not looked at how they are
-allocated.  The scatter/gather case is the most complex on this front.  For
-allocation, the driver can leave buffer allocation entirely up to the
-videobuf layer; in this case, buffers will be allocated as anonymous
-user-space pages and will be very scattered indeed.  If the application is
-using user-space buffers, no allocation is needed; the videobuf layer will
-take care of calling get_user_pages() and filling in the scatterlist array.
-
-If the driver needs to do its own memory allocation, it should be done in
-the vidioc_reqbufs() function, *after* calling videobuf_reqbufs().  The
-first step is a call to:
-
-.. code-block:: none
-
-    struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf);
-
-The returned videobuf_dmabuf structure (defined in
-<media/videobuf-dma-sg.h>) includes a couple of relevant fields:
-
-.. code-block:: none
-
-    struct scatterlist  *sglist;
-    int                 sglen;
-
-The driver must allocate an appropriately-sized scatterlist array and
-populate it with pointers to the pieces of the allocated buffer; sglen
-should be set to the length of the array.
-
-Drivers using the vmalloc() method need not (and cannot) concern themselves
-with buffer allocation at all; videobuf will handle those details.  The
-same is normally true of contiguous-DMA drivers as well; videobuf will
-allocate the buffers (with dma_alloc_coherent()) when it sees fit.  That
-means that these drivers may be trying to do high-order allocations at any
-time, an operation which is not always guaranteed to work.  Some drivers
-play tricks by allocating DMA space at system boot time; videobuf does not
-currently play well with those drivers.
-
-As of 2.6.31, contiguous-DMA drivers can work with a user-supplied buffer,
-as long as that buffer is physically contiguous.  Normal user-space
-allocations will not meet that criterion, but buffers obtained from other
-kernel drivers, or those contained within huge pages, will work with these
-drivers.
-
-Filling the buffers
--------------------
-
-The final part of a videobuf implementation has no direct callback - it's
-the portion of the code which actually puts frame data into the buffers,
-usually in response to interrupts from the device.  For all types of
-drivers, this process works approximately as follows:
-
- - Obtain the next available buffer and make sure that somebody is actually
-   waiting for it.
-
- - Get a pointer to the memory and put video data there.
-
- - Mark the buffer as done and wake up the process waiting for it.
-
-Step (1) above is done by looking at the driver-managed list_head structure
-- the one which is filled in the buf_queue() callback.  Because starting
-the engine and enqueueing buffers are done in separate steps, it's possible
-for the engine to be running without any buffers available - in the
-vmalloc() case especially.  So the driver should be prepared for the list
-to be empty.  It is equally possible that nobody is yet interested in the
-buffer; the driver should not remove it from the list or fill it until a
-process is waiting on it.  That test can be done by examining the buffer's
-done field (a wait_queue_head_t structure) with waitqueue_active().
-
-A buffer's state should be set to VIDEOBUF_ACTIVE before being mapped for
-DMA; that ensures that the videobuf layer will not try to do anything with
-it while the device is transferring data.
-
-For scatter/gather drivers, the needed memory pointers will be found in the
-scatterlist structure described above.  Drivers using the vmalloc() method
-can get a memory pointer with:
-
-.. code-block:: none
-
-    void *videobuf_to_vmalloc(struct videobuf_buffer *buf);
-
-For contiguous DMA drivers, the function to use is:
-
-.. code-block:: none
-
-    dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf);
-
-The contiguous DMA API goes out of its way to hide the kernel-space address
-of the DMA buffer from drivers.
-
-The final step is to set the size field of the relevant videobuf_buffer
-structure to the actual size of the captured image, set state to
-VIDEOBUF_DONE, then call wake_up() on the done queue.  At this point, the
-buffer is owned by the videobuf layer and the driver should not touch it
-again.
-
-Developers who are interested in more information can go into the relevant
-header files; there are a few low-level functions declared there which have
-not been talked about here.  Note also that all of these calls are exported
-GPL-only, so they will not be available to non-GPL kernel modules.
index 198d805d611c6781d97a59d85941ccd8a5fe19a2..f04ce1215a03e700c7d750a37fb5d860ea8ba7cd 100644 (file)
@@ -122,12 +122,9 @@ are exportable by setting the s_export_op field in the struct
 super_block.  This field must point to a "struct export_operations"
 struct which has the following members:
 
-  encode_fh (optional)
+  encode_fh (mandatory)
     Takes a dentry and creates a filehandle fragment which may later be used
-    to find or create a dentry for the same object.  The default
-    implementation creates a filehandle fragment that encodes a 32bit inode
-    and generation number for the inode encoded, and if necessary the
-    same information for the parent.
+    to find or create a dentry for the same object.
 
   fh_to_dentry (mandatory)
     Given a filehandle fragment, this should find the implied object and
index 5b93268e400f4c78e3adf20210574cc840aa6b96..0407f361f32a236b28dc0284d01ae2ef3f2aa1c7 100644 (file)
@@ -344,10 +344,11 @@ escaping the colons with a single backslash.  For example:
 
   mount -t overlay overlay -olowerdir=/a\:lower\:\:dir /merged
 
-Since kernel version v6.5, directory names containing colons can also
-be provided as lower layer using the fsconfig syscall from new mount api:
+Since kernel version v6.8, directory names containing colons can also
+be configured as lower layer using the "lowerdir+" mount options and the
+fsconfig syscall from new mount api.  For example:
 
-  fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir", "/a:lower::dir", 0);
+  fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/a:lower::dir", 0);
 
 In the latter case, colons in lower layer directory names will be escaped
 as an octal characters (\072) when displayed in /proc/self/mountinfo.
@@ -416,6 +417,16 @@ Only the data of the files in the "data-only" lower layers may be visible
 when a "metacopy" file in one of the lower layers above it, has a "redirect"
 to the absolute path of the "lower data" file in the "data-only" lower layer.
 
+Since kernel version v6.8, "data-only" lower layers can also be added using
+the "datadir+" mount options and the fsconfig syscall from new mount api.
+For example:
+
+  fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/l1", 0);
+  fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/l2", 0);
+  fsconfig(fs_fd, FSCONFIG_SET_STRING, "lowerdir+", "/l3", 0);
+  fsconfig(fs_fd, FSCONFIG_SET_STRING, "datadir+", "/do1", 0);
+  fsconfig(fs_fd, FSCONFIG_SET_STRING, "datadir+", "/do2", 0);
+
 
 fs-verity support
 ----------------------
@@ -504,6 +515,29 @@ directory tree on the same or different underlying filesystem, and even
 to a different machine.  With the "inodes index" feature, trying to mount
 the copied layers will fail the verification of the lower root file handle.
 
+Nesting overlayfs mounts
+------------------------
+
+It is possible to use a lower directory that is stored on an overlayfs
+mount. For regular files this does not need any special care. However, files
+that have overlayfs attributes, such as whiteouts or "overlay.*" xattrs will be
+interpreted by the underlying overlayfs mount and stripped out. In order to
+allow the second overlayfs mount to see the attributes they must be escaped.
+
+Overlayfs specific xattrs are escaped by using a special prefix of
+"overlay.overlay.". So, a file with a "trusted.overlay.overlay.metacopy" xattr
+in the lower dir will be exposed as a regular file with a
+"trusted.overlay.metacopy" xattr in the overlayfs mount. This can be nested by
+repeating the prefix multiple times, as each instance only removes one prefix.
+
+A lower dir with a regular whiteout will always be handled by the overlayfs
+mount, so to support storing an effective whiteout file in an overlayfs mount an
+alternative form of whiteout is supported. This form is a regular, zero-size
+file with the "overlay.whiteout" xattr set, inside a directory with the
+"overlay.whiteouts" xattr set. Such whiteouts are never created by overlayfs,
+but can be used by userspace tools (like containers) that generate lower layers.
+These alternative whiteouts can be escaped using the standard xattr escape
+mechanism in order to properly nest to any depth.
 
 Non-standard behavior
 ---------------------
index d69f59700a23359077b94ac126d1e714967e6982..878e72b2f8b76ab04b11ff4a041887c02b7e8893 100644 (file)
@@ -1052,3 +1052,12 @@ kill_anon_super(), or kill_block_super() helpers.
 
 Lock ordering has been changed so that s_umount ranks above open_mutex again.
 All places where s_umount was taken under open_mutex have been fixed up.
+
+---
+
+**mandatory**
+
+export_operations ->encode_fh() no longer has a default implementation to
+encode FILEID_INO32_GEN* file handles.
+Filesystems that used the default implementation may use the generic helper
+generic_encode_ino32_fh() explicitly.
index e76e68ccf7182c55b53c7a0ee8d195ede1163632..10eced6c2e4625f78e89cfc52bbb1ead7f417660 100644 (file)
@@ -47,6 +47,7 @@ Supported adapters:
   * Intel Alder Lake (PCH)
   * Intel Raptor Lake (PCH)
   * Intel Meteor Lake (SOC and PCH)
+  * Intel Birch Stream (SOC)
 
    Datasheets: Publicly available at the Intel website
 
index 80b14e718b525de73915941b0f0a622a41e9f8b3..b0864d1268bd42ce4a6c3849da58cccc0c4d6789 100644 (file)
@@ -39,6 +39,10 @@ Also, codes returned by adapter probe methods follow rules which are
 specific to their host bus (such as PCI, or the platform bus).
 
 
+EAFNOSUPPORT
+       Returned by I2C adapters not supporting 10 bit addresses when
+       they are requested to use such an address.
+
 EAGAIN
        Returned by I2C adapters when they lose arbitration in master
        transmit mode:  some other master was transmitting different
index c6ba4889575a0979ebac1c817a81d7194d907bb2..572d83a414d0d08b5b833da360a149531528f172 100644 (file)
@@ -71,6 +71,10 @@ definitions:
         name: roce-bit
       -
         name: migratable-bit
+      -
+        name: ipsec-crypto-bit
+      -
+        name: ipsec-packet-bit
   -
     type: enum
     name: sb-threshold-type
index 6d8acdbe9be1de705efef5f11504abe5ad2929d2..769149d98773a28f6634b7a8ced908fdd5e2e3c3 100644 (file)
@@ -44,18 +44,16 @@ smcr_testlink_time - INTEGER
 
 wmem - INTEGER
        Initial size of send buffer used by SMC sockets.
-       The default value inherits from net.ipv4.tcp_wmem[1].
 
        The minimum value is 16KiB and there is no hard limit for max value, but
        only allowed 512KiB for SMC-R and 1MiB for SMC-D.
 
-       Default: 16K
+       Default: 64KiB
 
 rmem - INTEGER
        Initial size of receive buffer (RMB) used by SMC sockets.
-       The default value inherits from net.ipv4.tcp_rmem[1].
 
        The minimum value is 16KiB and there is no hard limit for max value, but
        only allowed 512KiB for SMC-R and 1MiB for SMC-D.
 
-       Default: 128K
+       Default: 64KiB
index 8e9bebcf0a2e8f0d05edca303d9fbdf718873ce1..e35e6b18df40cda0a1137fae36d64457a1247c84 100644 (file)
@@ -59,8 +59,12 @@ Synopsis of fprobe-events
                   and bitfield are supported.
 
   (\*1) This is available only when BTF is enabled.
-  (\*2) only for the probe on function entry (offs == 0).
-  (\*3) only for return probe.
+  (\*2) only for the probe on function entry (offs == 0). Note, this argument access
+        is best effort, because depending on the argument type, it may be passed on
+        the stack. But this only supports the arguments via registers.
+  (\*3) only for return probe. Note that this is also best effort. Depending on the
+        return value type, it might be passed via a pair of registers. But this only
+        accesses one register.
   (\*4) this is useful for fetching a field of data structures.
   (\*5) "u" means user-space dereference.
 
index 8a2dfee3814544a76b67c18dd4f033786f4c8132..bf9cecb69fc9e331c3d9f2fa798b6931e009eb38 100644 (file)
@@ -61,8 +61,12 @@ Synopsis of kprobe_events
                  (x8/x16/x32/x64), "char", "string", "ustring", "symbol", "symstr"
                   and bitfield are supported.
 
-  (\*1) only for the probe on function entry (offs == 0).
-  (\*2) only for return probe.
+  (\*1) only for the probe on function entry (offs == 0). Note, this argument access
+        is best effort, because depending on the argument type, it may be passed on
+        the stack. But this only supports the arguments via registers.
+  (\*2) only for return probe. Note that this is also best effort. Depending on the
+        return value type, it might be passed via a pair of registers. But this only
+        accesses one register.
   (\*3) this is useful for fetching a field of data structures.
   (\*4) "u" means user-space dereference. See :ref:`user_mem_access`.
 
index a88fcbc11eca6ec1d93d774e92bf139ae6cf2411..9cc97ec75d7a4ff6a7d91a0013ac4306200c1fd8 100644 (file)
@@ -768,18 +768,6 @@ const char *video_device_node_name(struct video_device *vdev);
 此功能,而非访问 video_device::num 和 video_device::minor 域。
 
 
-视频缓冲辅助函数
----------------
-
-v4l2 核心 API 提供了一个处理视频缓冲的标准方法(称为“videobuf”)。
-这些方法使驱动可以通过统一的方式实现 read()、mmap() 和 overlay()。
-目前在设备上支持视频缓冲的方法有分散/聚集 DMA(videobuf-dma-sg)、
-线性 DMA(videobuf-dma-contig)以及大多用于 USB 设备的用 vmalloc
-分配的缓冲(videobuf-vmalloc)。
-
-请参阅 Documentation/driver-api/media/v4l2-videobuf.rst,以获得更多关于 videobuf
-层的使用信息。
-
 v4l2_fh 结构体
 -------------
 
diff --git a/Documentation/userspace-api/media/drivers/camera-sensor.rst b/Documentation/userspace-api/media/drivers/camera-sensor.rst
new file mode 100644 (file)
index 0000000..919a50e
--- /dev/null
@@ -0,0 +1,104 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _media_using_camera_sensor_drivers:
+
+Using camera sensor drivers
+===========================
+
+This section describes common practices for how the V4L2 sub-device interface is
+used to control the camera sensor drivers.
+
+You may also find :ref:`media_writing_camera_sensor_drivers` useful.
+
+Frame size
+----------
+
+There are two distinct ways to configure the frame size produced by camera
+sensors.
+
+Freely configurable camera sensor drivers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Freely configurable camera sensor drivers expose the device's internal
+processing pipeline as one or more sub-devices with different cropping and
+scaling configurations. The output size of the device is the result of a series
+of cropping and scaling operations from the device's pixel array's size.
+
+An example of such a driver is the CCS driver.
+
+Register list based drivers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Register list based drivers generally, instead of being able to configure the device
+they control based on user requests, are limited to a number of preset
+configurations that combine a number of different parameters that on hardware
+level are independent. How a driver picks such configuration is based on the
+format set on a source pad at the end of the device's internal pipeline.
+
+Most sensor drivers are implemented this way.
+
+Frame interval configuration
+----------------------------
+
+There are two different methods for obtaining possibilities for different frame
+intervals as well as configuring the frame interval. Which one to implement
+depends on the type of the device.
+
+Raw camera sensors
+~~~~~~~~~~~~~~~~~~
+
+Instead of a high level parameter such as frame interval, the frame interval is
+a result of the configuration of a number of camera sensor implementation
+specific parameters. Luckily, these parameters tend to be the same for more or
+less all modern raw camera sensors.
+
+The frame interval is calculated using the following equation::
+
+       frame interval = (analogue crop width + horizontal blanking) *
+                        (analogue crop height + vertical blanking) / pixel rate
+
+The formula is bus independent and is applicable for raw timing parameters on
+large variety of devices beyond camera sensors. Devices that have no analogue
+crop, use the full source image size, i.e. pixel array size.
+
+Horizontal and vertical blanking are specified by ``V4L2_CID_HBLANK`` and
+``V4L2_CID_VBLANK``, respectively. The unit of the ``V4L2_CID_HBLANK`` control
+is pixels and the unit of the ``V4L2_CID_VBLANK`` is lines. The pixel rate in
+the sensor's **pixel array** is specified by ``V4L2_CID_PIXEL_RATE`` in the same
+sub-device. The unit of that control is pixels per second.
+
+Register list based drivers need to implement read-only sub-device nodes for the
+purpose. Devices that are not register list based need these to configure the
+device's internal processing pipeline.
+
+The first entity in the linear pipeline is the pixel array. The pixel array may
+be followed by other entities that are there to allow configuring binning,
+skipping, scaling or digital crop, see :ref:`VIDIOC_SUBDEV_G_SELECTION
+<VIDIOC_SUBDEV_G_SELECTION>`.
+
+USB cameras etc. devices
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+USB video class hardware, as well as many cameras offering a similar higher
+level interface natively, generally use the concept of frame interval (or frame
+rate) on device level in firmware or hardware. This means lower level controls
+implemented by raw cameras may not be used on uAPI (or even kAPI) to control the
+frame interval on these devices.
+
+Rotation, orientation and flipping
+----------------------------------
+
+Some systems have the camera sensor mounted upside down compared to its natural
+mounting rotation. In such cases, drivers shall expose the information to
+userspace with the :ref:`V4L2_CID_CAMERA_SENSOR_ROTATION
+<v4l2-camera-sensor-rotation>` control.
+
+Sensor drivers shall also report the sensor's mounting orientation with the
+:ref:`V4L2_CID_CAMERA_SENSOR_ORIENTATION <v4l2-camera-sensor-orientation>`.
+
+Sensor drivers that have any vertical or horizontal flips embedded in the
+register programming sequences shall initialize the :ref:`V4L2_CID_HFLIP
+<v4l2-cid-hflip>` and :ref:`V4L2_CID_VFLIP <v4l2-cid-vflip>` controls with the
+values programmed by the register sequences. The default values of these
+controls shall be 0 (disabled). Especially these controls shall not be inverted,
+independently of the sensor's mounting rotation.
index 6708d649afd75b21386566613fa9f89891c5a40b..1726f8ec86fa48fe80bc9c93893d3cdab253fecc 100644 (file)
@@ -32,11 +32,13 @@ For more details see the file COPYING in the source distribution of Linux.
        :numbered:
 
        aspeed-video
+       camera-sensor
        ccs
        cx2341x-uapi
        dw100
        imx-uapi
        max2175
+       npcm-video
        omap3isp-uapi
        st-vgxy61
        uvcvideo
diff --git a/Documentation/userspace-api/media/drivers/npcm-video.rst b/Documentation/userspace-api/media/drivers/npcm-video.rst
new file mode 100644 (file)
index 0000000..b47771d
--- /dev/null
@@ -0,0 +1,66 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. include:: <isonum.txt>
+
+NPCM video driver
+=================
+
+This driver is used to control the Video Capture/Differentiation (VCD) engine
+and Encoding Compression Engine (ECE) present on Nuvoton NPCM SoCs. The VCD can
+capture a frame from digital video input and compare two frames in memory, and
+the ECE can compress the frame data into HEXTILE format.
+
+Driver-specific Controls
+------------------------
+
+V4L2_CID_NPCM_CAPTURE_MODE
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The VCD engine supports two modes:
+
+- COMPLETE mode:
+
+  Capture the next complete frame into memory.
+
+- DIFF mode:
+
+  Compare the incoming frame with the frame stored in memory, and updates the
+  differentiated frame in memory.
+
+Application can use ``V4L2_CID_NPCM_CAPTURE_MODE`` control to set the VCD mode
+with different control values (enum v4l2_npcm_capture_mode):
+
+- ``V4L2_NPCM_CAPTURE_MODE_COMPLETE``: will set VCD to COMPLETE mode.
+- ``V4L2_NPCM_CAPTURE_MODE_DIFF``: will set VCD to DIFF mode.
+
+V4L2_CID_NPCM_RECT_COUNT
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+If using V4L2_PIX_FMT_HEXTILE format, VCD will capture frame data and then ECE
+will compress the data into HEXTILE rectangles and store them in V4L2 video
+buffer with the layout defined in Remote Framebuffer Protocol:
+::
+
+           (RFC 6143, https://www.rfc-editor.org/rfc/rfc6143.html#section-7.6.1)
+
+           +--------------+--------------+-------------------+
+           | No. of bytes | Type [Value] | Description       |
+           +--------------+--------------+-------------------+
+           | 2            | U16          | x-position        |
+           | 2            | U16          | y-position        |
+           | 2            | U16          | width             |
+           | 2            | U16          | height            |
+           | 4            | S32          | encoding-type (5) |
+           +--------------+--------------+-------------------+
+           |             HEXTILE rectangle data              |
+           +-------------------------------------------------+
+
+Application can get the video buffer through VIDIOC_DQBUF, and followed by
+calling ``V4L2_CID_NPCM_RECT_COUNT`` control to get the number of HEXTILE
+rectangles in this buffer.
+
+References
+----------
+include/uapi/linux/npcm-video.h
+
+**Copyright** |copy| 2022 Nuvoton Technologies
index e595d0bea10951d6110ca08d574496c40837d30c..4e8defd3612b043449ae15da22fd9332ff4b5a7e 100644 (file)
@@ -59,9 +59,7 @@ Generic Error Codes
 
     -  -  ``ENOTTY``
 
-       -  The ioctl is not supported by the driver, actually meaning that
-         the required functionality is not available, or the file
-         descriptor is not for a media device.
+       -  The ioctl is not supported by the file descriptor.
 
     -  -  ``ENOSPC``
 
index 04dec3e570edebd04115bfd56373cafe89a44bce..52bbee81c0807833404ee9d9184ce62f1c2603ce 100644 (file)
@@ -549,9 +549,9 @@ Buffer Flags
       - 0x00000400
       - The buffer has been prepared for I/O and can be queued by the
        application. Drivers set or clear this flag when the
-       :ref:`VIDIOC_QUERYBUF`,
+       :ref:`VIDIOC_QUERYBUF <VIDIOC_QUERYBUF>`,
        :ref:`VIDIOC_PREPARE_BUF <VIDIOC_QBUF>`,
-       :ref:`VIDIOC_QBUF` or
+       :ref:`VIDIOC_QBUF <VIDIOC_QBUF>` or
        :ref:`VIDIOC_DQBUF <VIDIOC_QBUF>` ioctl is called.
     * .. _`V4L2-BUF-FLAG-NO-CACHE-INVALIDATE`:
 
index 4463fce694b08d2f6142b9636832ed8f0ea0f53a..57893814a1e5d9382eeff7ae203ab6159a772c15 100644 (file)
@@ -143,9 +143,13 @@ Control IDs
     recognise the difference between digital and analogue gain use
     controls ``V4L2_CID_DIGITAL_GAIN`` and ``V4L2_CID_ANALOGUE_GAIN``.
 
+.. _v4l2-cid-hflip:
+
 ``V4L2_CID_HFLIP`` ``(boolean)``
     Mirror the picture horizontally.
 
+.. _v4l2-cid-vflip:
+
 ``V4L2_CID_VFLIP`` ``(boolean)``
     Mirror the picture vertically.
 
index a4f1df7093e8e749d8d03610edc089ebab7b5eea..43988516acddfa175ca6d3444760f361b0f3be58 100644 (file)
@@ -579,20 +579,19 @@ is started.
 
 There are three steps in configuring the streams:
 
-1) Set up links. Connect the pads between sub-devices using the :ref:`Media
-Controller API <media_controller>`
+1. Set up links. Connect the pads between sub-devices using the
+   :ref:`Media Controller API <media_controller>`
 
-2) Streams. Streams are declared and their routing is configured by
-setting the routing table for the sub-device using
-:ref:`VIDIOC_SUBDEV_S_ROUTING <VIDIOC_SUBDEV_G_ROUTING>` ioctl. Note that
-setting the routing table will reset formats and selections in the
-sub-device to default values.
+2. Streams. Streams are declared and their routing is configured by setting the
+   routing table for the sub-device using :ref:`VIDIOC_SUBDEV_S_ROUTING
+   <VIDIOC_SUBDEV_G_ROUTING>` ioctl. Note that setting the routing table will
+   reset formats and selections in the sub-device to default values.
 
-3) Configure formats and selections. Formats and selections of each stream
-are configured separately as documented for plain sub-devices in
-:ref:`format-propagation`. The stream ID is set to the same stream ID
-associated with either sink or source pads of routes configured using the
-:ref:`VIDIOC_SUBDEV_S_ROUTING <VIDIOC_SUBDEV_G_ROUTING>` ioctl.
+3. Configure formats and selections. Formats and selections of each stream are
+   configured separately as documented for plain sub-devices in
+   :ref:`format-propagation`. The stream ID is set to the same stream ID
+   associated with either sink or source pads of routes configured using the
+   :ref:`VIDIOC_SUBDEV_S_ROUTING <VIDIOC_SUBDEV_G_ROUTING>` ioctl.
 
 Multiplexed streams setup example
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -618,11 +617,11 @@ modeled as V4L2 devices, exposed to userspace via /dev/videoX nodes.
 
 To configure this pipeline, the userspace must take the following steps:
 
-1) Set up media links between entities: connect the sensors to the bridge,
-bridge to the receiver, and the receiver to the DMA engines. This step does
-not differ from normal non-multiplexed media controller setup.
+1. Set up media links between entities: connect the sensors to the bridge,
+   bridge to the receiver, and the receiver to the DMA engines. This step does
+   not differ from normal non-multiplexed media controller setup.
 
-2) Configure routing
+2. Configure routing
 
 .. flat-table:: Bridge routing table
     :header-rows:  1
@@ -656,14 +655,14 @@ not differ from normal non-multiplexed media controller setup.
       - V4L2_SUBDEV_ROUTE_FL_ACTIVE
       - Pixel data stream from Sensor B
 
-3) Configure formats and selections
+3. Configure formats and selections
 
-After configuring routing, the next step is configuring the formats and
-selections for the streams. This is similar to performing this step without
-streams, with just one exception: the ``stream`` field needs to be assigned
-to the value of the stream ID.
+   After configuring routing, the next step is configuring the formats and
+   selections for the streams. This is similar to performing this step without
+   streams, with just one exception: the ``stream`` field needs to be assigned
+   to the value of the stream ID.
 
-A common way to accomplish this is to start from the sensors and propagate the
-configurations along the stream towards the receiver,
-using :ref:`VIDIOC_SUBDEV_S_FMT <VIDIOC_SUBDEV_G_FMT>` ioctls to configure each
-stream endpoint in each sub-device.
+   A common way to accomplish this is to start from the sensors and propagate
+   the configurations along the stream towards the receiver, using
+   :ref:`VIDIOC_SUBDEV_S_FMT <VIDIOC_SUBDEV_G_FMT>` ioctls to configure each
+   stream endpoint in each sub-device.
index e17f056b129f0f7f0d30567f1c6bf8b49a3fd47e..4b19bcb4bd80d0c0994233d0288ec068fc612fb8 100644 (file)
@@ -33,6 +33,27 @@ current DV timings they use the
 the DV timings as seen by the video receiver applications use the
 :ref:`VIDIOC_QUERY_DV_TIMINGS` ioctl.
 
+When the hardware detects a video source change (e.g. the video
+signal appears or disappears, or the video resolution changes), then
+it will issue a `V4L2_EVENT_SOURCE_CHANGE` event. Use the
+:ref:`ioctl VIDIOC_SUBSCRIBE_EVENT <VIDIOC_SUBSCRIBE_EVENT>` and the
+:ref:`VIDIOC_DQEVENT` to check if this event was reported.
+
+If the video signal changed, then the application has to stop
+streaming, free all buffers, and call the :ref:`VIDIOC_QUERY_DV_TIMINGS`
+to obtain the new video timings, and if they are valid, it can set
+those by calling the :ref:`ioctl VIDIOC_S_DV_TIMINGS <VIDIOC_G_DV_TIMINGS>`.
+This will also update the format, so use the :ref:`ioctl VIDIOC_G_FMT <VIDIOC_G_FMT>`
+to obtain the new format. Now the application can allocate new buffers
+and start streaming again.
+
+The :ref:`VIDIOC_QUERY_DV_TIMINGS` will just report what the
+hardware detects, it will never change the configuration. If the
+currently set timings and the actually detected timings differ, then
+typically this will mean that you will not be able to capture any
+video. The correct approach is to rely on the `V4L2_EVENT_SOURCE_CHANGE`
+event so you know when something changed.
+
 Applications can make use of the :ref:`input-capabilities` and
 :ref:`output-capabilities` flags to determine whether the digital
 video ioctls can be used with the given input or output.
index 296ad2025e8d3abfda5486ee3004f2bbb2d53a4b..886ba7b08d6bfc1c37950e26e6b94ff89b658716 100644 (file)
@@ -288,6 +288,13 @@ please make a proposal on the linux-media mailing list.
       - 'MT2110R'
       - This format is two-planar 10-Bit raster mode and having similitude with
         ``V4L2_PIX_FMT_MM21`` in term of alignment and tiling. Used for AVC.
+    * .. _V4L2-PIX-FMT-HEXTILE:
+
+      - ``V4L2_PIX_FMT_HEXTILE``
+      - 'HXTL'
+      - Compressed format used by Nuvoton NPCM video driver. This format is
+        defined in Remote Framebuffer Protocol (RFC 6143, chapter 7.7.4 Hextile
+        Encoding).
 .. raw:: latex
 
     \normalsize
index b6e79e2f8ce46aff0dabf5fe22773219b0b5b58c..7c3810ff783c3414fddbe5f8a0db72b4341464a1 100644 (file)
@@ -60,7 +60,7 @@ Each cell is one byte.
           G\ :sub:`10low`\ (bits 3--0)
        -  G\ :sub:`12high`
        -  R\ :sub:`13high`
-       -  R\ :sub:`13low`\ (bits 3--2)
+       -  R\ :sub:`13low`\ (bits 7--4)
 
           G\ :sub:`12low`\ (bits 3--0)
     -  -  start + 12:
@@ -82,6 +82,6 @@ Each cell is one byte.
           G\ :sub:`30low`\ (bits 3--0)
        -  G\ :sub:`32high`
        -  R\ :sub:`33high`
-       -  R\ :sub:`33low`\ (bits 3--2)
+       -  R\ :sub:`33low`\ (bits 7--4)
 
           G\ :sub:`32low`\ (bits 3--0)
index a3a35eeed70846bab58d1d841bdb6aa7725b4bb1..eb3cd20b0cf2e3d68fffc349c077fb2f565a45f8 100644 (file)
@@ -949,6 +949,78 @@ The following tables list existing packed RGB formats.
       - b\ :sub:`2`
       - b\ :sub:`1`
       - b\ :sub:`0`
+    * .. _MEDIA-BUS-FMT-RGB666-2X9-BE:
+
+      - MEDIA_BUS_FMT_RGB666_2X9_BE
+      - 0x1025
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - r\ :sub:`5`
+      - r\ :sub:`4`
+      - r\ :sub:`3`
+      - r\ :sub:`2`
+      - r\ :sub:`1`
+      - r\ :sub:`0`
+      - g\ :sub:`5`
+      - g\ :sub:`4`
+      - g\ :sub:`3`
+    * -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      -
+      - g\ :sub:`2`
+      - g\ :sub:`1`
+      - g\ :sub:`0`
+      - b\ :sub:`5`
+      - b\ :sub:`4`
+      - b\ :sub:`3`
+      - b\ :sub:`2`
+      - b\ :sub:`1`
+      - b\ :sub:`0`
     * .. _MEDIA-BUS-FMT-BGR666-1X18:
 
       - MEDIA_BUS_FMT_BGR666_1X18
index d42727703adeb51e552668622f8f88ef645cfcc9..97f51d5ec1cfd715487a616c78afd40324082dfc 100644 (file)
@@ -294,6 +294,8 @@ F:  drivers/pnp/pnpacpi/
 F:     include/acpi/
 F:     include/linux/acpi.h
 F:     include/linux/fwnode.h
+F:     include/linux/fw_table.h
+F:     lib/fw_table.c
 F:     tools/power/acpi/
 
 ACPI APEI
@@ -309,7 +311,7 @@ ACPI COMPONENT ARCHITECTURE (ACPICA)
 M:     Robert Moore <robert.moore@intel.com>
 M:     "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
 L:     linux-acpi@vger.kernel.org
-L:     acpica-devel@lists.linuxfoundation.org
+L:     acpica-devel@lists.linux.dev
 S:     Supported
 W:     https://acpica.org/
 W:     https://github.com/acpica/acpica/
@@ -2508,6 +2510,18 @@ F:       drivers/rtc/rtc-nct3018y.c
 F:     include/dt-bindings/clock/nuvoton,npcm7xx-clock.h
 F:     include/dt-bindings/clock/nuvoton,npcm845-clk.h
 
+ARM/NUVOTON NPCM VIDEO ENGINE DRIVER
+M:     Joseph Liu <kwliu@nuvoton.com>
+M:     Marvin Lin <kflin@nuvoton.com>
+L:     linux-media@vger.kernel.org
+L:     openbmc@lists.ozlabs.org (moderated for non-subscribers)
+S:     Maintained
+F:     Documentation/devicetree/bindings/media/nuvoton,npcm-ece.yaml
+F:     Documentation/devicetree/bindings/media/nuvoton,npcm-vcd.yaml
+F:     Documentation/userspace-api/media/drivers/npcm-video.rst
+F:     drivers/media/platform/nuvoton/
+F:     include/uapi/linux/npcm-video.h
+
 ARM/NUVOTON WPCM450 ARCHITECTURE
 M:     Jonathan Neuschäfer <j.neuschaefer@gmx.net>
 L:     openbmc@lists.ozlabs.org (moderated for non-subscribers)
@@ -5244,6 +5258,7 @@ L:        linux-cxl@vger.kernel.org
 S:     Maintained
 F:     drivers/cxl/
 F:     include/uapi/linux/cxl_mem.h
+F:     tools/testing/cxl/
 
 COMPUTE EXPRESS LINK PMU (CPMU)
 M:     Jonathan Cameron <jonathan.cameron@huawei.com>
@@ -6140,6 +6155,13 @@ L:       linux-gpio@vger.kernel.org
 S:     Maintained
 F:     drivers/gpio/gpio-gpio-mm.c
 
+DIGITEQ AUTOMOTIVE MGB4 V4L2 DRIVER
+M:     Martin Tuma <martin.tuma@digiteqautomotive.com>
+L:     linux-media@vger.kernel.org
+S:     Maintained
+F:     Documentation/admin-guide/media/mgb4.rst
+F:     drivers/media/pci/mgb4/
+
 DIOLAN U2C-12 I2C DRIVER
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     linux-i2c@vger.kernel.org
@@ -6512,7 +6534,7 @@ F:        drivers/gpu/drm/ast/
 
 DRM DRIVER FOR BOCHS VIRTUAL GPU
 M:     Gerd Hoffmann <kraxel@redhat.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     drivers/gpu/drm/tiny/bochs.c
@@ -6759,7 +6781,7 @@ F:        drivers/gpu/drm/tiny/repaper.c
 DRM DRIVER FOR QEMU'S CIRRUS DEVICE
 M:     Dave Airlie <airlied@redhat.com>
 M:     Gerd Hoffmann <kraxel@redhat.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Obsolete
 W:     https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -6768,7 +6790,7 @@ F:        drivers/gpu/drm/tiny/cirrus.c
 DRM DRIVER FOR QXL VIRTUAL GPU
 M:     Dave Airlie <airlied@redhat.com>
 M:     Gerd Hoffmann <kraxel@redhat.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 L:     spice-devel@lists.freedesktop.org
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
@@ -7111,7 +7133,7 @@ F:        drivers/gpu/host1x/
 F:     include/linux/host1x.h
 F:     include/uapi/drm/tegra_drm.h
 
-DRM DRIVERS FOR RENESAS
+DRM DRIVERS FOR RENESAS R-CAR
 M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 M:     Kieran Bingham <kieran.bingham+renesas@ideasonboard.com>
 L:     dri-devel@lists.freedesktop.org
@@ -7122,7 +7144,16 @@ F:       Documentation/devicetree/bindings/display/bridge/renesas,dsi-csi2-tx.yaml
 F:     Documentation/devicetree/bindings/display/bridge/renesas,dw-hdmi.yaml
 F:     Documentation/devicetree/bindings/display/bridge/renesas,lvds.yaml
 F:     Documentation/devicetree/bindings/display/renesas,du.yaml
-F:     drivers/gpu/drm/renesas/
+F:     drivers/gpu/drm/renesas/rcar-du/
+
+DRM DRIVERS FOR RENESAS SHMOBILE
+M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+M:     Geert Uytterhoeven <geert+renesas@glider.be>
+L:     dri-devel@lists.freedesktop.org
+L:     linux-renesas-soc@vger.kernel.org
+S:     Supported
+F:     Documentation/devicetree/bindings/display/renesas,shmobile-lcdc.yaml
+F:     drivers/gpu/drm/renesas/shmobile/
 F:     include/linux/platform_data/shmob_drm.h
 
 DRM DRIVERS FOR ROCKCHIP
@@ -7858,7 +7889,7 @@ F:        drivers/net/can/usb/etas_es58x/
 ETHERNET BRIDGE
 M:     Roopa Prabhu <roopa@nvidia.com>
 M:     Nikolay Aleksandrov <razor@blackwall.org>
-L:     bridge@lists.linux-foundation.org (moderated for non-subscribers)
+L:     bridge@lists.linux.dev
 L:     netdev@vger.kernel.org
 S:     Maintained
 W:     http://www.linuxfoundation.org/en/Net:Bridge
@@ -8134,6 +8165,27 @@ F:       include/linux/fs_types.h
 F:     include/uapi/linux/fs.h
 F:     include/uapi/linux/openat2.h
 
+FILESYSTEMS [EXPORTFS]
+M:     Chuck Lever <chuck.lever@oracle.com>
+M:     Jeff Layton <jlayton@kernel.org>
+R:     Amir Goldstein <amir73il@gmail.com>
+L:     linux-fsdevel@vger.kernel.org
+L:     linux-nfs@vger.kernel.org
+S:     Supported
+F:     Documentation/filesystems/nfs/exporting.rst
+F:     fs/exportfs/
+F:     fs/fhandle.c
+F:     include/linux/exportfs.h
+
+FILESYSTEMS [IOMAP]
+M:     Christian Brauner <brauner@kernel.org>
+R:     Darrick J. Wong <djwong@kernel.org>
+L:     linux-xfs@vger.kernel.org
+L:     linux-fsdevel@vger.kernel.org
+S:     Supported
+F:     fs/iomap/
+F:     include/linux/iomap.h
+
 FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:     Riku Voipio <riku.voipio@iki.fi>
 L:     linux-hwmon@vger.kernel.org
@@ -8822,6 +8874,7 @@ F:        include/linux/phy/
 GENERIC PINCTRL I2C DEMULTIPLEXER DRIVER
 M:     Wolfram Sang <wsa+renesas@sang-engineering.com>
 S:     Supported
+F:     Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.yaml
 F:     drivers/i2c/muxes/i2c-demux-pinctrl.c
 
 GENERIC PM DOMAINS
@@ -11053,15 +11106,6 @@ L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/sgi/ioc3-eth.c
 
-IOMAP FILESYSTEM LIBRARY
-M:     Darrick J. Wong <djwong@kernel.org>
-L:     linux-xfs@vger.kernel.org
-L:     linux-fsdevel@vger.kernel.org
-S:     Supported
-T:     git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
-F:     fs/iomap/
-F:     include/linux/iomap.h
-
 IOMMU DMA-API LAYER
 M:     Robin Murphy <robin.murphy@arm.com>
 L:     iommu@lists.linux.dev
@@ -11525,7 +11569,6 @@ S:      Supported
 W:     http://nfs.sourceforge.net/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux.git
 F:     Documentation/filesystems/nfs/
-F:     fs/exportfs/
 F:     fs/lockd/
 F:     fs/nfs_common/
 F:     fs/nfsd/
@@ -13786,6 +13829,12 @@ F:     drivers/infiniband/hw/mlx5/
 F:     include/linux/mlx5/
 F:     include/uapi/rdma/mlx5-abi.h
 
+MELLANOX MLX5 VDPA DRIVER
+M:     Dragos Tatulea <dtatulea@nvidia.com>
+L:     virtualization@lists.linux-foundation.org
+S:     Supported
+F:     drivers/vdpa/mlx5/
+
 MELLANOX MLXCPLD I2C AND MUX DRIVER
 M:     Vadim Pasternak <vadimp@nvidia.com>
 M:     Michael Shych <michaelsh@nvidia.com>
@@ -14674,6 +14723,14 @@ L:     linux-mtd@lists.infradead.org
 S:     Maintained
 F:     drivers/mtd/devices/docg3*
 
+MT9M114 ONSEMI SENSOR DRIVER
+M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:     linux-media@vger.kernel.org
+S:     Maintained
+T:     git git://linuxtv.org/media_tree.git
+F:     Documentation/devicetree/bindings/media/i2c/onnn,mt9m114.yaml
+F:     drivers/media/i2c/mt9m114.c
+
 MT9P031 APTINA CAMERA SENSOR
 M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:     linux-media@vger.kernel.org
@@ -15916,7 +15973,7 @@ L:      linux-media@vger.kernel.org
 S:     Maintained
 T:     git git://linuxtv.org/media_tree.git
 F:     Documentation/devicetree/bindings/media/i2c/ovti,ov4689.yaml
-F:     drivers/media/i2c/ov5647.c
+F:     drivers/media/i2c/ov4689.c
 
 OMNIVISION OV5640 SENSOR DRIVER
 M:     Steve Longerbeam <slongerbeam@gmail.com>
@@ -16006,8 +16063,7 @@ F:      Documentation/devicetree/bindings/media/i2c/ovti,ov8858.yaml
 F:     drivers/media/i2c/ov8858.c
 
 OMNIVISION OV9282 SENSOR DRIVER
-M:     Paul J. Murphy <paul.j.murphy@intel.com>
-M:     Daniele Alessandrelli <daniele.alessandrelli@intel.com>
+M:     Dave Stevenson <dave.stevenson@raspberrypi.com>
 L:     linux-media@vger.kernel.org
 S:     Maintained
 T:     git git://linuxtv.org/media_tree.git
@@ -16338,7 +16394,7 @@ M:      Juergen Gross <jgross@suse.com>
 R:     Ajay Kaher <akaher@vmware.com>
 R:     Alexey Makhalov <amakhalov@vmware.com>
 R:     VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 L:     x86@kernel.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
@@ -18656,6 +18712,7 @@ F:      sound/soc/rockchip/rockchip_i2s_tdm.*
 
 ROCKCHIP ISP V1 DRIVER
 M:     Dafna Hirschfeld <dafna@fastmail.com>
+M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:     linux-media@vger.kernel.org
 L:     linux-rockchip@lists.infradead.org
 S:     Maintained
@@ -20150,19 +20207,15 @@ T:    git git://linuxtv.org/media_tree.git
 F:     drivers/media/i2c/imx319.c
 
 SONY IMX334 SENSOR DRIVER
-M:     Paul J. Murphy <paul.j.murphy@intel.com>
-M:     Daniele Alessandrelli <daniele.alessandrelli@intel.com>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 T:     git git://linuxtv.org/media_tree.git
 F:     Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
 F:     drivers/media/i2c/imx334.c
 
 SONY IMX335 SENSOR DRIVER
-M:     Paul J. Murphy <paul.j.murphy@intel.com>
-M:     Daniele Alessandrelli <daniele.alessandrelli@intel.com>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 T:     git git://linuxtv.org/media_tree.git
 F:     Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
 F:     drivers/media/i2c/imx335.c
@@ -20175,10 +20228,8 @@ T:     git git://linuxtv.org/media_tree.git
 F:     drivers/media/i2c/imx355.c
 
 SONY IMX412 SENSOR DRIVER
-M:     Paul J. Murphy <paul.j.murphy@intel.com>
-M:     Daniele Alessandrelli <daniele.alessandrelli@intel.com>
 L:     linux-media@vger.kernel.org
-S:     Maintained
+S:     Orphan
 T:     git git://linuxtv.org/media_tree.git
 F:     Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
 F:     drivers/media/i2c/imx412.c
@@ -21742,6 +21793,13 @@ F:     Documentation/devicetree/bindings/media/i2c/ti,ds90*
 F:     drivers/media/i2c/ds90*
 F:     include/media/i2c/ds90*
 
+TI J721E CSI2RX DRIVER
+M:     Jai Luthra <j-luthra@ti.com>
+L:     linux-media@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/media/ti,j721e-csi2rx-shim.yaml
+F:     drivers/media/platform/ti/j721e-csi2rx/
+
 TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
 M:     Nishanth Menon <nm@ti.com>
 M:     Santosh Shilimkar <ssantosh@kernel.org>
@@ -22058,6 +22116,14 @@ W:     https://github.com/srcres258/linux-doc
 T:     git git://github.com/srcres258/linux-doc.git doc-zh-tw
 F:     Documentation/translations/zh_TW/
 
+TRUSTED SECURITY MODULE (TSM) ATTESTATION REPORTS
+M:     Dan Williams <dan.j.williams@intel.com>
+L:     linux-coco@lists.linux.dev
+S:     Maintained
+F:     Documentation/ABI/testing/configfs-tsm
+F:     drivers/virt/coco/tsm.c
+F:     include/linux/tsm.h
+
 TTY LAYER AND SERIAL DRIVERS
 M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 M:     Jiri Slaby <jirislaby@kernel.org>
@@ -22906,7 +22972,7 @@ VIRTIO AND VHOST VSOCK DRIVER
 M:     Stefan Hajnoczi <stefanha@redhat.com>
 M:     Stefano Garzarella <sgarzare@redhat.com>
 L:     kvm@vger.kernel.org
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/vhost/vsock.c
@@ -22918,7 +22984,7 @@ F:      net/vmw_vsock/virtio_transport_common.c
 VIRTIO BALLOON
 M:     "Michael S. Tsirkin" <mst@redhat.com>
 M:     David Hildenbrand <david@redhat.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 F:     drivers/virtio/virtio_balloon.c
 F:     include/linux/balloon_compaction.h
@@ -22930,7 +22996,7 @@ M:      "Michael S. Tsirkin" <mst@redhat.com>
 M:     Jason Wang <jasowang@redhat.com>
 R:     Paolo Bonzini <pbonzini@redhat.com>
 R:     Stefan Hajnoczi <stefanha@redhat.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 F:     drivers/block/virtio_blk.c
 F:     drivers/scsi/virtio_scsi.c
@@ -22939,7 +23005,7 @@ F:      include/uapi/linux/virtio_scsi.h
 
 VIRTIO CONSOLE DRIVER
 M:     Amit Shah <amit@kernel.org>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 F:     drivers/char/virtio_console.c
 F:     include/linux/virtio_console.h
@@ -22949,7 +23015,7 @@ VIRTIO CORE AND NET DRIVERS
 M:     "Michael S. Tsirkin" <mst@redhat.com>
 M:     Jason Wang <jasowang@redhat.com>
 R:     Xuan Zhuo <xuanzhuo@linux.alibaba.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 F:     Documentation/ABI/testing/sysfs-bus-vdpa
 F:     Documentation/ABI/testing/sysfs-class-vduse
@@ -22968,7 +23034,7 @@ F:      tools/virtio/
 
 VIRTIO CRYPTO DRIVER
 M:     Gonglei <arei.gonglei@huawei.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 L:     linux-crypto@vger.kernel.org
 S:     Maintained
 F:     drivers/crypto/virtio/
@@ -22979,7 +23045,7 @@ M:      Cornelia Huck <cohuck@redhat.com>
 M:     Halil Pasic <pasic@linux.ibm.com>
 M:     Eric Farman <farman@linux.ibm.com>
 L:     linux-s390@vger.kernel.org
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 L:     kvm@vger.kernel.org
 S:     Supported
 F:     arch/s390/include/uapi/asm/virtio-ccw.h
@@ -22989,7 +23055,7 @@ VIRTIO FILE SYSTEM
 M:     Vivek Goyal <vgoyal@redhat.com>
 M:     Stefan Hajnoczi <stefanha@redhat.com>
 M:     Miklos Szeredi <miklos@szeredi.hu>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 L:     linux-fsdevel@vger.kernel.org
 S:     Supported
 W:     https://virtio-fs.gitlab.io/
@@ -23001,7 +23067,7 @@ VIRTIO GPIO DRIVER
 M:     Enrico Weigelt, metux IT consult <info@metux.net>
 M:     Viresh Kumar <vireshk@kernel.org>
 L:     linux-gpio@vger.kernel.org
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 F:     drivers/gpio/gpio-virtio.c
 F:     include/uapi/linux/virtio_gpio.h
@@ -23012,7 +23078,7 @@ M:      Gerd Hoffmann <kraxel@redhat.com>
 R:     Gurchetan Singh <gurchetansingh@chromium.org>
 R:     Chia-I Wu <olvaffe@gmail.com>
 L:     dri-devel@lists.freedesktop.org
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 T:     git git://anongit.freedesktop.org/drm/drm-misc
 F:     drivers/gpu/drm/ci/xfails/virtio*
@@ -23023,7 +23089,7 @@ VIRTIO HOST (VHOST)
 M:     "Michael S. Tsirkin" <mst@redhat.com>
 M:     Jason Wang <jasowang@redhat.com>
 L:     kvm@vger.kernel.org
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 L:     netdev@vger.kernel.org
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mst/vhost.git
@@ -23039,7 +23105,7 @@ M:      Jason Wang <jasowang@redhat.com>
 M:     Mike Christie <michael.christie@oracle.com>
 R:     Paolo Bonzini <pbonzini@redhat.com>
 R:     Stefan Hajnoczi <stefanha@redhat.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 F:     drivers/vhost/scsi.c
 
@@ -23047,7 +23113,7 @@ VIRTIO I2C DRIVER
 M:     Conghui Chen <conghui.chen@intel.com>
 M:     Viresh Kumar <viresh.kumar@linaro.org>
 L:     linux-i2c@vger.kernel.org
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 F:     drivers/i2c/busses/i2c-virtio.c
 F:     include/uapi/linux/virtio_i2c.h
@@ -23060,14 +23126,14 @@ F:    include/uapi/linux/virtio_input.h
 
 VIRTIO IOMMU DRIVER
 M:     Jean-Philippe Brucker <jean-philippe@linaro.org>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 F:     drivers/iommu/virtio-iommu.c
 F:     include/uapi/linux/virtio_iommu.h
 
 VIRTIO MEM DRIVER
 M:     David Hildenbrand <david@redhat.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 W:     https://virtio-mem.gitlab.io/
 F:     drivers/virtio/virtio_mem.c
@@ -23075,7 +23141,7 @@ F:      include/uapi/linux/virtio_mem.h
 
 VIRTIO PMEM DRIVER
 M:     Pankaj Gupta <pankaj.gupta.linux@gmail.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 S:     Maintained
 F:     drivers/nvdimm/nd_virtio.c
 F:     drivers/nvdimm/virtio_pmem.c
@@ -23083,7 +23149,7 @@ F:      drivers/nvdimm/virtio_pmem.c
 VIRTIO SOUND DRIVER
 M:     Anton Yakovlev <anton.yakovlev@opensynergy.com>
 M:     "Michael S. Tsirkin" <mst@redhat.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     include/uapi/linux/virtio_snd.h
@@ -23132,16 +23198,9 @@ W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
 F:     drivers/media/test-drivers/vivid/*
 
-VLYNQ BUS
-M:     Florian Fainelli <f.fainelli@gmail.com>
-L:     openwrt-devel@lists.openwrt.org (subscribers-only)
-S:     Maintained
-F:     drivers/vlynq/vlynq.c
-F:     include/linux/vlynq.h
-
 VM SOCKETS (AF_VSOCK)
 M:     Stefano Garzarella <sgarzare@redhat.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/vsockmon.c
@@ -23185,7 +23244,7 @@ VMWARE HYPERVISOR INTERFACE
 M:     Ajay Kaher <akaher@vmware.com>
 M:     Alexey Makhalov <amakhalov@vmware.com>
 R:     VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
-L:     virtualization@lists.linux-foundation.org
+L:     virtualization@lists.linux.dev
 L:     x86@kernel.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vmware
index de1566e32cb89d885841687dec53b0bf9dfff582..68e8301c0df2c1e44bb15034f1eaa9d896884d3e 100644 (file)
@@ -32,9 +32,6 @@ struct kprobe;
 
 void arch_remove_kprobe(struct kprobe *p);
 
-int kprobe_exceptions_notify(struct notifier_block *self,
-                            unsigned long val, void *data);
-
 struct prev_kprobe {
        struct kprobe *kp;
        unsigned long status;
index bb9f0e5b0b637c9a82cad10e5ec22b62b9ce5e83..10fd74bf85f9b45e592bbfa1248192bedbeb932c 100644 (file)
@@ -1076,7 +1076,6 @@ CONFIG_QCOM_IPCC=y
 CONFIG_OMAP_IOMMU=y
 CONFIG_OMAP_IOMMU_DEBUG=y
 CONFIG_ROCKCHIP_IOMMU=y
-CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_EXYNOS_IOMMU=y
 CONFIG_QCOM_IOMMU=y
index b2f0862f4bd918b4f5144c8a26238517f307cca3..7b1b41b4b160934af0e2050497e33b196df87f59 100644 (file)
@@ -477,7 +477,6 @@ CONFIG_LIRC=y
 CONFIG_RC_DEVICES=y
 CONFIG_IR_GPIO_TX=m
 CONFIG_IR_PWM_TX=m
-CONFIG_IR_RX51=m
 CONFIG_IR_SPI=m
 CONFIG_MEDIA_SUPPORT=m
 CONFIG_V4L_PLATFORM_DRIVERS=y
index 23c131b0854bd3373ad788a7861f63008c776010..9e81b1849e4c3a323315e86916ffefaee787d884 100644 (file)
@@ -100,7 +100,6 @@ CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_CONNECTOR=y
 CONFIG_MTD=y
-CONFIG_MTD_AR7_PARTS=m
 CONFIG_MTD_CMDLINE_PARTS=m
 CONFIG_MTD_OF_PARTS=m
 CONFIG_MTD_AFS_PARTS=m
index 613f07b8ce150596fc24684e590bb8b48ba42c05..8635b7216bfc5abdf4eb3906e14ab096a4d70ec5 100644 (file)
@@ -292,7 +292,6 @@ CONFIG_CHROME_PLATFORMS=y
 CONFIG_CROS_EC=y
 CONFIG_CROS_EC_I2C=m
 CONFIG_CROS_EC_SPI=m
-CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_ARCH_TEGRA_2x_SOC=y
 CONFIG_ARCH_TEGRA_3x_SOC=y
index 72529f5e2bed9598196b6bcb2d559269a2fc0809..a41b503b7dcde09cfc67bf939cd932b8b1611341 100644 (file)
@@ -23,6 +23,8 @@
 #define PMUSERENR              __ACCESS_CP15(c9,  0, c14, 0)
 #define PMINTENSET             __ACCESS_CP15(c9,  0, c14, 1)
 #define PMINTENCLR             __ACCESS_CP15(c9,  0, c14, 2)
+#define PMCEID2                        __ACCESS_CP15(c9,  0, c14, 4)
+#define PMCEID3                        __ACCESS_CP15(c9,  0, c14, 5)
 #define PMMIR                  __ACCESS_CP15(c9,  0, c14, 6)
 #define PMCCFILTR              __ACCESS_CP15(c14, 0, c15, 7)
 
@@ -150,21 +152,6 @@ static inline u64 read_pmccntr(void)
        return read_sysreg(PMCCNTR);
 }
 
-static inline void write_pmxevcntr(u32 val)
-{
-       write_sysreg(val, PMXEVCNTR);
-}
-
-static inline u32 read_pmxevcntr(void)
-{
-       return read_sysreg(PMXEVCNTR);
-}
-
-static inline void write_pmxevtyper(u32 val)
-{
-       write_sysreg(val, PMXEVTYPER);
-}
-
 static inline void write_pmcntenset(u32 val)
 {
        write_sysreg(val, PMCNTENSET);
@@ -205,16 +192,6 @@ static inline void write_pmuserenr(u32 val)
        write_sysreg(val, PMUSERENR);
 }
 
-static inline u32 read_pmceid0(void)
-{
-       return read_sysreg(PMCEID0);
-}
-
-static inline u32 read_pmceid1(void)
-{
-       return read_sysreg(PMCEID1);
-}
-
 static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u32 clr) {}
 static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
@@ -231,6 +208,7 @@ static inline void kvm_vcpu_pmu_resync_el0(void) {}
 
 /* PMU Version in DFR Register */
 #define ARMV8_PMU_DFR_VER_NI        0
+#define ARMV8_PMU_DFR_VER_V3P1      0x4
 #define ARMV8_PMU_DFR_VER_V3P4      0x5
 #define ARMV8_PMU_DFR_VER_V3P5      0x6
 #define ARMV8_PMU_DFR_VER_IMP_DEF   0xF
@@ -251,4 +229,24 @@ static inline bool is_pmuv3p5(int pmuver)
        return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
 }
 
+static inline u64 read_pmceid0(void)
+{
+       u64 val = read_sysreg(PMCEID0);
+
+       if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
+               val |= (u64)read_sysreg(PMCEID2) << 32;
+
+       return val;
+}
+
+static inline u64 read_pmceid1(void)
+{
+       u64 val = read_sysreg(PMCEID1);
+
+       if (read_pmuver() >= ARMV8_PMU_DFR_VER_V3P1)
+               val |= (u64)read_sysreg(PMCEID3) << 32;
+
+       return val;
+}
+
 #endif
index e26a278d301ab0e05a73158bbd8fb5263f2f84d6..5b8dbf1b0be49e483ecf8e1880c657cdcc3dc1a9 100644 (file)
@@ -40,8 +40,6 @@ struct kprobe_ctlblk {
 
 void arch_remove_kprobe(struct kprobe *);
 int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
-int kprobe_exceptions_notify(struct notifier_block *self,
-                            unsigned long val, void *data);
 
 /* optinsn template addresses */
 extern __visible kprobe_opcode_t optprobe_template_entry[];
index 4a74f3b6d775e635126b54dbb754c841c2200711..bf7de35ffcbc8ae440c34761d0a8578b9b69bc73 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&scp_pins>;
 
-       cros_ec {
+       cros-ec-rpmsg {
                compatible = "google,cros-ec-rpmsg";
                mediatek,rpmsg-name = "cros-ec-rpmsg";
        };
index 1447eed0ea3603308d4726ded46f47a5bd60071d..f2281250ac35da2514d73191cbcdb2e195afcbcb 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&scp_pins>;
 
-       cros-ec {
+       cros-ec-rpmsg {
                compatible = "google,cros-ec-rpmsg";
                mediatek,rpmsg-name = "cros-ec-rpmsg";
        };
index 18dc2fb3d7b7b2d0633bbe2f4ef18ec2e8ce4f67..c27404fa4418ad36682e33a92d073375bb8442b9 100644 (file)
@@ -46,12 +46,12 @@ static inline u32 read_pmuver(void)
                        ID_AA64DFR0_EL1_PMUVer_SHIFT);
 }
 
-static inline void write_pmcr(u32 val)
+static inline void write_pmcr(u64 val)
 {
        write_sysreg(val, pmcr_el0);
 }
 
-static inline u32 read_pmcr(void)
+static inline u64 read_pmcr(void)
 {
        return read_sysreg(pmcr_el0);
 }
@@ -71,21 +71,6 @@ static inline u64 read_pmccntr(void)
        return read_sysreg(pmccntr_el0);
 }
 
-static inline void write_pmxevcntr(u32 val)
-{
-       write_sysreg(val, pmxevcntr_el0);
-}
-
-static inline u32 read_pmxevcntr(void)
-{
-       return read_sysreg(pmxevcntr_el0);
-}
-
-static inline void write_pmxevtyper(u32 val)
-{
-       write_sysreg(val, pmxevtyper_el0);
-}
-
 static inline void write_pmcntenset(u32 val)
 {
        write_sysreg(val, pmcntenset_el0);
@@ -106,7 +91,7 @@ static inline void write_pmintenclr(u32 val)
        write_sysreg(val, pmintenclr_el1);
 }
 
-static inline void write_pmccfiltr(u32 val)
+static inline void write_pmccfiltr(u64 val)
 {
        write_sysreg(val, pmccfiltr_el0);
 }
@@ -126,12 +111,12 @@ static inline void write_pmuserenr(u32 val)
        write_sysreg(val, pmuserenr_el0);
 }
 
-static inline u32 read_pmceid0(void)
+static inline u64 read_pmceid0(void)
 {
        return read_sysreg(pmceid0_el0);
 }
 
-static inline u32 read_pmceid1(void)
+static inline u64 read_pmceid1(void)
 {
        return read_sysreg(pmceid1_el0);
 }
index 05cd82eeca1365b5f5be1f7b6ac107982a9afa14..be7a3680dadff7cc2859c3dcfdd703ce50e093e9 100644 (file)
@@ -37,8 +37,6 @@ struct kprobe_ctlblk {
 
 void arch_remove_kprobe(struct kprobe *);
 int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
-int kprobe_exceptions_notify(struct notifier_block *self,
-                            unsigned long val, void *data);
 void __kretprobe_trampoline(void);
 void __kprobes *trampoline_probe_handler(struct pt_regs *regs);
 
index 17f687510c4851ac7c0838c5cb76e5637275e341..d977713ec0ba85dd10d240e8b25906818fc3d99e 100644 (file)
@@ -54,7 +54,6 @@
        ALLOW_ERROR_INJECTION(__arm64_sys##name, ERRNO);                        \
        static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__));             \
        static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));      \
-       asmlinkage long __arm64_sys##name(const struct pt_regs *regs);          \
        asmlinkage long __arm64_sys##name(const struct pt_regs *regs)           \
        {                                                                       \
                return __se_sys##name(SC_ARM64_REGS_TO_ARGS(x,__VA_ARGS__));    \
index f6b2e2906fc9aa2dbe6b84c53e1d954e64e4bab3..646591c67e7a5c11856606d8caeffdccab541c49 100644 (file)
@@ -999,6 +999,37 @@ static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
        init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
 }
 
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+static bool enable_pseudo_nmi;
+
+static int __init early_enable_pseudo_nmi(char *p)
+{
+       return kstrtobool(p, &enable_pseudo_nmi);
+}
+early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
+
+static __init void detect_system_supports_pseudo_nmi(void)
+{
+       struct device_node *np;
+
+       if (!enable_pseudo_nmi)
+               return;
+
+       /*
+        * Detect broken MediaTek firmware that doesn't properly save and
+        * restore GIC priorities.
+        */
+       np = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
+       if (np && of_property_read_bool(np, "mediatek,broken-save-restore-fw")) {
+               pr_info("Pseudo-NMI disabled due to MediaTek Chromebook GICR save problem\n");
+               enable_pseudo_nmi = false;
+       }
+       of_node_put(np);
+}
+#else /* CONFIG_ARM64_PSEUDO_NMI */
+static inline void detect_system_supports_pseudo_nmi(void) { }
+#endif
+
 void __init init_cpu_features(struct cpuinfo_arm64 *info)
 {
        /* Before we start using the tables, make sure it is sorted */
@@ -1057,6 +1088,13 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
         */
        init_cpucap_indirect_list();
 
+       /*
+        * Detect broken pseudo-NMI. Must be called _before_ the call to
+        * setup_boot_cpu_capabilities() since it interacts with
+        * can_use_gic_priorities().
+        */
+       detect_system_supports_pseudo_nmi();
+
        /*
         * Detect and enable early CPU capabilities based on the boot CPU,
         * after we have initialised the CPU feature infrastructure.
@@ -2085,14 +2123,6 @@ static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
 #endif /* CONFIG_ARM64_E0PD */
 
 #ifdef CONFIG_ARM64_PSEUDO_NMI
-static bool enable_pseudo_nmi;
-
-static int __init early_enable_pseudo_nmi(char *p)
-{
-       return kstrtobool(p, &enable_pseudo_nmi);
-}
-early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);
-
 static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
                                   int scope)
 {
index be95b523c101811569c4ef73da8f39e184ecbc7b..defbab84e9e5c7968cabe030e28ef261c9507fb7 100644 (file)
@@ -965,10 +965,7 @@ static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 
 static bool ipi_should_be_nmi(enum ipi_msg_type ipi)
 {
-       DECLARE_STATIC_KEY_FALSE(supports_pseudo_nmis);
-
-       if (!system_uses_irq_prio_masking() ||
-           !static_branch_likely(&supports_pseudo_nmis))
+       if (!system_uses_irq_prio_masking())
                return false;
 
        switch (ipi) {
index caad195ba5c196ca8862b23af55e857f1e751e67..a2311c4bce6a6ac17044b6a1d3ea29782abf5395 100644 (file)
@@ -2,7 +2,6 @@
 # All platforms listed in alphabetic order
 
 platform-$(CONFIG_MIPS_ALCHEMY)                += alchemy/
-platform-$(CONFIG_AR7)                 += ar7/
 platform-$(CONFIG_ATH25)               += ath25/
 platform-$(CONFIG_ATH79)               += ath79/
 platform-$(CONFIG_BCM47XX)             += bcm47xx/
index bc8421859006fa4a1a57cf5c0bf8ac9ab28c8efe..76db82542519c5555f384fb2aaa5e5587a6ee820 100644 (file)
@@ -202,28 +202,6 @@ config MIPS_ALCHEMY
        select SYS_SUPPORTS_ZBOOT
        select COMMON_CLK
 
-config AR7
-       bool "Texas Instruments AR7"
-       select BOOT_ELF32
-       select COMMON_CLK
-       select DMA_NONCOHERENT
-       select CEVT_R4K
-       select CSRC_R4K
-       select IRQ_MIPS_CPU
-       select NO_EXCEPT_FILL
-       select SWAP_IO_SPACE
-       select SYS_HAS_CPU_MIPS32_R1
-       select SYS_HAS_EARLY_PRINTK
-       select SYS_SUPPORTS_32BIT_KERNEL
-       select SYS_SUPPORTS_LITTLE_ENDIAN
-       select SYS_SUPPORTS_MIPS16
-       select SYS_SUPPORTS_ZBOOT_UART16550
-       select GPIOLIB
-       select VLYNQ
-       help
-         Support for the Texas Instruments AR7 System-on-a-Chip
-         family: TNETD7100, 7200 and 7300.
-
 config ATH25
        bool "Atheros AR231x/AR531x SoC support"
        select CEVT_R4K
diff --git a/arch/mips/ar7/Makefile b/arch/mips/ar7/Makefile
deleted file mode 100644 (file)
index cd51c6c..0000000
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-
-obj-y := \
-       prom.o \
-       setup.o \
-       memory.o \
-       irq.o \
-       time.o \
-       platform.o \
-       gpio.o \
-       clock.o
diff --git a/arch/mips/ar7/Platform b/arch/mips/ar7/Platform
deleted file mode 100644 (file)
index a9257cc..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Texas Instruments AR7
-#
-cflags-$(CONFIG_AR7)           += -I$(srctree)/arch/mips/include/asm/mach-ar7
-load-$(CONFIG_AR7)             += 0xffffffff94100000
diff --git a/arch/mips/ar7/clock.c b/arch/mips/ar7/clock.c
deleted file mode 100644 (file)
index c717acb..0000000
+++ /dev/null
@@ -1,439 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
- * Copyright (C) 2009 Florian Fainelli <florian@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/export.h>
-#include <linux/delay.h>
-#include <linux/gcd.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <linux/clkdev.h>
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-
-#include <asm/addrspace.h>
-#include <asm/mach-ar7/ar7.h>
-
-#define BOOT_PLL_SOURCE_MASK   0x3
-#define CPU_PLL_SOURCE_SHIFT   16
-#define BUS_PLL_SOURCE_SHIFT   14
-#define USB_PLL_SOURCE_SHIFT   18
-#define DSP_PLL_SOURCE_SHIFT   22
-#define BOOT_PLL_SOURCE_AFE    0
-#define BOOT_PLL_SOURCE_BUS    0
-#define BOOT_PLL_SOURCE_REF    1
-#define BOOT_PLL_SOURCE_XTAL   2
-#define BOOT_PLL_SOURCE_CPU    3
-#define BOOT_PLL_BYPASS                0x00000020
-#define BOOT_PLL_ASYNC_MODE    0x02000000
-#define BOOT_PLL_2TO1_MODE     0x00008000
-
-#define TNETD7200_CLOCK_ID_CPU 0
-#define TNETD7200_CLOCK_ID_DSP 1
-#define TNETD7200_CLOCK_ID_USB 2
-
-#define TNETD7200_DEF_CPU_CLK  211000000
-#define TNETD7200_DEF_DSP_CLK  125000000
-#define TNETD7200_DEF_USB_CLK  48000000
-
-struct tnetd7300_clock {
-       u32 ctrl;
-#define PREDIV_MASK    0x001f0000
-#define PREDIV_SHIFT   16
-#define POSTDIV_MASK   0x0000001f
-       u32 unused1[3];
-       u32 pll;
-#define MUL_MASK       0x0000f000
-#define MUL_SHIFT      12
-#define PLL_MODE_MASK  0x00000001
-#define PLL_NDIV       0x00000800
-#define PLL_DIV                0x00000002
-#define PLL_STATUS     0x00000001
-       u32 unused2[3];
-};
-
-struct tnetd7300_clocks {
-       struct tnetd7300_clock bus;
-       struct tnetd7300_clock cpu;
-       struct tnetd7300_clock usb;
-       struct tnetd7300_clock dsp;
-};
-
-struct tnetd7200_clock {
-       u32 ctrl;
-       u32 unused1[3];
-#define DIVISOR_ENABLE_MASK 0x00008000
-       u32 mul;
-       u32 prediv;
-       u32 postdiv;
-       u32 postdiv2;
-       u32 unused2[6];
-       u32 cmd;
-       u32 status;
-       u32 cmden;
-       u32 padding[15];
-};
-
-struct tnetd7200_clocks {
-       struct tnetd7200_clock cpu;
-       struct tnetd7200_clock dsp;
-       struct tnetd7200_clock usb;
-};
-
-struct clk_rate {
-       u32 rate;
-};
-static struct clk_rate bus_clk = {
-       .rate   = 125000000,
-};
-
-static struct clk_rate cpu_clk = {
-       .rate   = 150000000,
-};
-
-static void approximate(int base, int target, int *prediv,
-                       int *postdiv, int *mul)
-{
-       int i, j, k, freq, res = target;
-       for (i = 1; i <= 16; i++)
-               for (j = 1; j <= 32; j++)
-                       for (k = 1; k <= 32; k++) {
-                               freq = abs(base / j * i / k - target);
-                               if (freq < res) {
-                                       res = freq;
-                                       *mul = i;
-                                       *prediv = j;
-                                       *postdiv = k;
-                               }
-                       }
-}
-
-static void calculate(int base, int target, int *prediv, int *postdiv,
-       int *mul)
-{
-       int tmp_gcd, tmp_base, tmp_freq;
-
-       for (*prediv = 1; *prediv <= 32; (*prediv)++) {
-               tmp_base = base / *prediv;
-               tmp_gcd = gcd(target, tmp_base);
-               *mul = target / tmp_gcd;
-               *postdiv = tmp_base / tmp_gcd;
-               if ((*mul < 1) || (*mul >= 16))
-                       continue;
-               if ((*postdiv > 0) & (*postdiv <= 32))
-                       break;
-       }
-
-       if (base / *prediv * *mul / *postdiv != target) {
-               approximate(base, target, prediv, postdiv, mul);
-               tmp_freq = base / *prediv * *mul / *postdiv;
-               printk(KERN_WARNING
-                      "Adjusted requested frequency %d to %d\n",
-                      target, tmp_freq);
-       }
-
-       printk(KERN_DEBUG "Clocks: prediv: %d, postdiv: %d, mul: %d\n",
-              *prediv, *postdiv, *mul);
-}
-
-static int tnetd7300_dsp_clock(void)
-{
-       u32 didr1, didr2;
-       u8 rev = ar7_chip_rev();
-       didr1 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x18));
-       didr2 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x1c));
-       if (didr2 & (1 << 23))
-               return 0;
-       if ((rev >= 0x23) && (rev != 0x57))
-               return 250000000;
-       if ((((didr2 & 0x1fff) << 10) | ((didr1 & 0xffc00000) >> 22))
-           > 4208000)
-               return 250000000;
-       return 0;
-}
-
-static int tnetd7300_get_clock(u32 shift, struct tnetd7300_clock *clock,
-       u32 *bootcr, u32 bus_clock)
-{
-       int product;
-       int base_clock = AR7_REF_CLOCK;
-       u32 ctrl = readl(&clock->ctrl);
-       u32 pll = readl(&clock->pll);
-       int prediv = ((ctrl & PREDIV_MASK) >> PREDIV_SHIFT) + 1;
-       int postdiv = (ctrl & POSTDIV_MASK) + 1;
-       int divisor = prediv * postdiv;
-       int mul = ((pll & MUL_MASK) >> MUL_SHIFT) + 1;
-
-       switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) {
-       case BOOT_PLL_SOURCE_BUS:
-               base_clock = bus_clock;
-               break;
-       case BOOT_PLL_SOURCE_REF:
-               base_clock = AR7_REF_CLOCK;
-               break;
-       case BOOT_PLL_SOURCE_XTAL:
-               base_clock = AR7_XTAL_CLOCK;
-               break;
-       case BOOT_PLL_SOURCE_CPU:
-               base_clock = cpu_clk.rate;
-               break;
-       }
-
-       if (*bootcr & BOOT_PLL_BYPASS)
-               return base_clock / divisor;
-
-       if ((pll & PLL_MODE_MASK) == 0)
-               return (base_clock >> (mul / 16 + 1)) / divisor;
-
-       if ((pll & (PLL_NDIV | PLL_DIV)) == (PLL_NDIV | PLL_DIV)) {
-               product = (mul & 1) ?
-                       (base_clock * mul) >> 1 :
-                       (base_clock * (mul - 1)) >> 2;
-               return product / divisor;
-       }
-
-       if (mul == 16)
-               return base_clock / divisor;
-
-       return base_clock * mul / divisor;
-}
-
-static void tnetd7300_set_clock(u32 shift, struct tnetd7300_clock *clock,
-       u32 *bootcr, u32 frequency)
-{
-       int prediv, postdiv, mul;
-       int base_clock = bus_clk.rate;
-
-       switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) {
-       case BOOT_PLL_SOURCE_BUS:
-               base_clock = bus_clk.rate;
-               break;
-       case BOOT_PLL_SOURCE_REF:
-               base_clock = AR7_REF_CLOCK;
-               break;
-       case BOOT_PLL_SOURCE_XTAL:
-               base_clock = AR7_XTAL_CLOCK;
-               break;
-       case BOOT_PLL_SOURCE_CPU:
-               base_clock = cpu_clk.rate;
-               break;
-       }
-
-       calculate(base_clock, frequency, &prediv, &postdiv, &mul);
-
-       writel(((prediv - 1) << PREDIV_SHIFT) | (postdiv - 1), &clock->ctrl);
-       mdelay(1);
-       writel(4, &clock->pll);
-       while (readl(&clock->pll) & PLL_STATUS)
-               ;
-       writel(((mul - 1) << MUL_SHIFT) | (0xff << 3) | 0x0e, &clock->pll);
-       mdelay(75);
-}
-
-static void __init tnetd7300_init_clocks(void)
-{
-       u32 *bootcr = (u32 *)ioremap(AR7_REGS_DCL, 4);
-       struct tnetd7300_clocks *clocks =
-                                       ioremap(UR8_REGS_CLOCKS,
-                                       sizeof(struct tnetd7300_clocks));
-       u32 dsp_clk;
-       struct clk *clk;
-
-       bus_clk.rate = tnetd7300_get_clock(BUS_PLL_SOURCE_SHIFT,
-               &clocks->bus, bootcr, AR7_AFE_CLOCK);
-
-       if (*bootcr & BOOT_PLL_ASYNC_MODE)
-               cpu_clk.rate = tnetd7300_get_clock(CPU_PLL_SOURCE_SHIFT,
-                       &clocks->cpu, bootcr, AR7_AFE_CLOCK);
-       else
-               cpu_clk.rate = bus_clk.rate;
-
-       dsp_clk = tnetd7300_dsp_clock();
-       if (dsp_clk == 250000000)
-               tnetd7300_set_clock(DSP_PLL_SOURCE_SHIFT, &clocks->dsp,
-                       bootcr, dsp_clk);
-
-       iounmap(clocks);
-       iounmap(bootcr);
-
-       clk = clk_register_fixed_rate(NULL, "cpu", NULL, 0, cpu_clk.rate);
-       clkdev_create(clk, "cpu", NULL);
-       clk = clk_register_fixed_rate(NULL, "dsp", NULL, 0, dsp_clk);
-       clkdev_create(clk, "dsp", NULL);
-}
-
-static void tnetd7200_set_clock(int base, struct tnetd7200_clock *clock,
-       int prediv, int postdiv, int postdiv2, int mul, u32 frequency)
-{
-       printk(KERN_INFO
-               "Clocks: base = %d, frequency = %u, prediv = %d, "
-               "postdiv = %d, postdiv2 = %d, mul = %d\n",
-               base, frequency, prediv, postdiv, postdiv2, mul);
-
-       writel(0, &clock->ctrl);
-       writel(DIVISOR_ENABLE_MASK | ((prediv - 1) & 0x1F), &clock->prediv);
-       writel((mul - 1) & 0xF, &clock->mul);
-
-       while (readl(&clock->status) & 0x1)
-               ; /* nop */
-
-       writel(DIVISOR_ENABLE_MASK | ((postdiv - 1) & 0x1F), &clock->postdiv);
-
-       writel(readl(&clock->cmden) | 1, &clock->cmden);
-       writel(readl(&clock->cmd) | 1, &clock->cmd);
-
-       while (readl(&clock->status) & 0x1)
-               ; /* nop */
-
-       writel(DIVISOR_ENABLE_MASK | ((postdiv2 - 1) & 0x1F), &clock->postdiv2);
-
-       writel(readl(&clock->cmden) | 1, &clock->cmden);
-       writel(readl(&clock->cmd) | 1, &clock->cmd);
-
-       while (readl(&clock->status) & 0x1)
-               ; /* nop */
-
-       writel(readl(&clock->ctrl) | 1, &clock->ctrl);
-}
-
-static int tnetd7200_get_clock_base(int clock_id, u32 *bootcr)
-{
-       if (*bootcr & BOOT_PLL_ASYNC_MODE)
-               /* Async */
-               switch (clock_id) {
-               case TNETD7200_CLOCK_ID_DSP:
-                       return AR7_REF_CLOCK;
-               default:
-                       return AR7_AFE_CLOCK;
-               }
-       else
-               /* Sync */
-               if (*bootcr & BOOT_PLL_2TO1_MODE)
-                       /* 2:1 */
-                       switch (clock_id) {
-                       case TNETD7200_CLOCK_ID_DSP:
-                               return AR7_REF_CLOCK;
-                       default:
-                               return AR7_AFE_CLOCK;
-                       }
-               else
-                       /* 1:1 */
-                       return AR7_REF_CLOCK;
-}
-
-
-static void __init tnetd7200_init_clocks(void)
-{
-       u32 *bootcr = (u32 *)ioremap(AR7_REGS_DCL, 4);
-       struct tnetd7200_clocks *clocks =
-                                       ioremap(AR7_REGS_CLOCKS,
-                                       sizeof(struct tnetd7200_clocks));
-       int cpu_base, cpu_mul, cpu_prediv, cpu_postdiv;
-       int dsp_base, dsp_mul, dsp_prediv, dsp_postdiv;
-       int usb_base, usb_mul, usb_prediv, usb_postdiv;
-       struct clk *clk;
-
-       cpu_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_CPU, bootcr);
-       dsp_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_DSP, bootcr);
-
-       if (*bootcr & BOOT_PLL_ASYNC_MODE) {
-               printk(KERN_INFO "Clocks: Async mode\n");
-
-               printk(KERN_INFO "Clocks: Setting DSP clock\n");
-               calculate(dsp_base, TNETD7200_DEF_DSP_CLK,
-                       &dsp_prediv, &dsp_postdiv, &dsp_mul);
-               bus_clk.rate =
-                       ((dsp_base / dsp_prediv) * dsp_mul) / dsp_postdiv;
-               tnetd7200_set_clock(dsp_base, &clocks->dsp,
-                       dsp_prediv, dsp_postdiv * 2, dsp_postdiv, dsp_mul * 2,
-                       bus_clk.rate);
-
-               printk(KERN_INFO "Clocks: Setting CPU clock\n");
-               calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
-                       &cpu_postdiv, &cpu_mul);
-               cpu_clk.rate =
-                       ((cpu_base / cpu_prediv) * cpu_mul) / cpu_postdiv;
-               tnetd7200_set_clock(cpu_base, &clocks->cpu,
-                       cpu_prediv, cpu_postdiv, -1, cpu_mul,
-                       cpu_clk.rate);
-
-       } else
-               if (*bootcr & BOOT_PLL_2TO1_MODE) {
-                       printk(KERN_INFO "Clocks: Sync 2:1 mode\n");
-
-                       printk(KERN_INFO "Clocks: Setting CPU clock\n");
-                       calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv,
-                               &cpu_postdiv, &cpu_mul);
-                       cpu_clk.rate = ((cpu_base / cpu_prediv) * cpu_mul)
-                                                               / cpu_postdiv;
-                       tnetd7200_set_clock(cpu_base, &clocks->cpu,
-                               cpu_prediv, cpu_postdiv, -1, cpu_mul,
-                               cpu_clk.rate);
-
-                       printk(KERN_INFO "Clocks: Setting DSP clock\n");
-                       calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv,
-                               &dsp_postdiv, &dsp_mul);
-                       bus_clk.rate = cpu_clk.rate / 2;
-                       tnetd7200_set_clock(dsp_base, &clocks->dsp,
-                               dsp_prediv, dsp_postdiv * 2, dsp_postdiv,
-                               dsp_mul * 2, bus_clk.rate);
-               } else {
-                       printk(KERN_INFO "Clocks: Sync 1:1 mode\n");
-
-                       printk(KERN_INFO "Clocks: Setting DSP clock\n");
-                       calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv,
-                               &dsp_postdiv, &dsp_mul);
-                       bus_clk.rate = ((dsp_base / dsp_prediv) * dsp_mul)
-                                                               / dsp_postdiv;
-                       tnetd7200_set_clock(dsp_base, &clocks->dsp,
-                               dsp_prediv, dsp_postdiv * 2, dsp_postdiv,
-                               dsp_mul * 2, bus_clk.rate);
-
-                       cpu_clk.rate = bus_clk.rate;
-               }
-
-       printk(KERN_INFO "Clocks: Setting USB clock\n");
-       usb_base = bus_clk.rate;
-       calculate(usb_base, TNETD7200_DEF_USB_CLK, &usb_prediv,
-               &usb_postdiv, &usb_mul);
-       tnetd7200_set_clock(usb_base, &clocks->usb,
-               usb_prediv, usb_postdiv, -1, usb_mul,
-               TNETD7200_DEF_USB_CLK);
-
-       iounmap(clocks);
-       iounmap(bootcr);
-
-       clk = clk_register_fixed_rate(NULL, "cpu", NULL, 0, cpu_clk.rate);
-       clkdev_create(clk, "cpu", NULL);
-       clkdev_create(clk, "dsp", NULL);
-}
-
-void __init ar7_init_clocks(void)
-{
-       struct clk *clk;
-
-       switch (ar7_chip_id()) {
-       case AR7_CHIP_7100:
-       case AR7_CHIP_7200:
-               tnetd7200_init_clocks();
-               break;
-       case AR7_CHIP_7300:
-               tnetd7300_init_clocks();
-               break;
-       default:
-               break;
-       }
-       clk = clk_register_fixed_rate(NULL, "bus", NULL, 0, bus_clk.rate);
-       clkdev_create(clk, "bus", NULL);
-       /* adjust vbus clock rate */
-       clk = clk_register_fixed_factor(NULL, "vbus", "bus", 0, 1, 2);
-       clkdev_create(clk, "vbus", NULL);
-       clkdev_create(clk, "cpmac", "cpmac.1");
-       clkdev_create(clk, "cpmac", "cpmac.1");
-}
diff --git a/arch/mips/ar7/gpio.c b/arch/mips/ar7/gpio.c
deleted file mode 100644 (file)
index 4ed833b..0000000
+++ /dev/null
@@ -1,332 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
- * Copyright (C) 2009-2010 Florian Fainelli <florian@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/gpio/driver.h>
-
-#include <asm/mach-ar7/ar7.h>
-
-#define AR7_GPIO_MAX 32
-#define TITAN_GPIO_MAX 51
-
-struct ar7_gpio_chip {
-       void __iomem            *regs;
-       struct gpio_chip        chip;
-};
-
-static int ar7_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
-{
-       struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
-       void __iomem *gpio_in = gpch->regs + AR7_GPIO_INPUT;
-
-       return !!(readl(gpio_in) & (1 << gpio));
-}
-
-static int titan_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
-{
-       struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
-       void __iomem *gpio_in0 = gpch->regs + TITAN_GPIO_INPUT_0;
-       void __iomem *gpio_in1 = gpch->regs + TITAN_GPIO_INPUT_1;
-
-       return readl(gpio >> 5 ? gpio_in1 : gpio_in0) & (1 << (gpio & 0x1f));
-}
-
-static void ar7_gpio_set_value(struct gpio_chip *chip,
-                               unsigned gpio, int value)
-{
-       struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
-       void __iomem *gpio_out = gpch->regs + AR7_GPIO_OUTPUT;
-       unsigned tmp;
-
-       tmp = readl(gpio_out) & ~(1 << gpio);
-       if (value)
-               tmp |= 1 << gpio;
-       writel(tmp, gpio_out);
-}
-
-static void titan_gpio_set_value(struct gpio_chip *chip,
-                               unsigned gpio, int value)
-{
-       struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
-       void __iomem *gpio_out0 = gpch->regs + TITAN_GPIO_OUTPUT_0;
-       void __iomem *gpio_out1 = gpch->regs + TITAN_GPIO_OUTPUT_1;
-       unsigned tmp;
-
-       tmp = readl(gpio >> 5 ? gpio_out1 : gpio_out0) & ~(1 << (gpio & 0x1f));
-       if (value)
-               tmp |= 1 << (gpio & 0x1f);
-       writel(tmp, gpio >> 5 ? gpio_out1 : gpio_out0);
-}
-
-static int ar7_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
-{
-       struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
-       void __iomem *gpio_dir = gpch->regs + AR7_GPIO_DIR;
-
-       writel(readl(gpio_dir) | (1 << gpio), gpio_dir);
-
-       return 0;
-}
-
-static int titan_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
-{
-       struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
-       void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
-       void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
-
-       if (gpio >= TITAN_GPIO_MAX)
-               return -EINVAL;
-
-       writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) | (1 << (gpio & 0x1f)),
-                       gpio >> 5 ? gpio_dir1 : gpio_dir0);
-       return 0;
-}
-
-static int ar7_gpio_direction_output(struct gpio_chip *chip,
-                                       unsigned gpio, int value)
-{
-       struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
-       void __iomem *gpio_dir = gpch->regs + AR7_GPIO_DIR;
-
-       ar7_gpio_set_value(chip, gpio, value);
-       writel(readl(gpio_dir) & ~(1 << gpio), gpio_dir);
-
-       return 0;
-}
-
-static int titan_gpio_direction_output(struct gpio_chip *chip,
-                                       unsigned gpio, int value)
-{
-       struct ar7_gpio_chip *gpch = gpiochip_get_data(chip);
-       void __iomem *gpio_dir0 = gpch->regs + TITAN_GPIO_DIR_0;
-       void __iomem *gpio_dir1 = gpch->regs + TITAN_GPIO_DIR_1;
-
-       if (gpio >= TITAN_GPIO_MAX)
-               return -EINVAL;
-
-       titan_gpio_set_value(chip, gpio, value);
-       writel(readl(gpio >> 5 ? gpio_dir1 : gpio_dir0) & ~(1 <<
-               (gpio & 0x1f)), gpio >> 5 ? gpio_dir1 : gpio_dir0);
-
-       return 0;
-}
-
-static struct ar7_gpio_chip ar7_gpio_chip = {
-       .chip = {
-               .label                  = "ar7-gpio",
-               .direction_input        = ar7_gpio_direction_input,
-               .direction_output       = ar7_gpio_direction_output,
-               .set                    = ar7_gpio_set_value,
-               .get                    = ar7_gpio_get_value,
-               .base                   = 0,
-               .ngpio                  = AR7_GPIO_MAX,
-       }
-};
-
-static struct ar7_gpio_chip titan_gpio_chip = {
-       .chip = {
-               .label                  = "titan-gpio",
-               .direction_input        = titan_gpio_direction_input,
-               .direction_output       = titan_gpio_direction_output,
-               .set                    = titan_gpio_set_value,
-               .get                    = titan_gpio_get_value,
-               .base                   = 0,
-               .ngpio                  = TITAN_GPIO_MAX,
-       }
-};
-
-static inline int ar7_gpio_enable_ar7(unsigned gpio)
-{
-       void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
-
-       writel(readl(gpio_en) | (1 << gpio), gpio_en);
-
-       return 0;
-}
-
-static inline int ar7_gpio_enable_titan(unsigned gpio)
-{
-       void __iomem *gpio_en0 = titan_gpio_chip.regs  + TITAN_GPIO_ENBL_0;
-       void __iomem *gpio_en1 = titan_gpio_chip.regs  + TITAN_GPIO_ENBL_1;
-
-       writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) | (1 << (gpio & 0x1f)),
-               gpio >> 5 ? gpio_en1 : gpio_en0);
-
-       return 0;
-}
-
-int ar7_gpio_enable(unsigned gpio)
-{
-       return ar7_is_titan() ? ar7_gpio_enable_titan(gpio) :
-                               ar7_gpio_enable_ar7(gpio);
-}
-EXPORT_SYMBOL(ar7_gpio_enable);
-
-static inline int ar7_gpio_disable_ar7(unsigned gpio)
-{
-       void __iomem *gpio_en = ar7_gpio_chip.regs + AR7_GPIO_ENABLE;
-
-       writel(readl(gpio_en) & ~(1 << gpio), gpio_en);
-
-       return 0;
-}
-
-static inline int ar7_gpio_disable_titan(unsigned gpio)
-{
-       void __iomem *gpio_en0 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_0;
-       void __iomem *gpio_en1 = titan_gpio_chip.regs + TITAN_GPIO_ENBL_1;
-
-       writel(readl(gpio >> 5 ? gpio_en1 : gpio_en0) & ~(1 << (gpio & 0x1f)),
-                       gpio >> 5 ? gpio_en1 : gpio_en0);
-
-       return 0;
-}
-
-int ar7_gpio_disable(unsigned gpio)
-{
-       return ar7_is_titan() ? ar7_gpio_disable_titan(gpio) :
-                               ar7_gpio_disable_ar7(gpio);
-}
-EXPORT_SYMBOL(ar7_gpio_disable);
-
-struct titan_gpio_cfg {
-       u32 reg;
-       u32 shift;
-       u32 func;
-};
-
-static const struct titan_gpio_cfg titan_gpio_table[] = {
-       /* reg, start bit, mux value */
-       {4, 24, 1},
-       {4, 26, 1},
-       {4, 28, 1},
-       {4, 30, 1},
-       {5, 6, 1},
-       {5, 8, 1},
-       {5, 10, 1},
-       {5, 12, 1},
-       {7, 14, 3},
-       {7, 16, 3},
-       {7, 18, 3},
-       {7, 20, 3},
-       {7, 22, 3},
-       {7, 26, 3},
-       {7, 28, 3},
-       {7, 30, 3},
-       {8, 0, 3},
-       {8, 2, 3},
-       {8, 4, 3},
-       {8, 10, 3},
-       {8, 14, 3},
-       {8, 16, 3},
-       {8, 18, 3},
-       {8, 20, 3},
-       {9, 8, 3},
-       {9, 10, 3},
-       {9, 12, 3},
-       {9, 14, 3},
-       {9, 18, 3},
-       {9, 20, 3},
-       {9, 24, 3},
-       {9, 26, 3},
-       {9, 28, 3},
-       {9, 30, 3},
-       {10, 0, 3},
-       {10, 2, 3},
-       {10, 8, 3},
-       {10, 10, 3},
-       {10, 12, 3},
-       {10, 14, 3},
-       {13, 12, 3},
-       {13, 14, 3},
-       {13, 16, 3},
-       {13, 18, 3},
-       {13, 24, 3},
-       {13, 26, 3},
-       {13, 28, 3},
-       {13, 30, 3},
-       {14, 2, 3},
-       {14, 6, 3},
-       {14, 8, 3},
-       {14, 12, 3}
-};
-
-static int titan_gpio_pinsel(unsigned gpio)
-{
-       struct titan_gpio_cfg gpio_cfg;
-       u32 mux_status, pin_sel_reg, tmp;
-       void __iomem *pin_sel = (void __iomem *)KSEG1ADDR(AR7_REGS_PINSEL);
-
-       if (gpio >= ARRAY_SIZE(titan_gpio_table))
-               return -EINVAL;
-
-       gpio_cfg = titan_gpio_table[gpio];
-       pin_sel_reg = gpio_cfg.reg - 1;
-
-       mux_status = (readl(pin_sel + pin_sel_reg) >> gpio_cfg.shift) & 0x3;
-
-       /* Check the mux status */
-       if (!((mux_status == 0) || (mux_status == gpio_cfg.func)))
-               return 0;
-
-       /* Set the pin sel value */
-       tmp = readl(pin_sel + pin_sel_reg);
-       tmp |= ((gpio_cfg.func & 0x3) << gpio_cfg.shift);
-       writel(tmp, pin_sel + pin_sel_reg);
-
-       return 0;
-}
-
-/* Perform minimal Titan GPIO configuration */
-static void titan_gpio_init(void)
-{
-       unsigned i;
-
-       for (i = 44; i < 48; i++) {
-               titan_gpio_pinsel(i);
-               ar7_gpio_enable_titan(i);
-               titan_gpio_direction_input(&titan_gpio_chip.chip, i);
-       }
-}
-
-int __init ar7_gpio_init(void)
-{
-       int ret;
-       struct ar7_gpio_chip *gpch;
-       unsigned size;
-
-       if (!ar7_is_titan()) {
-               gpch = &ar7_gpio_chip;
-               size = 0x10;
-       } else {
-               gpch = &titan_gpio_chip;
-               size = 0x1f;
-       }
-
-       gpch->regs = ioremap(AR7_REGS_GPIO, size);
-       if (!gpch->regs) {
-               printk(KERN_ERR "%s: failed to ioremap regs\n",
-                                       gpch->chip.label);
-               return -ENOMEM;
-       }
-
-       ret = gpiochip_add_data(&gpch->chip, gpch);
-       if (ret) {
-               printk(KERN_ERR "%s: failed to add gpiochip\n",
-                                       gpch->chip.label);
-               iounmap(gpch->regs);
-               return ret;
-       }
-       printk(KERN_INFO "%s: registered %d GPIOs\n",
-                               gpch->chip.label, gpch->chip.ngpio);
-
-       if (ar7_is_titan())
-               titan_gpio_init();
-
-       return ret;
-}
diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c
deleted file mode 100644 (file)
index f0a7942..0000000
+++ /dev/null
@@ -1,165 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
- */
-
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-
-#include <asm/irq_cpu.h>
-#include <asm/mipsregs.h>
-#include <asm/mach-ar7/ar7.h>
-
-#define EXCEPT_OFFSET  0x80
-#define PACE_OFFSET    0xA0
-#define CHNLS_OFFSET   0x200
-
-#define REG_OFFSET(irq, reg)   ((irq) / 32 * 0x4 + reg * 0x10)
-#define SEC_REG_OFFSET(reg)    (EXCEPT_OFFSET + reg * 0x8)
-#define SEC_SR_OFFSET          (SEC_REG_OFFSET(0))     /* 0x80 */
-#define CR_OFFSET(irq)         (REG_OFFSET(irq, 1))    /* 0x10 */
-#define SEC_CR_OFFSET          (SEC_REG_OFFSET(1))     /* 0x88 */
-#define ESR_OFFSET(irq)                (REG_OFFSET(irq, 2))    /* 0x20 */
-#define SEC_ESR_OFFSET         (SEC_REG_OFFSET(2))     /* 0x90 */
-#define ECR_OFFSET(irq)                (REG_OFFSET(irq, 3))    /* 0x30 */
-#define SEC_ECR_OFFSET         (SEC_REG_OFFSET(3))     /* 0x98 */
-#define PIR_OFFSET             (0x40)
-#define MSR_OFFSET             (0x44)
-#define PM_OFFSET(irq)         (REG_OFFSET(irq, 5))    /* 0x50 */
-#define TM_OFFSET(irq)         (REG_OFFSET(irq, 6))    /* 0x60 */
-
-#define REG(addr) ((u32 *)(KSEG1ADDR(AR7_REGS_IRQ) + addr))
-
-#define CHNL_OFFSET(chnl) (CHNLS_OFFSET + (chnl * 4))
-
-static int ar7_irq_base;
-
-static void ar7_unmask_irq(struct irq_data *d)
-{
-       writel(1 << ((d->irq - ar7_irq_base) % 32),
-              REG(ESR_OFFSET(d->irq - ar7_irq_base)));
-}
-
-static void ar7_mask_irq(struct irq_data *d)
-{
-       writel(1 << ((d->irq - ar7_irq_base) % 32),
-              REG(ECR_OFFSET(d->irq - ar7_irq_base)));
-}
-
-static void ar7_ack_irq(struct irq_data *d)
-{
-       writel(1 << ((d->irq - ar7_irq_base) % 32),
-              REG(CR_OFFSET(d->irq - ar7_irq_base)));
-}
-
-static void ar7_unmask_sec_irq(struct irq_data *d)
-{
-       writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_ESR_OFFSET));
-}
-
-static void ar7_mask_sec_irq(struct irq_data *d)
-{
-       writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_ECR_OFFSET));
-}
-
-static void ar7_ack_sec_irq(struct irq_data *d)
-{
-       writel(1 << (d->irq - ar7_irq_base - 40), REG(SEC_CR_OFFSET));
-}
-
-static struct irq_chip ar7_irq_type = {
-       .name = "AR7",
-       .irq_unmask = ar7_unmask_irq,
-       .irq_mask = ar7_mask_irq,
-       .irq_ack = ar7_ack_irq
-};
-
-static struct irq_chip ar7_sec_irq_type = {
-       .name = "AR7",
-       .irq_unmask = ar7_unmask_sec_irq,
-       .irq_mask = ar7_mask_sec_irq,
-       .irq_ack = ar7_ack_sec_irq,
-};
-
-static void __init ar7_irq_init(int base)
-{
-       int i;
-       /*
-        * Disable interrupts and clear pending
-        */
-       writel(0xffffffff, REG(ECR_OFFSET(0)));
-       writel(0xff, REG(ECR_OFFSET(32)));
-       writel(0xffffffff, REG(SEC_ECR_OFFSET));
-       writel(0xffffffff, REG(CR_OFFSET(0)));
-       writel(0xff, REG(CR_OFFSET(32)));
-       writel(0xffffffff, REG(SEC_CR_OFFSET));
-
-       ar7_irq_base = base;
-
-       for (i = 0; i < 40; i++) {
-               writel(i, REG(CHNL_OFFSET(i)));
-               /* Primary IRQ's */
-               irq_set_chip_and_handler(base + i, &ar7_irq_type,
-                                        handle_level_irq);
-               /* Secondary IRQ's */
-               if (i < 32)
-                       irq_set_chip_and_handler(base + i + 40,
-                                                &ar7_sec_irq_type,
-                                                handle_level_irq);
-       }
-
-       if (request_irq(2, no_action, IRQF_NO_THREAD, "AR7 cascade interrupt",
-                       NULL))
-               pr_err("Failed to request irq 2 (AR7 cascade interrupt)\n");
-       if (request_irq(ar7_irq_base, no_action, IRQF_NO_THREAD,
-                       "AR7 cascade interrupt", NULL)) {
-               pr_err("Failed to request irq %d (AR7 cascade interrupt)\n",
-                      ar7_irq_base);
-       }
-       set_c0_status(IE_IRQ0);
-}
-
-void __init arch_init_irq(void)
-{
-       mips_cpu_irq_init();
-       ar7_irq_init(8);
-}
-
-static void ar7_cascade(void)
-{
-       u32 status;
-       int i, irq;
-
-       /* Primary IRQ's */
-       irq = readl(REG(PIR_OFFSET)) & 0x3f;
-       if (irq) {
-               do_IRQ(ar7_irq_base + irq);
-               return;
-       }
-
-       /* Secondary IRQ's are cascaded through primary '0' */
-       writel(1, REG(CR_OFFSET(irq)));
-       status = readl(REG(SEC_SR_OFFSET));
-       for (i = 0; i < 32; i++) {
-               if (status & 1) {
-                       do_IRQ(ar7_irq_base + i + 40);
-                       return;
-               }
-               status >>= 1;
-       }
-
-       spurious_interrupt();
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
-       unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-       if (pending & STATUSF_IP7)              /* cpu timer */
-               do_IRQ(7);
-       else if (pending & STATUSF_IP2)         /* int0 hardware line */
-               ar7_cascade();
-       else
-               spurious_interrupt();
-}
diff --git a/arch/mips/ar7/memory.c b/arch/mips/ar7/memory.c
deleted file mode 100644 (file)
index ce8024c..0000000
+++ /dev/null
@@ -1,51 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org>
- */
-#include <linux/memblock.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/pfn.h>
-#include <linux/proc_fs.h>
-#include <linux/string.h>
-#include <linux/swap.h>
-
-#include <asm/bootinfo.h>
-#include <asm/page.h>
-#include <asm/sections.h>
-
-#include <asm/mach-ar7/ar7.h>
-
-static int __init memsize(void)
-{
-       u32 size = (64 << 20);
-       u32 *addr = (u32 *)KSEG1ADDR(AR7_SDRAM_BASE + size - 4);
-       u32 *kernel_end = (u32 *)KSEG1ADDR(CPHYSADDR((u32)&_end));
-       u32 *tmpaddr = addr;
-
-       while (tmpaddr > kernel_end) {
-               *tmpaddr = (u32)tmpaddr;
-               size >>= 1;
-               tmpaddr -= size >> 2;
-       }
-
-       do {
-               tmpaddr += size >> 2;
-               if (*tmpaddr != (u32)tmpaddr)
-                       break;
-               size <<= 1;
-       } while (size < (64 << 20));
-
-       writel((u32)tmpaddr, &addr);
-
-       return size;
-}
-
-void __init prom_meminit(void)
-{
-       unsigned long pages;
-
-       pages = memsize() >> PAGE_SHIFT;
-       memblock_add(PHYS_OFFSET, pages << PAGE_SHIFT);
-}
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
deleted file mode 100644 (file)
index 215149a..0000000
+++ /dev/null
@@ -1,722 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
- */
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/physmap.h>
-#include <linux/serial.h>
-#include <linux/serial_8250.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/vlynq.h>
-#include <linux/leds.h>
-#include <linux/string.h>
-#include <linux/etherdevice.h>
-#include <linux/phy.h>
-#include <linux/phy_fixed.h>
-#include <linux/gpio.h>
-#include <linux/clk.h>
-
-#include <asm/addrspace.h>
-#include <asm/mach-ar7/ar7.h>
-#include <asm/mach-ar7/prom.h>
-
-/*****************************************************************************
- * VLYNQ Bus
- ****************************************************************************/
-struct plat_vlynq_data {
-       struct plat_vlynq_ops ops;
-       int gpio_bit;
-       int reset_bit;
-};
-
-static int vlynq_on(struct vlynq_device *dev)
-{
-       int ret;
-       struct plat_vlynq_data *pdata = dev->dev.platform_data;
-
-       ret = gpio_request(pdata->gpio_bit, "vlynq");
-       if (ret)
-               goto out;
-
-       ar7_device_reset(pdata->reset_bit);
-
-       ret = ar7_gpio_disable(pdata->gpio_bit);
-       if (ret)
-               goto out_enabled;
-
-       ret = ar7_gpio_enable(pdata->gpio_bit);
-       if (ret)
-               goto out_enabled;
-
-       ret = gpio_direction_output(pdata->gpio_bit, 0);
-       if (ret)
-               goto out_gpio_enabled;
-
-       msleep(50);
-
-       gpio_set_value(pdata->gpio_bit, 1);
-
-       msleep(50);
-
-       return 0;
-
-out_gpio_enabled:
-       ar7_gpio_disable(pdata->gpio_bit);
-out_enabled:
-       ar7_device_disable(pdata->reset_bit);
-       gpio_free(pdata->gpio_bit);
-out:
-       return ret;
-}
-
-static void vlynq_off(struct vlynq_device *dev)
-{
-       struct plat_vlynq_data *pdata = dev->dev.platform_data;
-
-       ar7_gpio_disable(pdata->gpio_bit);
-       gpio_free(pdata->gpio_bit);
-       ar7_device_disable(pdata->reset_bit);
-}
-
-static struct resource vlynq_low_res[] = {
-       {
-               .name   = "regs",
-               .flags  = IORESOURCE_MEM,
-               .start  = AR7_REGS_VLYNQ0,
-               .end    = AR7_REGS_VLYNQ0 + 0xff,
-       },
-       {
-               .name   = "irq",
-               .flags  = IORESOURCE_IRQ,
-               .start  = 29,
-               .end    = 29,
-       },
-       {
-               .name   = "mem",
-               .flags  = IORESOURCE_MEM,
-               .start  = 0x04000000,
-               .end    = 0x04ffffff,
-       },
-       {
-               .name   = "devirq",
-               .flags  = IORESOURCE_IRQ,
-               .start  = 80,
-               .end    = 111,
-       },
-};
-
-static struct resource vlynq_high_res[] = {
-       {
-               .name   = "regs",
-               .flags  = IORESOURCE_MEM,
-               .start  = AR7_REGS_VLYNQ1,
-               .end    = AR7_REGS_VLYNQ1 + 0xff,
-       },
-       {
-               .name   = "irq",
-               .flags  = IORESOURCE_IRQ,
-               .start  = 33,
-               .end    = 33,
-       },
-       {
-               .name   = "mem",
-               .flags  = IORESOURCE_MEM,
-               .start  = 0x0c000000,
-               .end    = 0x0cffffff,
-       },
-       {
-               .name   = "devirq",
-               .flags  = IORESOURCE_IRQ,
-               .start  = 112,
-               .end    = 143,
-       },
-};
-
-static struct plat_vlynq_data vlynq_low_data = {
-       .ops = {
-               .on     = vlynq_on,
-               .off    = vlynq_off,
-       },
-       .reset_bit      = 20,
-       .gpio_bit       = 18,
-};
-
-static struct plat_vlynq_data vlynq_high_data = {
-       .ops = {
-               .on     = vlynq_on,
-               .off    = vlynq_off,
-       },
-       .reset_bit      = 16,
-       .gpio_bit       = 19,
-};
-
-static struct platform_device vlynq_low = {
-       .id             = 0,
-       .name           = "vlynq",
-       .dev = {
-               .platform_data  = &vlynq_low_data,
-       },
-       .resource       = vlynq_low_res,
-       .num_resources  = ARRAY_SIZE(vlynq_low_res),
-};
-
-static struct platform_device vlynq_high = {
-       .id             = 1,
-       .name           = "vlynq",
-       .dev = {
-               .platform_data  = &vlynq_high_data,
-       },
-       .resource       = vlynq_high_res,
-       .num_resources  = ARRAY_SIZE(vlynq_high_res),
-};
-
-/*****************************************************************************
- * Flash
- ****************************************************************************/
-static struct resource physmap_flash_resource = {
-       .name   = "mem",
-       .flags  = IORESOURCE_MEM,
-       .start  = 0x10000000,
-       .end    = 0x107fffff,
-};
-
-static const char *ar7_probe_types[] = { "ar7part", NULL };
-
-static struct physmap_flash_data physmap_flash_data = {
-       .width  = 2,
-       .part_probe_types = ar7_probe_types,
-};
-
-static struct platform_device physmap_flash = {
-       .name           = "physmap-flash",
-       .dev = {
-               .platform_data  = &physmap_flash_data,
-       },
-       .resource       = &physmap_flash_resource,
-       .num_resources  = 1,
-};
-
-/*****************************************************************************
- * Ethernet
- ****************************************************************************/
-static struct resource cpmac_low_res[] = {
-       {
-               .name   = "regs",
-               .flags  = IORESOURCE_MEM,
-               .start  = AR7_REGS_MAC0,
-               .end    = AR7_REGS_MAC0 + 0x7ff,
-       },
-       {
-               .name   = "irq",
-               .flags  = IORESOURCE_IRQ,
-               .start  = 27,
-               .end    = 27,
-       },
-};
-
-static struct resource cpmac_high_res[] = {
-       {
-               .name   = "regs",
-               .flags  = IORESOURCE_MEM,
-               .start  = AR7_REGS_MAC1,
-               .end    = AR7_REGS_MAC1 + 0x7ff,
-       },
-       {
-               .name   = "irq",
-               .flags  = IORESOURCE_IRQ,
-               .start  = 41,
-               .end    = 41,
-       },
-};
-
-static struct fixed_phy_status fixed_phy_status __initdata = {
-       .link           = 1,
-       .speed          = 100,
-       .duplex         = 1,
-};
-
-static struct plat_cpmac_data cpmac_low_data = {
-       .reset_bit      = 17,
-       .power_bit      = 20,
-       .phy_mask       = 0x80000000,
-};
-
-static struct plat_cpmac_data cpmac_high_data = {
-       .reset_bit      = 21,
-       .power_bit      = 22,
-       .phy_mask       = 0x7fffffff,
-};
-
-static u64 cpmac_dma_mask = DMA_BIT_MASK(32);
-
-static struct platform_device cpmac_low = {
-       .id             = 0,
-       .name           = "cpmac",
-       .dev = {
-               .dma_mask               = &cpmac_dma_mask,
-               .coherent_dma_mask      = DMA_BIT_MASK(32),
-               .platform_data          = &cpmac_low_data,
-       },
-       .resource       = cpmac_low_res,
-       .num_resources  = ARRAY_SIZE(cpmac_low_res),
-};
-
-static struct platform_device cpmac_high = {
-       .id             = 1,
-       .name           = "cpmac",
-       .dev = {
-               .dma_mask               = &cpmac_dma_mask,
-               .coherent_dma_mask      = DMA_BIT_MASK(32),
-               .platform_data          = &cpmac_high_data,
-       },
-       .resource       = cpmac_high_res,
-       .num_resources  = ARRAY_SIZE(cpmac_high_res),
-};
-
-static void __init cpmac_get_mac(int instance, unsigned char *dev_addr)
-{
-       char name[5], *mac;
-
-       sprintf(name, "mac%c", 'a' + instance);
-       mac = prom_getenv(name);
-       if (!mac && instance) {
-               sprintf(name, "mac%c", 'a');
-               mac = prom_getenv(name);
-       }
-
-       if (mac) {
-               if (!mac_pton(mac, dev_addr)) {
-                       pr_warn("cannot parse mac address, using random address\n");
-                       eth_random_addr(dev_addr);
-               }
-       } else
-               eth_random_addr(dev_addr);
-}
-
-/*****************************************************************************
- * USB
- ****************************************************************************/
-static struct resource usb_res[] = {
-       {
-               .name   = "regs",
-               .flags  = IORESOURCE_MEM,
-               .start  = AR7_REGS_USB,
-               .end    = AR7_REGS_USB + 0xff,
-       },
-       {
-               .name   = "irq",
-               .flags  = IORESOURCE_IRQ,
-               .start  = 32,
-               .end    = 32,
-       },
-       {
-               .name   = "mem",
-               .flags  = IORESOURCE_MEM,
-               .start  = 0x03400000,
-               .end    = 0x03401fff,
-       },
-};
-
-static struct platform_device ar7_udc = {
-       .name           = "ar7_udc",
-       .resource       = usb_res,
-       .num_resources  = ARRAY_SIZE(usb_res),
-};
-
-/*****************************************************************************
- * LEDs
- ****************************************************************************/
-static const struct gpio_led default_leds[] = {
-       {
-               .name                   = "status",
-               .gpio                   = 8,
-               .active_low             = 1,
-       },
-};
-
-static const struct gpio_led titan_leds[] = {
-       { .name = "status", .gpio = 8, .active_low = 1, },
-       { .name = "wifi", .gpio = 13, .active_low = 1, },
-};
-
-static const struct gpio_led dsl502t_leds[] = {
-       {
-               .name                   = "status",
-               .gpio                   = 9,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "ethernet",
-               .gpio                   = 7,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "usb",
-               .gpio                   = 12,
-               .active_low             = 1,
-       },
-};
-
-static const struct gpio_led dg834g_leds[] = {
-       {
-               .name                   = "ppp",
-               .gpio                   = 6,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "status",
-               .gpio                   = 7,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "adsl",
-               .gpio                   = 8,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "wifi",
-               .gpio                   = 12,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "power",
-               .gpio                   = 14,
-               .active_low             = 1,
-               .default_trigger        = "default-on",
-       },
-};
-
-static const struct gpio_led fb_sl_leds[] = {
-       {
-               .name                   = "1",
-               .gpio                   = 7,
-       },
-       {
-               .name                   = "2",
-               .gpio                   = 13,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "3",
-               .gpio                   = 10,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "4",
-               .gpio                   = 12,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "5",
-               .gpio                   = 9,
-               .active_low             = 1,
-       },
-};
-
-static const struct gpio_led fb_fon_leds[] = {
-       {
-               .name                   = "1",
-               .gpio                   = 8,
-       },
-       {
-               .name                   = "2",
-               .gpio                   = 3,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "3",
-               .gpio                   = 5,
-       },
-       {
-               .name                   = "4",
-               .gpio                   = 4,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "5",
-               .gpio                   = 11,
-               .active_low             = 1,
-       },
-};
-
-static const struct gpio_led gt701_leds[] = {
-       {
-               .name                   = "inet:green",
-               .gpio                   = 13,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "usb",
-               .gpio                   = 12,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "inet:red",
-               .gpio                   = 9,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "power:red",
-               .gpio                   = 7,
-               .active_low             = 1,
-       },
-       {
-               .name                   = "power:green",
-               .gpio                   = 8,
-               .active_low             = 1,
-               .default_trigger        = "default-on",
-       },
-       {
-               .name                   = "ethernet",
-               .gpio                   = 10,
-               .active_low             = 1,
-       },
-};
-
-static struct gpio_led_platform_data ar7_led_data;
-
-static struct platform_device ar7_gpio_leds = {
-       .name = "leds-gpio",
-       .dev = {
-               .platform_data = &ar7_led_data,
-       }
-};
-
-static void __init detect_leds(void)
-{
-       char *prid, *usb_prod;
-
-       /* Default LEDs */
-       ar7_led_data.num_leds = ARRAY_SIZE(default_leds);
-       ar7_led_data.leds = default_leds;
-
-       /* FIXME: the whole thing is unreliable */
-       prid = prom_getenv("ProductID");
-       usb_prod = prom_getenv("usb_prod");
-
-       /* If we can't get the product id from PROM, use the default LEDs */
-       if (!prid)
-               return;
-
-       if (strstr(prid, "Fritz_Box_FON")) {
-               ar7_led_data.num_leds = ARRAY_SIZE(fb_fon_leds);
-               ar7_led_data.leds = fb_fon_leds;
-       } else if (strstr(prid, "Fritz_Box_")) {
-               ar7_led_data.num_leds = ARRAY_SIZE(fb_sl_leds);
-               ar7_led_data.leds = fb_sl_leds;
-       } else if ((!strcmp(prid, "AR7RD") || !strcmp(prid, "AR7DB"))
-               && usb_prod != NULL && strstr(usb_prod, "DSL-502T")) {
-               ar7_led_data.num_leds = ARRAY_SIZE(dsl502t_leds);
-               ar7_led_data.leds = dsl502t_leds;
-       } else if (strstr(prid, "DG834")) {
-               ar7_led_data.num_leds = ARRAY_SIZE(dg834g_leds);
-               ar7_led_data.leds = dg834g_leds;
-       } else if (strstr(prid, "CYWM") || strstr(prid, "CYWL")) {
-               ar7_led_data.num_leds = ARRAY_SIZE(titan_leds);
-               ar7_led_data.leds = titan_leds;
-       } else if (strstr(prid, "GT701")) {
-               ar7_led_data.num_leds = ARRAY_SIZE(gt701_leds);
-               ar7_led_data.leds = gt701_leds;
-       }
-}
-
-/*****************************************************************************
- * Watchdog
- ****************************************************************************/
-static struct resource ar7_wdt_res = {
-       .name           = "regs",
-       .flags          = IORESOURCE_MEM,
-       .start          = -1,   /* Filled at runtime */
-       .end            = -1,   /* Filled at runtime */
-};
-
-static struct platform_device ar7_wdt = {
-       .name           = "ar7_wdt",
-       .resource       = &ar7_wdt_res,
-       .num_resources  = 1,
-};
-
-/*****************************************************************************
- * Init
- ****************************************************************************/
-static int __init ar7_register_uarts(void)
-{
-#ifdef CONFIG_SERIAL_8250
-       static struct uart_port uart_port __initdata;
-       struct clk *bus_clk;
-       int res;
-
-       memset(&uart_port, 0, sizeof(struct uart_port));
-
-       bus_clk = clk_get(NULL, "bus");
-       if (IS_ERR(bus_clk))
-               panic("unable to get bus clk");
-
-       uart_port.type          = PORT_AR7;
-       uart_port.uartclk       = clk_get_rate(bus_clk) / 2;
-       uart_port.iotype        = UPIO_MEM32;
-       uart_port.flags         = UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF;
-       uart_port.regshift      = 2;
-
-       uart_port.line          = 0;
-       uart_port.irq           = AR7_IRQ_UART0;
-       uart_port.mapbase       = AR7_REGS_UART0;
-       uart_port.membase       = ioremap(uart_port.mapbase, 256);
-
-       res = early_serial_setup(&uart_port);
-       if (res)
-               return res;
-
-       /* Only TNETD73xx have a second serial port */
-       if (ar7_has_second_uart()) {
-               uart_port.line          = 1;
-               uart_port.irq           = AR7_IRQ_UART1;
-               uart_port.mapbase       = UR8_REGS_UART1;
-               uart_port.membase       = ioremap(uart_port.mapbase, 256);
-
-               res = early_serial_setup(&uart_port);
-               if (res)
-                       return res;
-       }
-#endif
-
-       return 0;
-}
-
-static void __init titan_fixup_devices(void)
-{
-       /* Set vlynq0 data */
-       vlynq_low_data.reset_bit = 15;
-       vlynq_low_data.gpio_bit = 14;
-
-       /* Set vlynq1 data */
-       vlynq_high_data.reset_bit = 16;
-       vlynq_high_data.gpio_bit = 7;
-
-       /* Set vlynq0 resources */
-       vlynq_low_res[0].start = TITAN_REGS_VLYNQ0;
-       vlynq_low_res[0].end = TITAN_REGS_VLYNQ0 + 0xff;
-       vlynq_low_res[1].start = 33;
-       vlynq_low_res[1].end = 33;
-       vlynq_low_res[2].start = 0x0c000000;
-       vlynq_low_res[2].end = 0x0fffffff;
-       vlynq_low_res[3].start = 80;
-       vlynq_low_res[3].end = 111;
-
-       /* Set vlynq1 resources */
-       vlynq_high_res[0].start = TITAN_REGS_VLYNQ1;
-       vlynq_high_res[0].end = TITAN_REGS_VLYNQ1 + 0xff;
-       vlynq_high_res[1].start = 34;
-       vlynq_high_res[1].end = 34;
-       vlynq_high_res[2].start = 0x40000000;
-       vlynq_high_res[2].end = 0x43ffffff;
-       vlynq_high_res[3].start = 112;
-       vlynq_high_res[3].end = 143;
-
-       /* Set cpmac0 data */
-       cpmac_low_data.phy_mask = 0x40000000;
-
-       /* Set cpmac1 data */
-       cpmac_high_data.phy_mask = 0x80000000;
-
-       /* Set cpmac0 resources */
-       cpmac_low_res[0].start = TITAN_REGS_MAC0;
-       cpmac_low_res[0].end = TITAN_REGS_MAC0 + 0x7ff;
-
-       /* Set cpmac1 resources */
-       cpmac_high_res[0].start = TITAN_REGS_MAC1;
-       cpmac_high_res[0].end = TITAN_REGS_MAC1 + 0x7ff;
-}
-
-static int __init ar7_register_devices(void)
-{
-       void __iomem *bootcr;
-       u32 val;
-       int res;
-
-       res = ar7_gpio_init();
-       if (res)
-               pr_warn("unable to register gpios: %d\n", res);
-
-       res = ar7_register_uarts();
-       if (res)
-               pr_err("unable to setup uart(s): %d\n", res);
-
-       res = platform_device_register(&physmap_flash);
-       if (res)
-               pr_warn("unable to register physmap-flash: %d\n", res);
-
-       if (ar7_is_titan())
-               titan_fixup_devices();
-
-       ar7_device_disable(vlynq_low_data.reset_bit);
-       res = platform_device_register(&vlynq_low);
-       if (res)
-               pr_warn("unable to register vlynq-low: %d\n", res);
-
-       if (ar7_has_high_vlynq()) {
-               ar7_device_disable(vlynq_high_data.reset_bit);
-               res = platform_device_register(&vlynq_high);
-               if (res)
-                       pr_warn("unable to register vlynq-high: %d\n", res);
-       }
-
-       if (ar7_has_high_cpmac()) {
-               res = fixed_phy_add(PHY_POLL, cpmac_high.id,
-                                   &fixed_phy_status);
-               if (!res) {
-                       cpmac_get_mac(1, cpmac_high_data.dev_addr);
-
-                       res = platform_device_register(&cpmac_high);
-                       if (res)
-                               pr_warn("unable to register cpmac-high: %d\n",
-                                       res);
-               } else
-                       pr_warn("unable to add cpmac-high phy: %d\n", res);
-       } else
-               cpmac_low_data.phy_mask = 0xffffffff;
-
-       res = fixed_phy_add(PHY_POLL, cpmac_low.id, &fixed_phy_status);
-       if (!res) {
-               cpmac_get_mac(0, cpmac_low_data.dev_addr);
-               res = platform_device_register(&cpmac_low);
-               if (res)
-                       pr_warn("unable to register cpmac-low: %d\n", res);
-       } else
-               pr_warn("unable to add cpmac-low phy: %d\n", res);
-
-       detect_leds();
-       res = platform_device_register(&ar7_gpio_leds);
-       if (res)
-               pr_warn("unable to register leds: %d\n", res);
-
-       res = platform_device_register(&ar7_udc);
-       if (res)
-               pr_warn("unable to register usb slave: %d\n", res);
-
-       /* Register watchdog only if enabled in hardware */
-       bootcr = ioremap(AR7_REGS_DCL, 4);
-       val = readl(bootcr);
-       iounmap(bootcr);
-       if (val & AR7_WDT_HW_ENA) {
-               if (ar7_has_high_vlynq())
-                       ar7_wdt_res.start = UR8_REGS_WDT;
-               else
-                       ar7_wdt_res.start = AR7_REGS_WDT;
-
-               ar7_wdt_res.end = ar7_wdt_res.start + 0x20;
-               res = platform_device_register(&ar7_wdt);
-               if (res)
-                       pr_warn("unable to register watchdog: %d\n", res);
-       }
-
-       return 0;
-}
-device_initcall(ar7_register_devices);
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
deleted file mode 100644 (file)
index 5810d39..0000000
+++ /dev/null
@@ -1,256 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * Putting things on the screen/serial line using YAMONs facilities.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <asm/bootinfo.h>
-#include <asm/setup.h>
-
-#include <asm/mach-ar7/ar7.h>
-#include <asm/mach-ar7/prom.h>
-
-#define MAX_ENTRY 80
-
-struct env_var {
-       char    *name;
-       char    *value;
-};
-
-static struct env_var adam2_env[MAX_ENTRY];
-
-char *prom_getenv(const char *name)
-{
-       int i;
-
-       for (i = 0; (i < MAX_ENTRY) && adam2_env[i].name; i++)
-               if (!strcmp(name, adam2_env[i].name))
-                       return adam2_env[i].value;
-
-       return NULL;
-}
-EXPORT_SYMBOL(prom_getenv);
-
-static void  __init ar7_init_cmdline(int argc, char *argv[])
-{
-       int i;
-
-       for (i = 1; i < argc; i++) {
-               strlcat(arcs_cmdline, argv[i], COMMAND_LINE_SIZE);
-               if (i < (argc - 1))
-                       strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE);
-       }
-}
-
-struct psbl_rec {
-       u32     psbl_size;
-       u32     env_base;
-       u32     env_size;
-       u32     ffs_base;
-       u32     ffs_size;
-};
-
-static const char psp_env_version[] __initconst = "TIENV0.8";
-
-struct psp_env_chunk {
-       u8      num;
-       u8      ctrl;
-       u16     csum;
-       u8      len;
-       char    data[11];
-} __packed;
-
-struct psp_var_map_entry {
-       u8      num;
-       char    *value;
-};
-
-static const struct psp_var_map_entry psp_var_map[] = {
-       {  1,   "cpufrequency" },
-       {  2,   "memsize" },
-       {  3,   "flashsize" },
-       {  4,   "modetty0" },
-       {  5,   "modetty1" },
-       {  8,   "maca" },
-       {  9,   "macb" },
-       { 28,   "sysfrequency" },
-       { 38,   "mipsfrequency" },
-};
-
-/*
-
-Well-known variable (num is looked up in table above for matching variable name)
-Example: cpufrequency=211968000
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-| 01 |CTRL|CHECKSUM | 01 | _2 | _1 | _1 | _9 | _6 | _8 | _0 | _0 | _0 | \0 | FF
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-
-Name=Value pair in a single chunk
-Example: NAME=VALUE
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-| 00 |CTRL|CHECKSUM | 01 | _N | _A | _M | _E | _0 | _V | _A | _L | _U | _E | \0
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-
-Name=Value pair in 2 chunks (len is the number of chunks)
-Example: bootloaderVersion=1.3.7.15
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-| 00 |CTRL|CHECKSUM | 02 | _b | _o | _o | _t | _l | _o | _a | _d | _e | _r | _V
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-| _e | _r | _s | _i | _o | _n | \0 | _1 | _. | _3 | _. | _7 | _. | _1 | _5 | \0
-+----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+---
-
-Data is padded with 0xFF
-
-*/
-
-#define PSP_ENV_SIZE  4096
-
-static char psp_env_data[PSP_ENV_SIZE] = { 0, };
-
-static char * __init lookup_psp_var_map(u8 num)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(psp_var_map); i++)
-               if (psp_var_map[i].num == num)
-                       return psp_var_map[i].value;
-
-       return NULL;
-}
-
-static void __init add_adam2_var(char *name, char *value)
-{
-       int i;
-
-       for (i = 0; i < MAX_ENTRY; i++) {
-               if (!adam2_env[i].name) {
-                       adam2_env[i].name = name;
-                       adam2_env[i].value = value;
-                       return;
-               } else if (!strcmp(adam2_env[i].name, name)) {
-                       adam2_env[i].value = value;
-                       return;
-               }
-       }
-}
-
-static int __init parse_psp_env(void *psp_env_base)
-{
-       int i, n;
-       char *name, *value;
-       struct psp_env_chunk *chunks = (struct psp_env_chunk *)psp_env_data;
-
-       memcpy_fromio(chunks, psp_env_base, PSP_ENV_SIZE);
-
-       i = 1;
-       n = PSP_ENV_SIZE / sizeof(struct psp_env_chunk);
-       while (i < n) {
-               if ((chunks[i].num == 0xff) || ((i + chunks[i].len) > n))
-                       break;
-               value = chunks[i].data;
-               if (chunks[i].num) {
-                       name = lookup_psp_var_map(chunks[i].num);
-               } else {
-                       name = value;
-                       value += strlen(name) + 1;
-               }
-               if (name)
-                       add_adam2_var(name, value);
-               i += chunks[i].len;
-       }
-       return 0;
-}
-
-static void __init ar7_init_env(struct env_var *env)
-{
-       int i;
-       struct psbl_rec *psbl = (struct psbl_rec *)(KSEG1ADDR(0x14000300));
-       void *psp_env = (void *)KSEG1ADDR(psbl->env_base);
-
-       if (strcmp(psp_env, psp_env_version) == 0) {
-               parse_psp_env(psp_env);
-       } else {
-               for (i = 0; i < MAX_ENTRY; i++, env++)
-                       if (env->name)
-                               add_adam2_var(env->name, env->value);
-       }
-}
-
-static void __init console_config(void)
-{
-#ifdef CONFIG_SERIAL_8250_CONSOLE
-       char console_string[40];
-       int baud = 0;
-       char parity = '\0', bits = '\0', flow = '\0';
-       char *s, *p;
-
-       if (strstr(arcs_cmdline, "console="))
-               return;
-
-       s = prom_getenv("modetty0");
-       if (s) {
-               baud = simple_strtoul(s, &p, 10);
-               s = p;
-               if (*s == ',')
-                       s++;
-               if (*s)
-                       parity = *s++;
-               if (*s == ',')
-                       s++;
-               if (*s)
-                       bits = *s++;
-               if (*s == ',')
-                       s++;
-               if (*s == 'h')
-                       flow = 'r';
-       }
-
-       if (baud == 0)
-               baud = 38400;
-       if (parity != 'n' && parity != 'o' && parity != 'e')
-               parity = 'n';
-       if (bits != '7' && bits != '8')
-               bits = '8';
-
-       if (flow == 'r')
-               sprintf(console_string, " console=ttyS0,%d%c%c%c", baud,
-                       parity, bits, flow);
-       else
-               sprintf(console_string, " console=ttyS0,%d%c%c", baud, parity,
-                       bits);
-       strlcat(arcs_cmdline, console_string, COMMAND_LINE_SIZE);
-#endif
-}
-
-void __init prom_init(void)
-{
-       ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
-       ar7_init_env((struct env_var *)fw_arg2);
-       console_config();
-}
-
-#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
-static inline unsigned int serial_in(int offset)
-{
-       return readl((void *)PORT(offset));
-}
-
-static inline void serial_out(int offset, int value)
-{
-       writel(value, (void *)PORT(offset));
-}
-
-void prom_putchar(char c)
-{
-       while ((serial_in(UART_LSR) & UART_LSR_TEMT) == 0)
-               ;
-       serial_out(UART_TX, c);
-}
diff --git a/arch/mips/ar7/setup.c b/arch/mips/ar7/setup.c
deleted file mode 100644 (file)
index 352d5db..0000000
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/pm.h>
-#include <linux/time.h>
-
-#include <asm/reboot.h>
-#include <asm/mach-ar7/ar7.h>
-#include <asm/mach-ar7/prom.h>
-
-static void ar7_machine_restart(char *command)
-{
-       u32 *softres_reg = ioremap(AR7_REGS_RESET + AR7_RESET_SOFTWARE, 1);
-
-       writel(1, softres_reg);
-}
-
-static void ar7_machine_halt(void)
-{
-       while (1)
-               ;
-}
-
-static void ar7_machine_power_off(void)
-{
-       u32 *power_reg = (u32 *)ioremap(AR7_REGS_POWER, 1);
-       u32 power_state = readl(power_reg) | (3 << 30);
-
-       writel(power_state, power_reg);
-       ar7_machine_halt();
-}
-
-const char *get_system_type(void)
-{
-       u16 chip_id = ar7_chip_id();
-       u16 titan_variant_id = titan_chip_id();
-
-       switch (chip_id) {
-       case AR7_CHIP_7100:
-               return "TI AR7 (TNETD7100)";
-       case AR7_CHIP_7200:
-               return "TI AR7 (TNETD7200)";
-       case AR7_CHIP_7300:
-               return "TI AR7 (TNETD7300)";
-       case AR7_CHIP_TITAN:
-               switch (titan_variant_id) {
-               case TITAN_CHIP_1050:
-                       return "TI AR7 (TNETV1050)";
-               case TITAN_CHIP_1055:
-                       return "TI AR7 (TNETV1055)";
-               case TITAN_CHIP_1056:
-                       return "TI AR7 (TNETV1056)";
-               case TITAN_CHIP_1060:
-                       return "TI AR7 (TNETV1060)";
-               }
-               fallthrough;
-       default:
-               return "TI AR7 (unknown)";
-       }
-}
-
-static int __init ar7_init_console(void)
-{
-       return 0;
-}
-console_initcall(ar7_init_console);
-
-/*
- * Initializes basic routines and structures pointers, memory size (as
- * given by the bios and saves the command line.
- */
-void __init plat_mem_setup(void)
-{
-       unsigned long io_base;
-
-       _machine_restart = ar7_machine_restart;
-       _machine_halt = ar7_machine_halt;
-       pm_power_off = ar7_machine_power_off;
-
-       io_base = (unsigned long)ioremap(AR7_REGS_BASE, 0x10000);
-       if (!io_base)
-               panic("Can't remap IO base!");
-       set_io_port_base(io_base);
-
-       prom_meminit();
-
-       printk(KERN_INFO "%s, ID: 0x%04x, Revision: 0x%02x\n",
-                       get_system_type(), ar7_chip_id(), ar7_chip_rev());
-}
diff --git a/arch/mips/ar7/time.c b/arch/mips/ar7/time.c
deleted file mode 100644 (file)
index 72aa77d..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * Setting up the clock on the MIPS boards.
- */
-
-#include <linux/init.h>
-#include <linux/time.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-
-#include <asm/time.h>
-#include <asm/mach-ar7/ar7.h>
-
-void __init plat_time_init(void)
-{
-       struct clk *cpu_clk;
-
-       /* Initialize ar7 clocks so the CPU clock frequency is correct */
-       ar7_init_clocks();
-
-       cpu_clk = clk_get(NULL, "cpu");
-       if (IS_ERR(cpu_clk)) {
-               printk(KERN_ERR "unable to get cpu clock\n");
-               return;
-       }
-
-       mips_hpt_frequency = clk_get_rate(cpu_clk) / 2;
-}
index 96d28f2111219696a0e1688d643be1d9621a1596..09dcd2c561d9afed5688ebb953e4b8678869c2ff 100644 (file)
 #define PORT(offset) (CKSEG1ADDR(UART_BASE) + (offset))
 #endif
 
-#ifdef CONFIG_AR7
-#include <ar7.h>
-#define PORT(offset) (CKSEG1ADDR(AR7_REGS_UART0) + (4 * offset))
-#endif
-
 #ifdef CONFIG_MACH_INGENIC
 #define INGENIC_UART_BASE_ADDR (0x10030000 + 0x1000 * CONFIG_ZBOOT_INGENIC_UART)
 #define PORT(offset) (CKSEG1ADDR(INGENIC_UART_BASE_ADDR) + (4 * offset))
index acbbe8c4664c110e9f978c4555ab8c814e3482f7..c5c5a094c37d2e08ac162c1e4f7d5ee6f471c5f9 100644 (file)
 
        rom: memory@1fc00000 {
                compatible = "mtd-rom";
-               probe-type = "map_rom";
                reg = <0x1fc00000 0x2000>;
 
                bank-width = <4>;
index 9c0099919db7aba9b231fcc6722a0c06df0091ce..504e895e916e57bf30fc894c4f9ede189b309013 100644 (file)
 
        rom: memory@1fc00000 {
                compatible = "mtd-rom";
-               probe-type = "map_rom";
                reg = <0x1fc00000 0x2000>;
 
                bank-width = <4>;
index 129b6710b699dfb8cb9eadcc14c8b887632dad55..f9c262cc2e96bf114986d1a0b0a3d002336dbdca 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        compatible = "gnubee,gb-pc1", "mediatek,mt7621-soc";
-       model = "GB-PC1";
+       model = "GnuBee GB-PC1";
 
        memory@0 {
                device_type = "memory";
index f810cd10f4f4fc391eea7fcca5abec602d21ced7..b281e13f22ed265f1f8ed63d701d1dd84d96ef01 100644 (file)
@@ -8,7 +8,7 @@
 
 / {
        compatible = "gnubee,gb-pc2", "mediatek,mt7621-soc";
-       model = "GB-PC2";
+       model = "GnuBee GB-PC2";
 
        memory@0 {
                device_type = "memory";
index 7caed0d14f11a68de7f309ba83d4990ebc5e5e37..35a10258f2357bba8f95834c8b84092c73871aaf 100644 (file)
                compatible = "mediatek,mt7621-eth";
                reg = <0x1e100000 0x10000>;
 
-               clocks = <&sysc MT7621_CLK_FE>,
-                        <&sysc MT7621_CLK_ETH>;
+               clocks = <&sysc MT7621_CLK_FE>, <&sysc MT7621_CLK_ETH>;
                clock-names = "fe", "ethif";
 
                #address-cells = <1>;
                #size-cells = <0>;
 
-               resets = <&sysc MT7621_RST_FE &sysc MT7621_RST_ETH>;
+               resets = <&sysc MT7621_RST_FE>, <&sysc MT7621_RST_ETH>;
                reset-names = "fe", "eth";
 
                interrupt-parent = <&gic>;
diff --git a/arch/mips/configs/ar7_defconfig b/arch/mips/configs/ar7_defconfig
deleted file mode 100644 (file)
index 329c60a..0000000
+++ /dev/null
@@ -1,119 +0,0 @@
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_KERNEL_LZMA=y
-CONFIG_SYSVIPC=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RELAY=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-# CONFIG_ELF_CORE is not set
-# CONFIG_KALLSYMS is not set
-# CONFIG_VM_EVENT_COUNTERS is not set
-# CONFIG_COMPAT_BRK is not set
-CONFIG_AR7=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC=y
-# CONFIG_SECCOMP is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_MULTIPATH=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_MROUTE=y
-CONFIG_SYN_COOKIES=y
-# CONFIG_INET_DIAG is not set
-CONFIG_TCP_CONG_ADVANCED=y
-# CONFIG_TCP_CONG_BIC is not set
-# CONFIG_TCP_CONG_CUBIC is not set
-CONFIG_TCP_CONG_WESTWOOD=y
-# CONFIG_TCP_CONG_HTCP is not set
-# CONFIG_IPV6 is not set
-CONFIG_NETFILTER=y
-# CONFIG_BRIDGE_NETFILTER is not set
-CONFIG_NF_CONNTRACK=m
-CONFIG_NF_CONNTRACK_MARK=y
-CONFIG_NF_CONNTRACK_FTP=m
-CONFIG_NF_CONNTRACK_IRC=m
-CONFIG_NF_CONNTRACK_TFTP=m
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
-CONFIG_NETFILTER_XT_MATCH_LIMIT=m
-CONFIG_NETFILTER_XT_MATCH_MAC=m
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
-CONFIG_NETFILTER_XT_MATCH_STATE=m
-CONFIG_IP_NF_IPTABLES=m
-CONFIG_IP_NF_FILTER=m
-CONFIG_IP_NF_TARGET_REJECT=m
-CONFIG_IP_NF_MANGLE=m
-CONFIG_IP_NF_RAW=m
-CONFIG_ATM=m
-CONFIG_ATM_BR2684=m
-CONFIG_ATM_BR2684_IPFILTER=y
-CONFIG_BRIDGE=y
-CONFIG_VLAN_8021Q=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_POLICE=y
-CONFIG_HAMRADIO=y
-CONFIG_CFG80211=m
-CONFIG_MAC80211=m
-CONFIG_MTD=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_CFI_STAA=y
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_NETDEVICES=y
-CONFIG_CPMAC=y
-CONFIG_FIXED_PHY=y
-CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPPOATM=m
-CONFIG_PPPOE=m
-CONFIG_PPP_ASYNC=m
-# CONFIG_INPUT is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_NR_UARTS=2
-CONFIG_SERIAL_8250_RUNTIME_UARTS=2
-CONFIG_HW_RANDOM=y
-CONFIG_GPIO_SYSFS=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_AR7_WDT=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-# CONFIG_DNOTIFY is not set
-CONFIG_PROC_KCORE=y
-# CONFIG_PROC_PAGE_MONITOR is not set
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_SQUASHFS=y
-# CONFIG_CRYPTO_HW is not set
-CONFIG_STRIP_ASM_SYMS=y
-CONFIG_DEBUG_FS=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="rootfstype=squashfs,jffs2"
index 1843468f84a30d4ac3a8bd8315b0aec6764a1875..00329bb5de5ab0cb43b5b38d0bb280335d22fe49 100644 (file)
@@ -177,7 +177,6 @@ CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
 CONFIG_EXT4_FS_SECURITY=y
-CONFIG_REISERFS_FS=m
 CONFIG_AUTOFS_FS=y
 CONFIG_FUSE_FS=y
 CONFIG_ISO9660_FS=m
index fdf3745741054736fb5cf2cacfea84963a03ecd1..65adb538030d02e0ae979c9d278930948ab155b9 100644 (file)
@@ -70,10 +70,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
 # CONFIG_HWMON is not set
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=m
 CONFIG_XFS_QUOTA=y
 CONFIG_AUTOFS_FS=m
index 83d9a8ff4270822c068c99dea345bc661b22158c..38f17b6584218739adbe4c8139f21be774101cf5 100644 (file)
@@ -229,9 +229,6 @@ CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_POSIX_ACL=y
 CONFIG_EXT3_FS_SECURITY=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
 CONFIG_XFS_FS=m
index ae1a7793e810f8a5d9549ec2162293be1fbda3e4..6f80460245573ea3f8ab8796e63b548c8bf58181 100644 (file)
@@ -317,11 +317,6 @@ CONFIG_UIO=m
 CONFIG_UIO_CIF=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
 CONFIG_JFS_SECURITY=y
index c07e30f63d8bc6094935ec356529a66d6b137719..16a91eeff67fe934f0c391f2d62d1bbdf6f7e0d6 100644 (file)
@@ -323,11 +323,6 @@ CONFIG_UIO=m
 CONFIG_UIO_CIF=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
 CONFIG_JFS_SECURITY=y
index 0a5701020d3f25b6afc11212b45699af25cd4ccf..264aba29ea4feaad739217808ddca339173372df 100644 (file)
@@ -323,11 +323,6 @@ CONFIG_UIO=m
 CONFIG_UIO_CIF=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_PROC_INFO=y
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_JFS_FS=m
 CONFIG_JFS_POSIX_ACL=y
 CONFIG_JFS_SECURITY=y
index 5c5e2186210cfd4e6401094bd68b6e6d4ba9516b..08e1c1f2f4debf122cbbdfd02932b7c1db437ed8 100644 (file)
@@ -310,10 +310,6 @@ CONFIG_USB_LD=m
 CONFIG_USB_TEST=m
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=m
-CONFIG_REISERFS_FS_XATTR=y
-CONFIG_REISERFS_FS_POSIX_ACL=y
-CONFIG_REISERFS_FS_SECURITY=y
 CONFIG_XFS_FS=m
 CONFIG_XFS_QUOTA=y
 CONFIG_AUTOFS_FS=m
index 68b1e5d458cfb6a28278289c37ca78e94e61230d..bc27d99c94363418b03ad0d2b48814e6472fdd4a 100644 (file)
@@ -71,8 +71,6 @@ struct kprobe_ctlblk {
        struct prev_kprobe prev_kprobe;
 };
 
-extern int kprobe_exceptions_notify(struct notifier_block *self,
-                                   unsigned long val, void *data);
 
 #endif /* CONFIG_KPROBES */
 #endif /* _ASM_KPROBES_H */
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
deleted file mode 100644 (file)
index 1e8621a..0000000
+++ /dev/null
@@ -1,191 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006,2007 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2006,2007 Eugene Konev <ejka@openwrt.org>
- */
-
-#ifndef __AR7_H__
-#define __AR7_H__
-
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/errno.h>
-
-#include <asm/addrspace.h>
-
-#define AR7_SDRAM_BASE 0x14000000
-
-#define AR7_REGS_BASE  0x08610000
-
-#define AR7_REGS_MAC0  (AR7_REGS_BASE + 0x0000)
-#define AR7_REGS_GPIO  (AR7_REGS_BASE + 0x0900)
-/* 0x08610A00 - 0x08610BFF (512 bytes, 128 bytes / clock) */
-#define AR7_REGS_POWER (AR7_REGS_BASE + 0x0a00)
-#define AR7_REGS_CLOCKS (AR7_REGS_POWER + 0x80)
-#define UR8_REGS_CLOCKS (AR7_REGS_POWER + 0x20)
-#define AR7_REGS_UART0 (AR7_REGS_BASE + 0x0e00)
-#define AR7_REGS_USB   (AR7_REGS_BASE + 0x1200)
-#define AR7_REGS_RESET (AR7_REGS_BASE + 0x1600)
-#define AR7_REGS_PINSEL (AR7_REGS_BASE + 0x160C)
-#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
-#define AR7_REGS_DCL   (AR7_REGS_BASE + 0x1a00)
-#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
-#define AR7_REGS_MDIO  (AR7_REGS_BASE + 0x1e00)
-#define AR7_REGS_IRQ   (AR7_REGS_BASE + 0x2400)
-#define AR7_REGS_MAC1  (AR7_REGS_BASE + 0x2800)
-
-#define AR7_REGS_WDT   (AR7_REGS_BASE + 0x1f00)
-#define UR8_REGS_WDT   (AR7_REGS_BASE + 0x0b00)
-#define UR8_REGS_UART1 (AR7_REGS_BASE + 0x0f00)
-
-/* Titan registers */
-#define TITAN_REGS_ESWITCH_BASE (0x08640000)
-#define TITAN_REGS_MAC0                (TITAN_REGS_ESWITCH_BASE)
-#define TITAN_REGS_MAC1                (TITAN_REGS_ESWITCH_BASE + 0x0800)
-#define TITAN_REGS_MDIO                (TITAN_REGS_ESWITCH_BASE + 0x02000)
-#define TITAN_REGS_VLYNQ0      (AR7_REGS_BASE + 0x1c00)
-#define TITAN_REGS_VLYNQ1      (AR7_REGS_BASE + 0x1300)
-
-#define AR7_RESET_PERIPHERAL   0x0
-#define AR7_RESET_SOFTWARE     0x4
-#define AR7_RESET_STATUS       0x8
-
-#define AR7_RESET_BIT_CPMAC_LO 17
-#define AR7_RESET_BIT_CPMAC_HI 21
-#define AR7_RESET_BIT_MDIO     22
-#define AR7_RESET_BIT_EPHY     26
-
-#define TITAN_RESET_BIT_EPHY1  28
-
-/* GPIO control registers */
-#define AR7_GPIO_INPUT 0x0
-#define AR7_GPIO_OUTPUT 0x4
-#define AR7_GPIO_DIR   0x8
-#define AR7_GPIO_ENABLE 0xc
-#define TITAN_GPIO_INPUT_0     0x0
-#define TITAN_GPIO_INPUT_1     0x4
-#define TITAN_GPIO_OUTPUT_0    0x8
-#define TITAN_GPIO_OUTPUT_1    0xc
-#define TITAN_GPIO_DIR_0       0x10
-#define TITAN_GPIO_DIR_1       0x14
-#define TITAN_GPIO_ENBL_0      0x18
-#define TITAN_GPIO_ENBL_1      0x1c
-
-#define AR7_CHIP_7100  0x18
-#define AR7_CHIP_7200  0x2b
-#define AR7_CHIP_7300  0x05
-#define AR7_CHIP_TITAN 0x07
-#define TITAN_CHIP_1050 0x0f
-#define TITAN_CHIP_1055 0x0e
-#define TITAN_CHIP_1056 0x0d
-#define TITAN_CHIP_1060 0x07
-
-/* Interrupts */
-#define AR7_IRQ_UART0  15
-#define AR7_IRQ_UART1  16
-
-/* Clocks */
-#define AR7_AFE_CLOCK  35328000
-#define AR7_REF_CLOCK  25000000
-#define AR7_XTAL_CLOCK 24000000
-
-/* DCL */
-#define AR7_WDT_HW_ENA 0x10
-
-struct plat_cpmac_data {
-       int reset_bit;
-       int power_bit;
-       u32 phy_mask;
-       char dev_addr[6];
-};
-
-struct plat_dsl_data {
-       int reset_bit_dsl;
-       int reset_bit_sar;
-};
-
-static inline int ar7_is_titan(void)
-{
-       return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x24)) & 0xffff) ==
-               AR7_CHIP_TITAN;
-}
-
-static inline u16 ar7_chip_id(void)
-{
-       return ar7_is_titan() ? AR7_CHIP_TITAN : (readl((void *)
-               KSEG1ADDR(AR7_REGS_GPIO + 0x14)) & 0xffff);
-}
-
-static inline u16 titan_chip_id(void)
-{
-       unsigned int val = readl((void *)KSEG1ADDR(AR7_REGS_GPIO +
-                                               TITAN_GPIO_INPUT_1));
-       return ((val >> 12) & 0x0f);
-}
-
-static inline u8 ar7_chip_rev(void)
-{
-       return (readl((void *)KSEG1ADDR(AR7_REGS_GPIO + (ar7_is_titan() ? 0x24 :
-               0x14))) >> 16) & 0xff;
-}
-
-static inline int ar7_has_high_cpmac(void)
-{
-       u16 chip_id = ar7_chip_id();
-       switch (chip_id) {
-       case AR7_CHIP_7100:
-       case AR7_CHIP_7200:
-               return 0;
-       case AR7_CHIP_7300:
-               return 1;
-       default:
-               return -ENXIO;
-       }
-}
-#define ar7_has_high_vlynq ar7_has_high_cpmac
-#define ar7_has_second_uart ar7_has_high_cpmac
-
-static inline void ar7_device_enable(u32 bit)
-{
-       void *reset_reg =
-               (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PERIPHERAL);
-       writel(readl(reset_reg) | (1 << bit), reset_reg);
-       msleep(20);
-}
-
-static inline void ar7_device_disable(u32 bit)
-{
-       void *reset_reg =
-               (void *)KSEG1ADDR(AR7_REGS_RESET + AR7_RESET_PERIPHERAL);
-       writel(readl(reset_reg) & ~(1 << bit), reset_reg);
-       msleep(20);
-}
-
-static inline void ar7_device_reset(u32 bit)
-{
-       ar7_device_disable(bit);
-       ar7_device_enable(bit);
-}
-
-static inline void ar7_device_on(u32 bit)
-{
-       void *power_reg = (void *)KSEG1ADDR(AR7_REGS_POWER);
-       writel(readl(power_reg) | (1 << bit), power_reg);
-       msleep(20);
-}
-
-static inline void ar7_device_off(u32 bit)
-{
-       void *power_reg = (void *)KSEG1ADDR(AR7_REGS_POWER);
-       writel(readl(power_reg) & ~(1 << bit), power_reg);
-       msleep(20);
-}
-
-int __init ar7_gpio_init(void);
-void __init ar7_init_clocks(void);
-
-/* Board specific GPIO functions */
-int ar7_gpio_enable(unsigned gpio);
-int ar7_gpio_disable(unsigned gpio);
-
-#endif /* __AR7_H__ */
diff --git a/arch/mips/include/asm/mach-ar7/irq.h b/arch/mips/include/asm/mach-ar7/irq.h
deleted file mode 100644 (file)
index 46bb730..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Shamelessly copied from asm-mips/mach-emma2rh/
- * Copyright (C) 2003 by Ralf Baechle
- */
-#ifndef __ASM_AR7_IRQ_H
-#define __ASM_AR7_IRQ_H
-
-#define NR_IRQS 256
-
-#include <asm/mach-generic/irq.h>
-
-#endif /* __ASM_AR7_IRQ_H */
diff --git a/arch/mips/include/asm/mach-ar7/prom.h b/arch/mips/include/asm/mach-ar7/prom.h
deleted file mode 100644 (file)
index 9e1d20b..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006, 2007 Florian Fainelli <florian@openwrt.org>
- */
-
-#ifndef __PROM_H__
-#define __PROM_H__
-
-extern char *prom_getenv(const char *name);
-extern void prom_meminit(void);
-
-#endif /* __PROM_H__ */
diff --git a/arch/mips/include/asm/mach-ar7/spaces.h b/arch/mips/include/asm/mach-ar7/spaces.h
deleted file mode 100644 (file)
index a004d94..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
- * Copyright (C) 2000, 2002  Maciej W. Rozycki
- * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
- */
-#ifndef _ASM_AR7_SPACES_H
-#define _ASM_AR7_SPACES_H
-
-/*
- * This handles the memory map.
- * We handle pages at KSEG0 for kernels with 32 bit address space.
- */
-#define PAGE_OFFSET    _AC(0x94000000, UL)
-#define PHYS_OFFSET    _AC(0x14000000, UL)
-
-#include <asm/mach-generic/spaces.h>
-
-#endif /* __ASM_AR7_SPACES_H */
diff --git a/arch/mips/include/asm/mach-loongson32/dma.h b/arch/mips/include/asm/mach-loongson32/dma.h
deleted file mode 100644 (file)
index e917b3c..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
- *
- * Loongson 1 NAND platform support.
- */
-
-#ifndef __ASM_MACH_LOONGSON32_DMA_H
-#define __ASM_MACH_LOONGSON32_DMA_H
-
-#define LS1X_DMA_CHANNEL0      0
-#define LS1X_DMA_CHANNEL1      1
-#define LS1X_DMA_CHANNEL2      2
-
-struct plat_ls1x_dma {
-       int nr_channels;
-};
-
-extern struct plat_ls1x_dma ls1b_dma_pdata;
-
-#endif /* __ASM_MACH_LOONGSON32_DMA_H */
diff --git a/arch/mips/include/asm/mach-loongson32/nand.h b/arch/mips/include/asm/mach-loongson32/nand.h
deleted file mode 100644 (file)
index aaf5ed1..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
- *
- * Loongson 1 NAND platform support.
- */
-
-#ifndef __ASM_MACH_LOONGSON32_NAND_H
-#define __ASM_MACH_LOONGSON32_NAND_H
-
-#include <linux/dmaengine.h>
-#include <linux/mtd/partitions.h>
-
-struct plat_ls1x_nand {
-       struct mtd_partition *parts;
-       unsigned int nr_parts;
-
-       int hold_cycle;
-       int wait_cycle;
-};
-
-extern struct plat_ls1x_nand ls1b_nand_pdata;
-
-bool ls1x_dma_filter_fn(struct dma_chan *chan, void *param);
-
-#endif /* __ASM_MACH_LOONGSON32_NAND_H */
index 2cdcfb5f6012d467b0f4daa2cae056562e11d953..f74292b13bc3ba7b15bc440adc4e3e4945279544 100644 (file)
@@ -8,9 +8,6 @@
 
 #include <linux/platform_device.h>
 
-#include <dma.h>
-#include <nand.h>
-
 extern struct platform_device ls1x_uart_pdev;
 extern struct platform_device ls1x_eth0_pdev;
 extern struct platform_device ls1x_eth1_pdev;
index f5b2ef979b4378c692607b84d7d10a5368e8b681..8f0a7263a9d61ff87ee2af5872e02a380bc6cebc 100644 (file)
@@ -66,7 +66,6 @@ copy_word:
        LONG_ADDIU      s6, s6, -1
        beq             s6, zero, process_entry
        b               copy_word
-       b               process_entry
 
 done:
 #ifdef CONFIG_SMP
index 8075590a9f8346d59873347e3d3bd2644280b6a5..623eb4bc7b41ea3e63a4fc249baf119729d0a1ba 100644 (file)
@@ -15,8 +15,6 @@
 
 #include <platform.h>
 #include <loongson1.h>
-#include <dma.h>
-#include <nand.h>
 
 /* 8250/16550 compatible UART */
 #define LS1X_UART(_id)                                         \
index fed8d432ef2068ade08e9696ed2879d1f87d3bd8..fe115bdcb22ce98054c5df283886bf2d77db222f 100644 (file)
@@ -8,8 +8,6 @@
 #include <linux/sizes.h>
 
 #include <loongson1.h>
-#include <dma.h>
-#include <nand.h>
 #include <platform.h>
 
 static const struct gpio_led ls1x_gpio_leds[] __initconst = {
index 105569c1b7127b6a03bb76abf812908d486e0160..13009666204f94a4c0eaf608af6ca59b5f41c137 100644 (file)
@@ -4,8 +4,8 @@
  *  Copyright (C) 2012 John Crispin <john@phrozen.org>
  */
 
-#include <linux/of_irq.h>
 #include <linux/of_pci.h>
+#include <linux/pci.h>
 
 int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
 int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;
index c8e4b4fd4e33020261861c76d0139b7abab792d5..4525a9c68260d9017dcc303e27c8e0d70dc6e5d9 100644 (file)
@@ -84,8 +84,6 @@ struct arch_optimized_insn {
        kprobe_opcode_t *insn;
 };
 
-extern int kprobe_exceptions_notify(struct notifier_block *self,
-                                       unsigned long val, void *data);
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 extern int kprobe_handler(struct pt_regs *regs);
 extern int kprobe_post_handler(struct pt_regs *regs);
index 3e28579f7c625b802194f0cd7e8613339aa73aa5..ebe259bdd46298e0654fb681b0cf8853c8381079 100644 (file)
@@ -1280,13 +1280,19 @@ struct iommu_table_group_ops spapr_tce_table_group_ops = {
 /*
  * A simple iommu_ops to allow less cruft in generic VFIO code.
  */
-static int spapr_tce_blocking_iommu_attach_dev(struct iommu_domain *dom,
-                                              struct device *dev)
+static int
+spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
+                                   struct device *dev)
 {
+       struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_group *grp = iommu_group_get(dev);
        struct iommu_table_group *table_group;
        int ret = -EINVAL;
 
+       /* At first attach the ownership is already set */
+       if (!domain)
+               return 0;
+
        if (!grp)
                return -ENODEV;
 
@@ -1297,17 +1303,22 @@ static int spapr_tce_blocking_iommu_attach_dev(struct iommu_domain *dom,
        return ret;
 }
 
-static void spapr_tce_blocking_iommu_set_platform_dma(struct device *dev)
-{
-       struct iommu_group *grp = iommu_group_get(dev);
-       struct iommu_table_group *table_group;
+static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
+       .attach_dev = spapr_tce_platform_iommu_attach_dev,
+};
 
-       table_group = iommu_group_get_iommudata(grp);
-       table_group->ops->release_ownership(table_group);
-}
+static struct iommu_domain spapr_tce_platform_domain = {
+       .type = IOMMU_DOMAIN_PLATFORM,
+       .ops = &spapr_tce_platform_domain_ops,
+};
 
-static const struct iommu_domain_ops spapr_tce_blocking_domain_ops = {
-       .attach_dev = spapr_tce_blocking_iommu_attach_dev,
+static struct iommu_domain spapr_tce_blocked_domain = {
+       .type = IOMMU_DOMAIN_BLOCKED,
+       /*
+        * FIXME: SPAPR mixes blocked and platform behaviors, the blocked domain
+        * also sets the dma_api ops
+        */
+       .ops = &spapr_tce_platform_domain_ops,
 };
 
 static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
@@ -1322,22 +1333,6 @@ static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
        return false;
 }
 
-static struct iommu_domain *spapr_tce_iommu_domain_alloc(unsigned int type)
-{
-       struct iommu_domain *dom;
-
-       if (type != IOMMU_DOMAIN_BLOCKED)
-               return NULL;
-
-       dom = kzalloc(sizeof(*dom), GFP_KERNEL);
-       if (!dom)
-               return NULL;
-
-       dom->ops = &spapr_tce_blocking_domain_ops;
-
-       return dom;
-}
-
 static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
 {
        struct pci_dev *pdev;
@@ -1371,12 +1366,12 @@ static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
 }
 
 static const struct iommu_ops spapr_tce_iommu_ops = {
+       .default_domain = &spapr_tce_platform_domain,
+       .blocked_domain = &spapr_tce_blocked_domain,
        .capable = spapr_tce_iommu_capable,
-       .domain_alloc = spapr_tce_iommu_domain_alloc,
        .probe_device = spapr_tce_iommu_probe_device,
        .release_device = spapr_tce_iommu_release_device,
        .device_group = spapr_tce_iommu_device_group,
-       .set_platform_dma_ops = spapr_tce_blocking_iommu_set_platform_dma,
 };
 
 static struct attribute *spapr_tce_iommu_attrs[] = {
index eaa15a20e6ae1537d14efceeafc3b62bd4273fc5..95a2a06acc6a62412894e491c3bfd5d4a161d15b 100644 (file)
@@ -39,6 +39,7 @@ config RISCV
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAS_UBSAN_SANITIZE_ALL
        select ARCH_HAS_VDSO_DATA
+       select ARCH_KEEP_MEMBLOCK if ACPI
        select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
        select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
        select ARCH_STACKWALK
@@ -48,6 +49,7 @@ config RISCV
        select ARCH_SUPPORTS_HUGETLBFS if MMU
        select ARCH_SUPPORTS_PAGE_TABLE_CHECK if MMU
        select ARCH_SUPPORTS_PER_VMA_LOCK if MMU
+       select ARCH_SUPPORTS_SHADOW_CALL_STACK if HAVE_SHADOW_CALL_STACK
        select ARCH_USE_MEMTEST
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_USES_CFI_TRAPS if CFI_CLANG
@@ -174,6 +176,11 @@ config GCC_SUPPORTS_DYNAMIC_FTRACE
        def_bool CC_IS_GCC
        depends on $(cc-option,-fpatchable-function-entry=8)
 
+config HAVE_SHADOW_CALL_STACK
+       def_bool $(cc-option,-fsanitize=shadow-call-stack)
+       # https://github.com/riscv-non-isa/riscv-elf-psabi-doc/commit/a484e843e6eeb51f0cb7b8819e50da6d2444d769
+       depends on $(ld-option,--no-relax-gp)
+
 config ARCH_MMAP_RND_BITS_MIN
        default 18 if 64BIT
        default 8
@@ -635,6 +642,15 @@ config THREAD_SIZE_ORDER
          Specify the Pages of thread stack size (from 4KB to 64KB), which also
          affects irq stack size, which is equal to thread stack size.
 
+config RISCV_MISALIGNED
+       bool "Support misaligned load/store traps for kernel and userspace"
+       select SYSCTL_ARCH_UNALIGN_ALLOW
+       default y
+       help
+         Say Y here if you want the kernel to embed support for misaligned
+         load/store for both kernel and userspace. When disable, misaligned
+         accesses will generate SIGBUS in userspace and panic in kernel.
+
 endmenu # "Platform type"
 
 menu "Kernel features"
@@ -902,6 +918,9 @@ config PORTABLE
        select MMU
        select OF
 
+config ARCH_PROC_KCORE_TEXT
+       def_bool y
+
 menu "Power management options"
 
 source "kernel/power/Kconfig"
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..eafe17ebf7102c93925eea57be96fd6ee1e191a1 100644 (file)
@@ -0,0 +1 @@
+source "arch/riscv/kernel/tests/Kconfig.debug"
index 4d06f3402674017a9e8fb030965cf8a041b2ee55..a74be78678eb0bcabf3d9571669a125401adb64d 100644 (file)
@@ -54,6 +54,10 @@ endif
 endif
 endif
 
+ifeq ($(CONFIG_SHADOW_CALL_STACK),y)
+       KBUILD_LDFLAGS += --no-relax-gp
+endif
+
 # ISA string setting
 riscv-march-$(CONFIG_ARCH_RV32I)       := rv32ima
 riscv-march-$(CONFIG_ARCH_RV64I)       := rv64ima
index 22b13947bd131e842ec36ac56be71cec69dd503e..8e7fc0edf21d3ecef979f217446bf815c6cd4917 100644 (file)
@@ -17,6 +17,7 @@
 KCOV_INSTRUMENT := n
 
 OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+OBJCOPYFLAGS_loader.bin :=-O binary
 OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
 
 targets := Image Image.* loader loader.o loader.lds loader.bin
index 1edf3cd886c54f13e96d2313fae48aafaafa3640..905881282a7cd115fa222a68faab57545e868e10 100644 (file)
@@ -37,6 +37,13 @@ CONFIG_SMP=y
 CONFIG_HOTPLUG_CPU=y
 CONFIG_PM=y
 CONFIG_CPU_IDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=m
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
+CONFIG_CPUFREQ_DT=y
 CONFIG_VIRTUALIZATION=y
 CONFIG_KVM=m
 CONFIG_ACPI=y
@@ -95,6 +102,7 @@ CONFIG_NETLINK_DIAG=y
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_NET_9P=y
 CONFIG_NET_9P_VIRTIO=y
+CONFIG_CAN=m
 CONFIG_PCI=y
 CONFIG_PCIEPORTBUS=y
 CONFIG_PCI_HOST_GENERIC=y
@@ -102,6 +110,11 @@ CONFIG_PCIE_XILINX=y
 CONFIG_PCIE_FU740=y
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_SPI_NOR=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_BLK_DEV_NVME=m
@@ -124,8 +137,11 @@ CONFIG_VIRTIO_NET=y
 CONFIG_MACB=y
 CONFIG_E1000E=y
 CONFIG_R8169=y
+CONFIG_RAVB=y
 CONFIG_STMMAC_ETH=m
+CONFIG_MICREL_PHY=y
 CONFIG_MICROSEMI_PHY=y
+CONFIG_CAN_RCAR_CANFD=m
 CONFIG_INPUT_MOUSEDEV=y
 CONFIG_KEYBOARD_SUN4I_LRADC=m
 CONFIG_SERIAL_8250=y
@@ -136,16 +152,24 @@ CONFIG_SERIAL_SH_SCI=y
 CONFIG_VIRTIO_CONSOLE=y
 CONFIG_HW_RANDOM=y
 CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_I2C_CHARDEV=m
 CONFIG_I2C_MV64XXX=m
+CONFIG_I2C_RIIC=y
 CONFIG_SPI=y
+CONFIG_SPI_RSPI=m
 CONFIG_SPI_SIFIVE=y
 CONFIG_SPI_SUN6I=y
 # CONFIG_PTP_1588_CLOCK is not set
 CONFIG_GPIO_SIFIVE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_RZG2L_THERMAL=y
 CONFIG_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
+CONFIG_RENESAS_RZG2LWDT=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_GPIO=y
 CONFIG_DRM=m
 CONFIG_DRM_RADEON=m
 CONFIG_DRM_NOUVEAU=m
@@ -153,39 +177,69 @@ CONFIG_DRM_SUN4I=m
 CONFIG_DRM_VIRTIO_GPU=m
 CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_RZ=m
+CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SIMPLE_CARD=m
 CONFIG_USB=y
+CONFIG_USB_OTG=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_PLATFORM=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_RENESAS_USBHS=m
 CONFIG_USB_STORAGE=y
 CONFIG_USB_UAS=y
 CONFIG_USB_MUSB_HDRC=m
 CONFIG_USB_MUSB_SUNXI=m
 CONFIG_NOP_USB_XCEIV=m
+CONFIG_USB_GADGET=y
+CONFIG_USB_RENESAS_USBHS_UDC=m
+CONFIG_USB_CONFIGFS=m
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_OBEX=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_ECM_SUBSET=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_CADENCE=y
 CONFIG_MMC_SPI=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_STARFIVE=y
+CONFIG_MMC_SDHI=y
 CONFIG_MMC_SUNXI=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_SUN6I=y
 CONFIG_DMADEVICES=y
 CONFIG_DMA_SUN6I=m
+CONFIG_RZ_DMAC=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_INPUT=y
 CONFIG_VIRTIO_MMIO=y
+CONFIG_RENESAS_OSTM=y
 CONFIG_SUN8I_DE2_CCU=m
 CONFIG_SUN50I_IOMMU=y
 CONFIG_RPMSG_CHAR=y
 CONFIG_RPMSG_CTRL=y
 CONFIG_RPMSG_VIRTIO=y
 CONFIG_ARCH_R9A07G043=y
+CONFIG_IIO=y
+CONFIG_RZG2L_ADC=m
+CONFIG_RESET_RZG2L_USBPHY_CTRL=y
 CONFIG_PHY_SUN4I_USB=m
+CONFIG_PHY_RCAR_GEN3_USB2=y
 CONFIG_LIBNVDIMM=y
 CONFIG_NVMEM_SUNXI_SID=y
 CONFIG_EXT4_FS=y
index d5604d2073bc2e51f17da549381e834eca6e7779..7dad0cf9d701f66135257753d0bb047fda59b5d2 100644 (file)
@@ -66,6 +66,8 @@ int acpi_get_riscv_isa(struct acpi_table_header *table,
                       unsigned int cpu, const char **isa);
 
 static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
+void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
+                            u32 *cboz_size, u32 *cbop_size);
 #else
 static inline void acpi_init_rintc_map(void) { }
 static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
@@ -79,6 +81,10 @@ static inline int acpi_get_riscv_isa(struct acpi_table_header *table,
        return -EINVAL;
 }
 
+static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
+                                          u32 *cbom_size, u32 *cboz_size,
+                                          u32 *cbop_size) { }
+
 #endif /* CONFIG_ACPI */
 
 #endif /*_ASM_ACPI_H*/
index 61ba8ed43d8feb7f2c87eec92c0d04c499ed6c68..36b955c762ba08e92ca0441ee8fbae9219c1f2fa 100644 (file)
@@ -25,7 +25,6 @@ DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
 DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
 DECLARE_DO_ERROR_INFO(do_trap_break);
 
-asmlinkage unsigned long get_overflow_stack(void);
 asmlinkage void handle_bad_stack(struct pt_regs *regs);
 asmlinkage void do_page_fault(struct pt_regs *regs);
 asmlinkage void do_irq(struct pt_regs *regs);
index 114bbadaef41ebb5c98a19cbf43f59b5d25d24c1..b0487b39e6747ae384fb51f310bf784759825240 100644 (file)
        .endr
 .endm
 
+#ifdef CONFIG_SMP
+#ifdef CONFIG_32BIT
+#define PER_CPU_OFFSET_SHIFT 2
+#else
+#define PER_CPU_OFFSET_SHIFT 3
+#endif
+
+.macro asm_per_cpu dst sym tmp
+       REG_L \tmp, TASK_TI_CPU_NUM(tp)
+       slli  \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+       la    \dst, __per_cpu_offset
+       add   \dst, \dst, \tmp
+       REG_L \tmp, 0(\dst)
+       la    \dst, \sym
+       add   \dst, \dst, \tmp
+.endm
+#else /* CONFIG_SMP */
+.macro asm_per_cpu dst sym tmp
+       la    \dst, \sym
+.endm
+#endif /* CONFIG_SMP */
+
+.macro load_per_cpu dst ptr tmp
+       asm_per_cpu \dst \ptr \tmp
+       REG_L \dst, 0(\dst)
+.endm
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+/* gp is used as the shadow call stack pointer instead */
+.macro load_global_pointer
+.endm
+#else
+/* load __global_pointer to gp */
+.macro load_global_pointer
+.option push
+.option norelax
+       la gp, __global_pointer$
+.option pop
+.endm
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
        /* save all GPs except x1 ~ x5 */
        .macro save_from_x6_to_x31
        REG_S x6,  PT_T1(sp)
index 65f6eee4ab8d7751d412c04b9a3f2d6ec078858e..224b4dc02b50bc6761cbef064445e472ef053ce2 100644 (file)
 #include <asm/barrier.h>
 #include <asm/bitsperlong.h>
 
+#if !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE)
 #include <asm-generic/bitops/__ffs.h>
-#include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+
+#else
+#include <asm/alternative-macros.h>
+#include <asm/hwcap.h>
+
+#if (BITS_PER_LONG == 64)
+#define CTZW   "ctzw "
+#define CLZW   "clzw "
+#elif (BITS_PER_LONG == 32)
+#define CTZW   "ctz "
+#define CLZW   "clz "
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+static __always_inline unsigned long variable__ffs(unsigned long word)
+{
+       int num;
+
+       asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+                                     RISCV_ISA_EXT_ZBB, 1)
+                         : : : : legacy);
+
+       asm volatile (".option push\n"
+                     ".option arch,+zbb\n"
+                     "ctz %0, %1\n"
+                     ".option pop\n"
+                     : "=r" (word) : "r" (word) :);
+
+       return word;
+
+legacy:
+       num = 0;
+#if BITS_PER_LONG == 64
+       if ((word & 0xffffffff) == 0) {
+               num += 32;
+               word >>= 32;
+       }
+#endif
+       if ((word & 0xffff) == 0) {
+               num += 16;
+               word >>= 16;
+       }
+       if ((word & 0xff) == 0) {
+               num += 8;
+               word >>= 8;
+       }
+       if ((word & 0xf) == 0) {
+               num += 4;
+               word >>= 4;
+       }
+       if ((word & 0x3) == 0) {
+               num += 2;
+               word >>= 2;
+       }
+       if ((word & 0x1) == 0)
+               num += 1;
+       return num;
+}
+
+/**
+ * __ffs - find first set bit in a long word
+ * @word: The word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+#define __ffs(word)                            \
+       (__builtin_constant_p(word) ?           \
+        (unsigned long)__builtin_ctzl(word) :  \
+        variable__ffs(word))
+
+static __always_inline unsigned long variable__fls(unsigned long word)
+{
+       int num;
+
+       asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+                                     RISCV_ISA_EXT_ZBB, 1)
+                         : : : : legacy);
+
+       asm volatile (".option push\n"
+                     ".option arch,+zbb\n"
+                     "clz %0, %1\n"
+                     ".option pop\n"
+                     : "=r" (word) : "r" (word) :);
+
+       return BITS_PER_LONG - 1 - word;
+
+legacy:
+       num = BITS_PER_LONG - 1;
+#if BITS_PER_LONG == 64
+       if (!(word & (~0ul << 32))) {
+               num -= 32;
+               word <<= 32;
+       }
+#endif
+       if (!(word & (~0ul << (BITS_PER_LONG - 16)))) {
+               num -= 16;
+               word <<= 16;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG - 8)))) {
+               num -= 8;
+               word <<= 8;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG - 4)))) {
+               num -= 4;
+               word <<= 4;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG - 2)))) {
+               num -= 2;
+               word <<= 2;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG - 1))))
+               num -= 1;
+       return num;
+}
+
+/**
+ * __fls - find last set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+#define __fls(word)                                                    \
+       (__builtin_constant_p(word) ?                                   \
+        (unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) :    \
+        variable__fls(word))
+
+static __always_inline int variable_ffs(int x)
+{
+       int r;
+
+       if (!x)
+               return 0;
+
+       asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+                                     RISCV_ISA_EXT_ZBB, 1)
+                         : : : : legacy);
+
+       asm volatile (".option push\n"
+                     ".option arch,+zbb\n"
+                     CTZW "%0, %1\n"
+                     ".option pop\n"
+                     : "=r" (r) : "r" (x) :);
+
+       return r + 1;
+
+legacy:
+       r = 1;
+       if (!(x & 0xffff)) {
+               x >>= 16;
+               r += 16;
+       }
+       if (!(x & 0xff)) {
+               x >>= 8;
+               r += 8;
+       }
+       if (!(x & 0xf)) {
+               x >>= 4;
+               r += 4;
+       }
+       if (!(x & 3)) {
+               x >>= 2;
+               r += 2;
+       }
+       if (!(x & 1)) {
+               x >>= 1;
+               r += 1;
+       }
+       return r;
+}
+
+/**
+ * ffs - find first set bit in a word
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler builtin ffs routines.
+ *
+ * ffs(value) returns 0 if value is 0 or the position of the first set bit if
+ * value is nonzero. The first (least significant) bit is at position 1.
+ */
+#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))
+
+static __always_inline int variable_fls(unsigned int x)
+{
+       int r;
+
+       if (!x)
+               return 0;
+
+       asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+                                     RISCV_ISA_EXT_ZBB, 1)
+                         : : : : legacy);
+
+       asm volatile (".option push\n"
+                     ".option arch,+zbb\n"
+                     CLZW "%0, %1\n"
+                     ".option pop\n"
+                     : "=r" (r) : "r" (x) :);
+
+       return 32 - r;
+
+legacy:
+       r = 32;
+       if (!(x & 0xffff0000u)) {
+               x <<= 16;
+               r -= 16;
+       }
+       if (!(x & 0xff000000u)) {
+               x <<= 8;
+               r -= 8;
+       }
+       if (!(x & 0xf0000000u)) {
+               x <<= 4;
+               r -= 4;
+       }
+       if (!(x & 0xc0000000u)) {
+               x <<= 2;
+               r -= 2;
+       }
+       if (!(x & 0x80000000u)) {
+               x <<= 1;
+               r -= 1;
+       }
+       return r;
+}
+
+/**
+ * fls - find last set bit in a word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as ffs, but returns the position of the most
+ * significant set bit.
+ *
+ * fls(value) returns 0 if value is 0 or the position of the last set bit if
+ * value is nonzero. The last (most significant) bit is at position 32.
+ */
+#define fls(x)                                                 \
+({                                                             \
+       typeof(x) x_ = (x);                                     \
+       __builtin_constant_p(x_) ?                              \
+        (int)((x_ != 0) ? (32 - __builtin_clz(x_)) : 0)        \
+        :                                                      \
+        variable_fls(x_);                                      \
+})
+
+#endif /* !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) */
+
+#include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/ffs.h>
 
 #include <asm-generic/bitops/hweight.h>
 
index d0345bd659c94f11f349e6d92b9acd6b178218f6..a418c3112cd60cf2207b455d78c4520539364fc3 100644 (file)
@@ -7,7 +7,10 @@
 #define _ASM_CPUFEATURE_H
 
 #include <linux/bitmap.h>
+#include <linux/jump_label.h>
 #include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
+#include <asm/errno.h>
 
 /*
  * These are probed via a device_initcall(), via either the SBI or directly
@@ -30,6 +33,104 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
 /* Per-cpu ISA extensions. */
 extern struct riscv_isainfo hart_isa[NR_CPUS];
 
-void check_unaligned_access(int cpu);
+void riscv_user_isa_enable(void);
+
+#ifdef CONFIG_RISCV_MISALIGNED
+bool unaligned_ctl_available(void);
+bool check_unaligned_access_emulated(int cpu);
+void unaligned_emulation_finish(void);
+#else
+static inline bool unaligned_ctl_available(void)
+{
+       return false;
+}
+
+static inline bool check_unaligned_access_emulated(int cpu)
+{
+       return false;
+}
+
+static inline void unaligned_emulation_finish(void) {}
+#endif
+
+unsigned long riscv_get_elf_hwcap(void);
+
+struct riscv_isa_ext_data {
+       const unsigned int id;
+       const char *name;
+       const char *property;
+};
+
+extern const struct riscv_isa_ext_data riscv_isa_ext[];
+extern const size_t riscv_isa_ext_count;
+extern bool riscv_isa_fallback;
+
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+#define riscv_isa_extension_available(isa_bitmap, ext) \
+       __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+
+static __always_inline bool
+riscv_has_extension_likely(const unsigned long ext)
+{
+       compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+                          "ext must be < RISCV_ISA_EXT_MAX");
+
+       if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+               asm_volatile_goto(
+               ALTERNATIVE("j  %l[l_no]", "nop", 0, %[ext], 1)
+               :
+               : [ext] "i" (ext)
+               :
+               : l_no);
+       } else {
+               if (!__riscv_isa_extension_available(NULL, ext))
+                       goto l_no;
+       }
+
+       return true;
+l_no:
+       return false;
+}
+
+static __always_inline bool
+riscv_has_extension_unlikely(const unsigned long ext)
+{
+       compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+                          "ext must be < RISCV_ISA_EXT_MAX");
+
+       if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+               asm_volatile_goto(
+               ALTERNATIVE("nop", "j   %l[l_yes]", 0, %[ext], 1)
+               :
+               : [ext] "i" (ext)
+               :
+               : l_yes);
+       } else {
+               if (__riscv_isa_extension_available(NULL, ext))
+                       goto l_yes;
+       }
+
+       return false;
+l_yes:
+       return true;
+}
+
+static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext)
+{
+       if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext))
+               return true;
+
+       return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
+}
+
+static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext)
+{
+       if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext))
+               return true;
+
+       return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
+}
 
 #endif
index b3b2dfbdf945efa2adfbc1f022979986ace2b983..06c236bfab53b323491ce6ae3bbdbbbcd6206318 100644 (file)
@@ -14,7 +14,7 @@
 #include <asm/auxvec.h>
 #include <asm/byteorder.h>
 #include <asm/cacheinfo.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 
 /*
  * These are used to set parameters in the core dumps.
index 6e4dee49d84b985a5ab1bb59b35ecc39a94ed0de..7ab5e34318c85fe05df525a5f80a49d25051bcd7 100644 (file)
@@ -8,4 +8,18 @@
 void handle_page_fault(struct pt_regs *regs);
 void handle_break(struct pt_regs *regs);
 
+#ifdef CONFIG_RISCV_MISALIGNED
+int handle_misaligned_load(struct pt_regs *regs);
+int handle_misaligned_store(struct pt_regs *regs);
+#else
+static inline int handle_misaligned_load(struct pt_regs *regs)
+{
+       return -1;
+}
+static inline int handle_misaligned_store(struct pt_regs *regs)
+{
+       return -1;
+}
+#endif
+
 #endif /* _ASM_RISCV_ENTRY_COMMON_H */
index b55b434f0059108342229b1e2013e351a7b81961..83ed25e4355343c25101882b7c0b31cf462af542 100644 (file)
@@ -95,31 +95,31 @@ asm volatile(ALTERNATIVE(                                           \
 #endif
 
 /*
- * dcache.ipa rs1 (invalidate, physical address)
+ * th.dcache.ipa rs1 (invalidate, physical address)
  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
  *   0000001    01010      rs1       000      00000  0001011
- * dache.iva rs1 (invalida, virtual address)
+ * th.dcache.iva rs1 (invalidate, virtual address)
  *   0000001    00110      rs1       000      00000  0001011
  *
- * dcache.cpa rs1 (clean, physical address)
+ * th.dcache.cpa rs1 (clean, physical address)
  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
  *   0000001    01001      rs1       000      00000  0001011
- * dcache.cva rs1 (clean, virtual address)
+ * th.dcache.cva rs1 (clean, virtual address)
  *   0000001    00101      rs1       000      00000  0001011
  *
- * dcache.cipa rs1 (clean then invalidate, physical address)
+ * th.dcache.cipa rs1 (clean then invalidate, physical address)
  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
  *   0000001    01011      rs1       000      00000  0001011
- * dcache.civa rs1 (... virtual address)
+ * th.dcache.civa rs1 (... virtual address)
  *   0000001    00111      rs1       000      00000  0001011
  *
- * sync.s (make sure all cache operations finished)
+ * th.sync.s (make sure all cache operations finished)
  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
  *   0000000    11001     00000      000      00000  0001011
  */
-#define THEAD_inval_A0 ".long 0x0265000b"
-#define THEAD_clean_A0 ".long 0x0255000b"
-#define THEAD_flush_A0 ".long 0x0275000b"
+#define THEAD_INVAL_A0 ".long 0x0265000b"
+#define THEAD_CLEAN_A0 ".long 0x0255000b"
+#define THEAD_FLUSH_A0 ".long 0x0275000b"
 #define THEAD_SYNC_S   ".long 0x0190000b"
 
 #define ALT_CMO_OP(_op, _start, _size, _cachesize)                     \
index 6fc51c1b34cf7097b449c1893968ab473eed9e90..06d30526ef3b837d4e6c7fe8d14cb39f11e676f1 100644 (file)
@@ -8,9 +8,6 @@
 #ifndef _ASM_RISCV_HWCAP_H
 #define _ASM_RISCV_HWCAP_H
 
-#include <asm/alternative-macros.h>
-#include <asm/errno.h>
-#include <linux/bits.h>
 #include <uapi/asm/hwcap.h>
 
 #define RISCV_ISA_EXT_a                ('a' - 'a')
 #define RISCV_ISA_EXT_SxAIA            RISCV_ISA_EXT_SSAIA
 #endif
 
-#ifndef __ASSEMBLY__
-
-#include <linux/jump_label.h>
-
-unsigned long riscv_get_elf_hwcap(void);
-
-struct riscv_isa_ext_data {
-       const unsigned int id;
-       const char *name;
-       const char *property;
-};
-
-extern const struct riscv_isa_ext_data riscv_isa_ext[];
-extern const size_t riscv_isa_ext_count;
-extern bool riscv_isa_fallback;
-
-unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
-
-#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
-
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
-#define riscv_isa_extension_available(isa_bitmap, ext) \
-       __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
-
-static __always_inline bool
-riscv_has_extension_likely(const unsigned long ext)
-{
-       compiletime_assert(ext < RISCV_ISA_EXT_MAX,
-                          "ext must be < RISCV_ISA_EXT_MAX");
-
-       if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
-               asm_volatile_goto(
-               ALTERNATIVE("j  %l[l_no]", "nop", 0, %[ext], 1)
-               :
-               : [ext] "i" (ext)
-               :
-               : l_no);
-       } else {
-               if (!__riscv_isa_extension_available(NULL, ext))
-                       goto l_no;
-       }
-
-       return true;
-l_no:
-       return false;
-}
-
-static __always_inline bool
-riscv_has_extension_unlikely(const unsigned long ext)
-{
-       compiletime_assert(ext < RISCV_ISA_EXT_MAX,
-                          "ext must be < RISCV_ISA_EXT_MAX");
-
-       if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
-               asm_volatile_goto(
-               ALTERNATIVE("nop", "j   %l[l_yes]", 0, %[ext], 1)
-               :
-               : [ext] "i" (ext)
-               :
-               : l_yes);
-       } else {
-               if (__riscv_isa_extension_available(NULL, ext))
-                       goto l_yes;
-       }
-
-       return false;
-l_yes:
-       return true;
-}
-
-#endif
-
 #endif /* _ASM_RISCV_HWCAP_H */
index 78936f4ff513307a59e62768838ae19a738f69d1..5c48f48e79a67823707806a3e7872e1ed6f3388c 100644 (file)
@@ -8,6 +8,11 @@
 
 #include <uapi/asm/hwprobe.h>
 
-#define RISCV_HWPROBE_MAX_KEY 5
+#define RISCV_HWPROBE_MAX_KEY 6
+
+static inline bool riscv_hwprobe_key_is_valid(__s64 key)
+{
+       return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
+}
 
 #endif
index 6960beb75f32942422de90583e6cbdaadcc78006..e27179b26086b376f7ff1babd8b026bd1f1167cc 100644 (file)
        INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51),              \
               __RD(0), RS1(gaddr), RS2(vmid))
 
-#define CBO_inval(base)                                                \
+#define CBO_INVAL(base)                                                \
        INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0),              \
               RS1(base), SIMM12(0))
 
-#define CBO_clean(base)                                                \
+#define CBO_CLEAN(base)                                                \
        INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0),              \
               RS1(base), SIMM12(1))
 
-#define CBO_flush(base)                                                \
+#define CBO_FLUSH(base)                                                \
        INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0),              \
               RS1(base), SIMM12(2))
 
-#define CBO_zero(base)                                         \
+#define CBO_ZERO(base)                                         \
        INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0),              \
               RS1(base), SIMM12(4))
 
index e4042d297580080c0c3dfffed0dd711f5f371ca1..6441ded3b0cf2cf894312be28aa9e731e151f2a8 100644 (file)
@@ -12,6 +12,9 @@
 
 DECLARE_PER_CPU(ulong *, irq_stack_ptr);
 
+asmlinkage void call_on_irq_stack(struct pt_regs *regs,
+                                 void (*func)(struct pt_regs *));
+
 #ifdef CONFIG_VMAP_STACK
 /*
  * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
index 5488ecc337b63fd5505c92165695131723d85bff..57e887bfa34cb70b3d829d13dbb80c3fc6bf7ba9 100644 (file)
@@ -33,8 +33,8 @@
 #define PAGE_OFFSET            _AC(CONFIG_PAGE_OFFSET, UL)
 #endif
 /*
- * By default, CONFIG_PAGE_OFFSET value corresponds to SV48 address space so
- * define the PAGE_OFFSET value for SV39.
+ * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so
+ * define the PAGE_OFFSET value for SV48 and SV39.
  */
 #define PAGE_OFFSET_L4         _AC(0xffffaf8000000000, UL)
 #define PAGE_OFFSET_L3         _AC(0xffffffd800000000, UL)
index 59ba1fbaf784931c680f5c9994080206d331fd54..00f3369570a83668c68dcdaaa15f085ba780f5e7 100644 (file)
@@ -33,4 +33,7 @@
                                          _PAGE_WRITE | _PAGE_EXEC |    \
                                          _PAGE_USER | _PAGE_GLOBAL))
 
+static const __maybe_unused int pgtable_l4_enabled;
+static const __maybe_unused int pgtable_l5_enabled;
+
 #endif /* _ASM_RISCV_PGTABLE_32_H */
index 7a5097202e15709dab5927d6be984570d40c5f41..9a2c780a11e9530bcad95a677553dabeba67bb5f 100644 (file)
@@ -126,14 +126,18 @@ enum napot_cont_order {
 
 /*
  * [63:59] T-Head Memory Type definitions:
- *
- * 00000 - NC   Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ * bit[63] SO - Strong Order
+ * bit[62] C - Cacheable
+ * bit[61] B - Bufferable
+ * bit[60] SH - Shareable
+ * bit[59] Sec - Trustable
+ * 00110 - NC   Weakly-ordered, Non-cacheable, Bufferable, Shareable, Non-trustable
  * 01110 - PMA  Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
- * 10000 - IO   Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ * 10010 - IO   Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable
  */
 #define _PAGE_PMA_THEAD                ((1UL << 62) | (1UL << 61) | (1UL << 60))
-#define _PAGE_NOCACHE_THEAD    0UL
-#define _PAGE_IO_THEAD         (1UL << 63)
+#define _PAGE_NOCACHE_THEAD    ((1UL << 61) | (1UL << 60))
+#define _PAGE_IO_THEAD         ((1UL << 63) | (1UL << 60))
 #define _PAGE_MTMASK_THEAD     (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
 
 static inline u64 riscv_page_mtmask(void)
index f896708e833127d12e4cb9851b0890a8d7c5d922..179bd4afece46a6b96c33088a14480cb6c18c429 100644 (file)
@@ -16,9 +16,9 @@
 #define _PAGE_GLOBAL    (1 << 5)    /* Global */
 #define _PAGE_ACCESSED  (1 << 6)    /* Set by hardware on any access */
 #define _PAGE_DIRTY     (1 << 7)    /* Set by hardware on any write */
-#define _PAGE_SOFT      (1 << 8)    /* Reserved for software */
+#define _PAGE_SOFT      (3 << 8)    /* Reserved for software */
 
-#define _PAGE_SPECIAL   _PAGE_SOFT
+#define _PAGE_SPECIAL   (1 << 8)    /* RSW: 0x1 */
 #define _PAGE_TABLE     _PAGE_PRESENT
 
 /*
index b2ba3f79cfe9a7c95080b1f7f30947ad95d4b8f6..294044429e8e15d9230f3b96c7c5579be68857f2 100644 (file)
@@ -291,6 +291,7 @@ static inline pte_t pud_pte(pud_t pud)
 }
 
 #ifdef CONFIG_RISCV_ISA_SVNAPOT
+#include <asm/cpufeature.h>
 
 static __always_inline bool has_svnapot(void)
 {
@@ -811,7 +812,7 @@ extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
  *     bit            5:       _PAGE_PROT_NONE (zero)
  *     bit            6:       exclusive marker
  *     bits      7 to 11:      swap type
- *     bits 11 to XLEN-1:      swap offset
+ *     bits 12 to XLEN-1:      swap offset
  */
 #define __SWP_TYPE_SHIFT       7
 #define __SWP_TYPE_BITS                5
@@ -914,7 +915,6 @@ extern uintptr_t _dtb_early_pa;
 #define dtb_early_pa   _dtb_early_pa
 #endif /* CONFIG_XIP_KERNEL */
 extern u64 satp_mode;
-extern bool pgtable_l4_enabled;
 
 void paging_init(void);
 void misc_mem_init(void);
index 441da1839c947803cdcbe177e4292e2b81a9db49..f19f861cda549014eee042efb651709f5da00475 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/const.h>
 #include <linux/cache.h>
+#include <linux/prctl.h>
 
 #include <vdso/processor.h>
 
@@ -82,6 +83,7 @@ struct thread_struct {
        unsigned long bad_cause;
        unsigned long vstate_ctrl;
        struct __riscv_v_ext_state vstate;
+       unsigned long align_ctl;
 };
 
 /* Whitelist the fstate from the task_struct for hardened usercopy */
@@ -94,6 +96,7 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
 
 #define INIT_THREAD {                                  \
        .sp = sizeof(init_stack) + (long)&init_stack,   \
+       .align_ctl = PR_UNALIGN_NOPRINT,                \
 }
 
 #define task_pt_regs(tsk)                                              \
@@ -136,6 +139,12 @@ extern long riscv_v_vstate_ctrl_set_current(unsigned long arg);
 extern long riscv_v_vstate_ctrl_get_current(void);
 #endif /* CONFIG_RISCV_ISA_V */
 
+extern int get_unalign_ctl(struct task_struct *tsk, unsigned long addr);
+extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+
+#define GET_UNALIGN_CTL(tsk, addr)     get_unalign_ctl((tsk), (addr))
+#define SET_UNALIGN_CTL(tsk, val)      set_unalign_ctl((tsk), (val))
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_RISCV_PROCESSOR_H */
index 12dfda6bb9242f402c729e1c08184e04a2f96eec..0892f4421bc4a5d0046750930b5637b355c15c26 100644 (file)
@@ -280,9 +280,6 @@ void sbi_set_timer(uint64_t stime_value);
 void sbi_shutdown(void);
 void sbi_send_ipi(unsigned int cpu);
 int sbi_remote_fence_i(const struct cpumask *cpu_mask);
-int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
-                          unsigned long start,
-                          unsigned long size);
 
 int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
                                unsigned long start,
diff --git a/arch/riscv/include/asm/scs.h b/arch/riscv/include/asm/scs.h
new file mode 100644 (file)
index 0000000..0e45db7
--- /dev/null
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SCS_H
+#define _ASM_SCS_H
+
+#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+
+/* Load init_shadow_call_stack to gp. */
+.macro scs_load_init_stack
+       la      gp, init_shadow_call_stack
+       XIP_FIXUP_OFFSET gp
+.endm
+
+/* Load the per-CPU IRQ shadow call stack to gp. */
+.macro scs_load_irq_stack tmp
+       load_per_cpu gp, irq_shadow_call_stack_ptr, \tmp
+.endm
+
+/* Load task_scs_sp(current) to gp. */
+.macro scs_load_current
+       REG_L   gp, TASK_TI_SCS_SP(tp)
+.endm
+
+/* Load task_scs_sp(current) to gp, but only if tp has changed. */
+.macro scs_load_current_if_task_changed prev
+       beq     \prev, tp, _skip_scs
+       scs_load_current
+_skip_scs:
+.endm
+
+/* Save gp to task_scs_sp(current). */
+.macro scs_save_current
+       REG_S   gp, TASK_TI_SCS_SP(tp)
+.endm
+
+#else /* CONFIG_SHADOW_CALL_STACK */
+
+.macro scs_load_init_stack
+.endm
+.macro scs_load_irq_stack tmp
+.endm
+.macro scs_load_current
+.endm
+.macro scs_load_current_if_task_changed prev
+.endm
+.macro scs_save_current
+.endm
+
+#endif /* CONFIG_SHADOW_CALL_STACK */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SCS_H */
index a727be723c5610f9ce2bc42de6f4dd2987c7536c..f90d8e42f3c7911908ec1f5f19929ab5ba67ff3a 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/jump_label.h>
 #include <linux/sched/task_stack.h>
 #include <asm/vector.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/csr.h>
index 1833beb00489c317c43a263afc926099a271cfed..574779900bfb339eeb446e49f4aae119fe382ae3 100644 (file)
@@ -34,9 +34,6 @@
 
 #ifndef __ASSEMBLY__
 
-extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)];
-extern unsigned long spin_shadow_stack;
-
 #include <asm/processor.h>
 #include <asm/csr.h>
 
@@ -60,8 +57,20 @@ struct thread_info {
        long                    user_sp;        /* User stack pointer */
        int                     cpu;
        unsigned long           syscall_work;   /* SYSCALL_WORK_ flags */
+#ifdef CONFIG_SHADOW_CALL_STACK
+       void                    *scs_base;
+       void                    *scs_sp;
+#endif
 };
 
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define INIT_SCS                                                       \
+       .scs_base       = init_shadow_call_stack,                       \
+       .scs_sp         = init_shadow_call_stack,
+#else
+#define INIT_SCS
+#endif
+
 /*
  * macros/functions for gaining access to the thread information structure
  *
@@ -71,6 +80,7 @@ struct thread_info {
 {                                              \
        .flags          = 0,                    \
        .preempt_count  = INIT_PREEMPT_COUNT,   \
+       INIT_SCS                                \
 }
 
 void arch_release_task_struct(struct task_struct *tsk);
index 120bcf2ed8a878554000f0d0ac73e14ec4aa6fa2..1eb5682b2af6065c9019e398df729f5b97a573c6 100644 (file)
@@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-       flush_tlb_mm(tlb->mm);
+#ifdef CONFIG_MMU
+       if (tlb->fullmm || tlb->need_flush_all)
+               flush_tlb_mm(tlb->mm);
+       else
+               flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
+                                  tlb_get_unmap_size(tlb));
+#endif
 }
 
 #endif /* _ASM_RISCV_TLB_H */
index a09196f8de688ea90123bb74fc21e080cde19f22..8f3418c5f1724ba45e412ca52e0ef59ba0140638 100644 (file)
@@ -11,6 +11,9 @@
 #include <asm/smp.h>
 #include <asm/errata_list.h>
 
+#define FLUSH_TLB_MAX_SIZE      ((unsigned long)-1)
+#define FLUSH_TLB_NO_ASID       ((unsigned long)-1)
+
 #ifdef CONFIG_MMU
 extern unsigned long asid_mask;
 
@@ -32,9 +35,12 @@ static inline void local_flush_tlb_page(unsigned long addr)
 #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+                       unsigned long end, unsigned int page_size);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end);
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -51,14 +57,15 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
        local_flush_tlb_all();
 }
 
-#define flush_tlb_mm(mm) flush_tlb_all()
-#endif /* !CONFIG_SMP || !CONFIG_MMU */
-
 /* Flush a range of kernel pages */
 static inline void flush_tlb_kernel_range(unsigned long start,
        unsigned long end)
 {
-       flush_tlb_all();
+       local_flush_tlb_all();
 }
 
+#define flush_tlb_mm(mm) flush_tlb_all()
+#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
+#endif /* !CONFIG_SMP || !CONFIG_MMU */
+
 #endif /* _ASM_RISCV_TLBFLUSH_H */
index 14f5d27783b85811a4f7e6e1d43c9b5ee9aca5a1..96b65a5396dfcfc8f60fab06e7e0cd1972e0271e 100644 (file)
@@ -14,7 +14,7 @@ static inline void cpu_relax(void)
        __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
 #endif
 
-#ifdef __riscv_zihintpause
+#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
        /*
         * Reduce instruction retirement.
         * This assumes the PC changes.
index c5ee07b3df071d16ad956fd62e6981403d9bf133..87aaef656257cbde40331aadaf1cb0b1ea374455 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <asm/ptrace.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/csr.h>
 #include <asm/asm.h>
 
index d696d6610231dd6cacacdeba77e7c96b6184d993..11a71b8533d5759ec724a8359d0ffa2a4f2e976d 100644 (file)
@@ -49,6 +49,7 @@ typedef union __riscv_fp_state elf_fpregset_t;
 #define R_RISCV_TLS_DTPREL64   9
 #define R_RISCV_TLS_TPREL32    10
 #define R_RISCV_TLS_TPREL64    11
+#define R_RISCV_IRELATIVE      58
 
 /* Relocation types not used by the dynamic linker */
 #define R_RISCV_BRANCH         16
@@ -81,7 +82,6 @@ typedef union __riscv_fp_state elf_fpregset_t;
 #define R_RISCV_ALIGN          43
 #define R_RISCV_RVC_BRANCH     44
 #define R_RISCV_RVC_JUMP       45
-#define R_RISCV_LUI            46
 #define R_RISCV_GPREL_I                47
 #define R_RISCV_GPREL_S                48
 #define R_RISCV_TPREL_I                49
@@ -93,6 +93,9 @@ typedef union __riscv_fp_state elf_fpregset_t;
 #define R_RISCV_SET16          55
 #define R_RISCV_SET32          56
 #define R_RISCV_32_PCREL       57
+#define R_RISCV_PLT32          59
+#define R_RISCV_SET_ULEB128    60
+#define R_RISCV_SUB_ULEB128    61
 
 
 #endif /* _UAPI_ASM_RISCV_ELF_H */
index d43e306ce2f92b1acf89c3fb61e94eb79e33ec52..b659ffcfcdb454cf4d12cd513bfcb23cf32b11c5 100644 (file)
@@ -29,6 +29,7 @@ struct riscv_hwprobe {
 #define                RISCV_HWPROBE_EXT_ZBA           (1 << 3)
 #define                RISCV_HWPROBE_EXT_ZBB           (1 << 4)
 #define                RISCV_HWPROBE_EXT_ZBS           (1 << 5)
+#define                RISCV_HWPROBE_EXT_ZICBOZ        (1 << 6)
 #define RISCV_HWPROBE_KEY_CPUPERF_0    5
 #define                RISCV_HWPROBE_MISALIGNED_UNKNOWN        (0 << 0)
 #define                RISCV_HWPROBE_MISALIGNED_EMULATED       (1 << 0)
@@ -36,6 +37,7 @@ struct riscv_hwprobe {
 #define                RISCV_HWPROBE_MISALIGNED_FAST           (3 << 0)
 #define                RISCV_HWPROBE_MISALIGNED_UNSUPPORTED    (4 << 0)
 #define                RISCV_HWPROBE_MISALIGNED_MASK           (7 << 0)
+#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE    6
 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
 
 #endif
index 95cf25d484052e88b39ca3a49b74cc2bb1453992..fee22a3d1b53462a33bad285d345465a0100e6a9 100644 (file)
@@ -57,9 +57,10 @@ obj-y        += stacktrace.o
 obj-y  += cacheinfo.o
 obj-y  += patch.o
 obj-y  += probes/
+obj-y  += tests/
 obj-$(CONFIG_MMU) += vdso.o vdso/
 
-obj-$(CONFIG_RISCV_M_MODE)     += traps_misaligned.o
+obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o
 obj-$(CONFIG_FPU)              += fpu.o
 obj-$(CONFIG_RISCV_ISA_V)      += vector.o
 obj-$(CONFIG_SMP)              += smpboot.o
index 56cb2c986c4857a9990fbfec9301df90f410162e..e619edc8b0cc972c1a83809c9f0ebeb10c3331a6 100644 (file)
  */
 
 #include <linux/acpi.h>
+#include <linux/efi.h>
 #include <linux/io.h>
+#include <linux/memblock.h>
 #include <linux/pci.h>
-#include <linux/efi.h>
 
 int acpi_noirq = 1;            /* skip ACPI IRQ initialization */
 int acpi_disabled = 1;
@@ -217,7 +218,89 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
 
 void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 {
-       return (void __iomem *)memremap(phys, size, MEMREMAP_WB);
+       efi_memory_desc_t *md, *region = NULL;
+       pgprot_t prot;
+
+       if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
+               return NULL;
+
+       for_each_efi_memory_desc(md) {
+               u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
+               if (phys < md->phys_addr || phys >= end)
+                       continue;
+
+               if (phys + size > end) {
+                       pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
+                       return NULL;
+               }
+               region = md;
+               break;
+       }
+
+       /*
+        * It is fine for AML to remap regions that are not represented in the
+        * EFI memory map at all, as it only describes normal memory, and MMIO
+        * regions that require a virtual mapping to make them accessible to
+        * the EFI runtime services.
+        */
+       prot = PAGE_KERNEL_IO;
+       if (region) {
+               switch (region->type) {
+               case EFI_LOADER_CODE:
+               case EFI_LOADER_DATA:
+               case EFI_BOOT_SERVICES_CODE:
+               case EFI_BOOT_SERVICES_DATA:
+               case EFI_CONVENTIONAL_MEMORY:
+               case EFI_PERSISTENT_MEMORY:
+                       if (memblock_is_map_memory(phys) ||
+                           !memblock_is_region_memory(phys, size)) {
+                               pr_warn(FW_BUG "requested region covers kernel memory\n");
+                               return NULL;
+                       }
+
+                       /*
+                        * Mapping kernel memory is permitted if the region in
+                        * question is covered by a single memblock with the
+                        * NOMAP attribute set: this enables the use of ACPI
+                        * table overrides passed via initramfs.
+                        * This particular use case only requires read access.
+                        */
+                       fallthrough;
+
+               case EFI_RUNTIME_SERVICES_CODE:
+                       /*
+                        * This would be unusual, but not problematic per se,
+                        * as long as we take care not to create a writable
+                        * mapping for executable code.
+                        */
+                       prot = PAGE_KERNEL_RO;
+                       break;
+
+               case EFI_ACPI_RECLAIM_MEMORY:
+                       /*
+                        * ACPI reclaim memory is used to pass firmware tables
+                        * and other data that is intended for consumption by
+                        * the OS only, which may decide it wants to reclaim
+                        * that memory and use it for something else. We never
+                        * do that, but we usually add it to the linear map
+                        * anyway, in which case we should use the existing
+                        * mapping.
+                        */
+                       if (memblock_is_map_memory(phys))
+                               return (void __iomem *)__va(phys);
+                       fallthrough;
+
+               default:
+                       if (region->attribute & EFI_MEMORY_WB)
+                               prot = PAGE_KERNEL;
+                       else if ((region->attribute & EFI_MEMORY_WC) ||
+                                (region->attribute & EFI_MEMORY_WT))
+                               prot = pgprot_writecombine(PAGE_KERNEL);
+               }
+       }
+
+       return ioremap_prot(phys, size, pgprot_val(prot));
 }
 
 #ifdef CONFIG_PCI
index d6a75aac1d27a53ddbee4e0d5e028eaa0ffeae85..a03129f40c464868e9352436e1273424c1da060b 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/thread_info.h>
 #include <asm/ptrace.h>
 #include <asm/cpu_ops_sbi.h>
+#include <asm/stacktrace.h>
 #include <asm/suspend.h>
 
 void asm_offsets(void);
@@ -38,7 +39,11 @@ void asm_offsets(void)
        OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
        OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
        OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+#ifdef CONFIG_SHADOW_CALL_STACK
+       OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp);
+#endif
 
+       OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
        OFFSET(TASK_THREAD_F0,  task_struct, thread.fstate.f[0]);
        OFFSET(TASK_THREAD_F1,  task_struct, thread.fstate.f[1]);
        OFFSET(TASK_THREAD_F2,  task_struct, thread.fstate.f[2]);
@@ -479,4 +484,8 @@ void asm_offsets(void)
        OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);
        OFFSET(SBI_HART_BOOT_TASK_PTR_OFFSET, sbi_hart_boot_data, task_ptr);
        OFFSET(SBI_HART_BOOT_STACK_PTR_OFFSET, sbi_hart_boot_data, stack_ptr);
+
+       DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe), STACK_ALIGN));
+       OFFSET(STACKFRAME_FP, stackframe, fp);
+       OFFSET(STACKFRAME_RA, stackframe, ra);
 }
index cfdecfbaad627153ead8cde6fe9ca7a298c1aa40..2b3d9398c113fbaea3f775b1439058e5a5178c7d 100644 (file)
@@ -9,7 +9,7 @@
 /* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
 /* Performs a memcpy without aligning buffers, using word loads and stores. */
 /* Note: The size is truncated to a multiple of 8 * SZREG */
-ENTRY(__riscv_copy_words_unaligned)
+SYM_FUNC_START(__riscv_copy_words_unaligned)
        andi  a4, a2, ~((8*SZREG)-1)
        beqz  a4, 2f
        add   a3, a1, a4
@@ -36,12 +36,12 @@ ENTRY(__riscv_copy_words_unaligned)
 
 2:
        ret
-END(__riscv_copy_words_unaligned)
+SYM_FUNC_END(__riscv_copy_words_unaligned)
 
 /* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
 /* Performs a memcpy without aligning buffers, using only byte accesses. */
 /* Note: The size is truncated to a multiple of 8 */
-ENTRY(__riscv_copy_bytes_unaligned)
+SYM_FUNC_START(__riscv_copy_bytes_unaligned)
        andi a4, a2, ~(8-1)
        beqz a4, 2f
        add  a3, a1, a4
@@ -68,4 +68,4 @@ ENTRY(__riscv_copy_bytes_unaligned)
 
 2:
        ret
-END(__riscv_copy_bytes_unaligned)
+SYM_FUNC_END(__riscv_copy_bytes_unaligned)
index c17dacb1141cb3ca9c077ec8e95c5f39dbc5fabd..d11d6320fb0d2db489f221ab2527297a247fd304 100644 (file)
@@ -125,13 +125,14 @@ old_interface:
  */
 int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
 {
-       int rc;
-
        for (; node; node = node->parent) {
                if (of_device_is_compatible(node, "riscv")) {
-                       rc = riscv_of_processor_hartid(node, hartid);
-                       if (!rc)
-                               return 0;
+                       *hartid = (unsigned long)of_get_cpu_hwid(node, 0);
+                       if (*hartid == ~0UL) {
+                               pr_warn("Found CPU without hart ID\n");
+                               return -ENODEV;
+                       }
+                       return 0;
                }
        }
 
@@ -202,9 +203,8 @@ arch_initcall(riscv_cpuinfo_init);
 
 #ifdef CONFIG_PROC_FS
 
-static void print_isa(struct seq_file *f)
+static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap)
 {
-       seq_puts(f, "isa\t\t: ");
 
        if (IS_ENABLED(CONFIG_32BIT))
                seq_write(f, "rv32", 4);
@@ -212,7 +212,7 @@ static void print_isa(struct seq_file *f)
                seq_write(f, "rv64", 4);
 
        for (int i = 0; i < riscv_isa_ext_count; i++) {
-               if (!__riscv_isa_extension_available(NULL, riscv_isa_ext[i].id))
+               if (!__riscv_isa_extension_available(isa_bitmap, riscv_isa_ext[i].id))
                        continue;
 
                /* Only multi-letter extensions are split by underscores */
@@ -276,7 +276,15 @@ static int c_show(struct seq_file *m, void *v)
 
        seq_printf(m, "processor\t: %lu\n", cpu_id);
        seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
-       print_isa(m);
+
+       /*
+        * For historical raisins, the isa: line is limited to the lowest common
+        * denominator of extensions supported across all harts. A true list of
+        * extensions supported on this hart is printed later in the hart isa:
+        * line.
+        */
+       seq_puts(m, "isa\t\t: ");
+       print_isa(m, NULL);
        print_mmu(m);
 
        if (acpi_disabled) {
@@ -292,6 +300,13 @@ static int c_show(struct seq_file *m, void *v)
        seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid);
        seq_printf(m, "marchid\t\t: 0x%lx\n", ci->marchid);
        seq_printf(m, "mimpid\t\t: 0x%lx\n", ci->mimpid);
+
+       /*
+        * Print the ISA extensions specific to this hart, which may show
+        * additional extensions not present across all harts.
+        */
+       seq_puts(m, "hart isa\t: ");
+       print_isa(m, hart_isa[cpu_id].isa);
        seq_puts(m, "\n");
 
        return 0;
index e3803822ab5a3a0ee36852cd9b08f9db8ee76fdc..b3785ffc15703cdf55efc2c523179dd2b64695c1 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/acpi.h>
 #include <linux/bitmap.h>
+#include <linux/cpuhotplug.h>
 #include <linux/ctype.h>
 #include <linux/log2.h>
 #include <linux/memory.h>
@@ -29,6 +30,7 @@
 
 #define MISALIGNED_ACCESS_JIFFIES_LG2 1
 #define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
 #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
 
 unsigned long elf_hwcap __read_mostly;
@@ -93,10 +95,10 @@ static bool riscv_isa_extension_check(int id)
                return true;
        case RISCV_ISA_EXT_ZICBOZ:
                if (!riscv_cboz_block_size) {
-                       pr_err("Zicboz detected in ISA string, but no cboz-block-size found\n");
+                       pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n");
                        return false;
                } else if (!is_power_of_2(riscv_cboz_block_size)) {
-                       pr_err("cboz-block-size present, but is not a power-of-2\n");
+                       pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n");
                        return false;
                }
                return true;
@@ -206,10 +208,11 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc
                switch (*ext) {
                case 's':
                        /*
-                        * Workaround for invalid single-letter 's' & 'u'(QEMU).
+                        * Workaround for invalid single-letter 's' & 'u' (QEMU).
                         * No need to set the bit in riscv_isa as 's' & 'u' are
-                        * not valid ISA extensions. It works until multi-letter
-                        * extension starting with "Su" appears.
+                        * not valid ISA extensions. It works unless the first
+                        * multi-letter extension in the ISA string begins with
+                        * "Su" and is not prefixed with an underscore.
                         */
                        if (ext[-1] != '_' && ext[1] == 'u') {
                                ++isa;
@@ -558,23 +561,21 @@ unsigned long riscv_get_elf_hwcap(void)
        return hwcap;
 }
 
-void check_unaligned_access(int cpu)
+static int check_unaligned_access(void *param)
 {
+       int cpu = smp_processor_id();
        u64 start_cycles, end_cycles;
        u64 word_cycles;
        u64 byte_cycles;
        int ratio;
        unsigned long start_jiffies, now;
-       struct page *page;
+       struct page *page = param;
        void *dst;
        void *src;
        long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
 
-       page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
-       if (!page) {
-               pr_warn("Can't alloc pages to measure memcpy performance");
-               return;
-       }
+       if (check_unaligned_access_emulated(cpu))
+               return 0;
 
        /* Make an unaligned destination buffer. */
        dst = (void *)((unsigned long)page_address(page) | 0x1);
@@ -628,7 +629,7 @@ void check_unaligned_access(int cpu)
                pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
                        cpu);
 
-               goto out;
+               return 0;
        }
 
        if (word_cycles < byte_cycles)
@@ -642,18 +643,90 @@ void check_unaligned_access(int cpu)
                (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
 
        per_cpu(misaligned_access_speed, cpu) = speed;
+       return 0;
+}
 
-out:
-       __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
+static void check_unaligned_access_nonboot_cpu(void *param)
+{
+       unsigned int cpu = smp_processor_id();
+       struct page **pages = param;
+
+       if (smp_processor_id() != 0)
+               check_unaligned_access(pages[cpu]);
 }
 
-static int check_unaligned_access_boot_cpu(void)
+static int riscv_online_cpu(unsigned int cpu)
 {
-       check_unaligned_access(0);
+       static struct page *buf;
+
+       /* We are already set since the last check */
+       if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+               return 0;
+
+       buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+       if (!buf) {
+               pr_warn("Allocation failure, not measuring misaligned performance\n");
+               return -ENOMEM;
+       }
+
+       check_unaligned_access(buf);
+       __free_pages(buf, MISALIGNED_BUFFER_ORDER);
        return 0;
 }
 
-arch_initcall(check_unaligned_access_boot_cpu);
+/* Measure unaligned access on all CPUs present at boot in parallel. */
+static int check_unaligned_access_all_cpus(void)
+{
+       unsigned int cpu;
+       unsigned int cpu_count = num_possible_cpus();
+       struct page **bufs = kzalloc(cpu_count * sizeof(struct page *),
+                                    GFP_KERNEL);
+
+       if (!bufs) {
+               pr_warn("Allocation failure, not measuring misaligned performance\n");
+               return 0;
+       }
+
+       /*
+        * Allocate separate buffers for each CPU so there's no fighting over
+        * cache lines.
+        */
+       for_each_cpu(cpu, cpu_online_mask) {
+               bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+               if (!bufs[cpu]) {
+                       pr_warn("Allocation failure, not measuring misaligned performance\n");
+                       goto out;
+               }
+       }
+
+       /* Check everybody except 0, who stays behind to tend jiffies. */
+       on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
+
+       /* Check core 0. */
+       smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
+
+       /* Setup hotplug callback for any new CPUs that come online. */
+       cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
+                                 riscv_online_cpu, NULL);
+
+out:
+       unaligned_emulation_finish();
+       for_each_cpu(cpu, cpu_online_mask) {
+               if (bufs[cpu])
+                       __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
+       }
+
+       kfree(bufs);
+       return 0;
+}
+
+arch_initcall(check_unaligned_access_all_cpus);
+
+void riscv_user_isa_enable(void)
+{
+       if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
+               csr_set(CSR_SENVCFG, ENVCFG_CBZE);
+}
 
 #ifdef CONFIG_RISCV_ALTERNATIVE
 /*
index 143a2bb3e69760b00aebae5be78d08475f98792a..54ca4564a92631388783a7978e8f49f40e556364 100644 (file)
@@ -9,10 +9,15 @@
 
 #include <asm/asm.h>
 #include <asm/csr.h>
+#include <asm/scs.h>
 #include <asm/unistd.h>
+#include <asm/page.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
 #include <asm/errata_list.h>
+#include <linux/sizes.h>
+
+       .section .irqentry.text, "ax"
 
 SYM_CODE_START(handle_exception)
        /*
@@ -21,9 +26,9 @@ SYM_CODE_START(handle_exception)
         * register will contain 0, and we should continue on the current TP.
         */
        csrrw tp, CSR_SCRATCH, tp
-       bnez tp, _save_context
+       bnez tp, .Lsave_context
 
-_restore_kernel_tpsp:
+.Lrestore_kernel_tpsp:
        csrr tp, CSR_SCRATCH
        REG_S sp, TASK_TI_KERNEL_SP(tp)
 
@@ -35,7 +40,7 @@ _restore_kernel_tpsp:
        REG_L sp, TASK_TI_KERNEL_SP(tp)
 #endif
 
-_save_context:
+.Lsave_context:
        REG_S sp, TASK_TI_USER_SP(tp)
        REG_L sp, TASK_TI_KERNEL_SP(tp)
        addi sp, sp, -(PT_SIZE_ON_STACK)
@@ -73,10 +78,11 @@ _save_context:
        csrw CSR_SCRATCH, x0
 
        /* Load the global pointer */
-.option push
-.option norelax
-       la gp, __global_pointer$
-.option pop
+       load_global_pointer
+
+       /* Load the kernel shadow call stack pointer if coming from userspace */
+       scs_load_current_if_task_changed s5
+
        move a0, sp /* pt_regs */
        la ra, ret_from_exception
 
@@ -123,6 +129,9 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
        addi s0, sp, PT_SIZE_ON_STACK
        REG_S s0, TASK_TI_KERNEL_SP(tp)
 
+       /* Save the kernel shadow call stack pointer */
+       scs_save_current
+
        /*
         * Save TP into the scratch register , so we can find the kernel data
         * structures again.
@@ -170,67 +179,15 @@ SYM_CODE_END(ret_from_exception)
 
 #ifdef CONFIG_VMAP_STACK
 SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
-       /*
-        * Takes the psuedo-spinlock for the shadow stack, in case multiple
-        * harts are concurrently overflowing their kernel stacks.  We could
-        * store any value here, but since we're overflowing the kernel stack
-        * already we only have SP to use as a scratch register.  So we just
-        * swap in the address of the spinlock, as that's definately non-zero.
-        *
-        * Pairs with a store_release in handle_bad_stack().
-        */
-1:     la sp, spin_shadow_stack
-       REG_AMOSWAP_AQ sp, sp, (sp)
-       bnez sp, 1b
-
-       la sp, shadow_stack
-       addi sp, sp, SHADOW_OVERFLOW_STACK_SIZE
+       /* we reach here from kernel context, sscratch must be 0 */
+       csrrw x31, CSR_SCRATCH, x31
+       asm_per_cpu sp, overflow_stack, x31
+       li x31, OVERFLOW_STACK_SIZE
+       add sp, sp, x31
+       /* zero out x31 again and restore x31 */
+       xor x31, x31, x31
+       csrrw x31, CSR_SCRATCH, x31
 
-       //save caller register to shadow stack
-       addi sp, sp, -(PT_SIZE_ON_STACK)
-       REG_S x1,  PT_RA(sp)
-       REG_S x5,  PT_T0(sp)
-       REG_S x6,  PT_T1(sp)
-       REG_S x7,  PT_T2(sp)
-       REG_S x10, PT_A0(sp)
-       REG_S x11, PT_A1(sp)
-       REG_S x12, PT_A2(sp)
-       REG_S x13, PT_A3(sp)
-       REG_S x14, PT_A4(sp)
-       REG_S x15, PT_A5(sp)
-       REG_S x16, PT_A6(sp)
-       REG_S x17, PT_A7(sp)
-       REG_S x28, PT_T3(sp)
-       REG_S x29, PT_T4(sp)
-       REG_S x30, PT_T5(sp)
-       REG_S x31, PT_T6(sp)
-
-       la ra, restore_caller_reg
-       tail get_overflow_stack
-
-restore_caller_reg:
-       //save per-cpu overflow stack
-       REG_S a0, -8(sp)
-       //restore caller register from shadow_stack
-       REG_L x1,  PT_RA(sp)
-       REG_L x5,  PT_T0(sp)
-       REG_L x6,  PT_T1(sp)
-       REG_L x7,  PT_T2(sp)
-       REG_L x10, PT_A0(sp)
-       REG_L x11, PT_A1(sp)
-       REG_L x12, PT_A2(sp)
-       REG_L x13, PT_A3(sp)
-       REG_L x14, PT_A4(sp)
-       REG_L x15, PT_A5(sp)
-       REG_L x16, PT_A6(sp)
-       REG_L x17, PT_A7(sp)
-       REG_L x28, PT_T3(sp)
-       REG_L x29, PT_T4(sp)
-       REG_L x30, PT_T5(sp)
-       REG_L x31, PT_T6(sp)
-
-       //load per-cpu overflow stack
-       REG_L sp, -8(sp)
        addi sp, sp, -(PT_SIZE_ON_STACK)
 
        //save context to overflow stack
@@ -268,6 +225,43 @@ SYM_CODE_START(ret_from_fork)
        tail syscall_exit_to_user_mode
 SYM_CODE_END(ret_from_fork)
 
+#ifdef CONFIG_IRQ_STACKS
+/*
+ * void call_on_irq_stack(struct pt_regs *regs,
+ *                       void (*func)(struct pt_regs *));
+ *
+ * Calls func(regs) using the per-CPU IRQ stack.
+ */
+SYM_FUNC_START(call_on_irq_stack)
+       /* Create a frame record to save ra and s0 (fp) */
+       addi    sp, sp, -STACKFRAME_SIZE_ON_STACK
+       REG_S   ra, STACKFRAME_RA(sp)
+       REG_S   s0, STACKFRAME_FP(sp)
+       addi    s0, sp, STACKFRAME_SIZE_ON_STACK
+
+       /* Switch to the per-CPU shadow call stack */
+       scs_save_current
+       scs_load_irq_stack t0
+
+       /* Switch to the per-CPU IRQ stack and call the handler */
+       load_per_cpu t0, irq_stack_ptr, t1
+       li      t1, IRQ_STACK_SIZE
+       add     sp, t0, t1
+       jalr    a1
+
+       /* Switch back to the thread shadow call stack */
+       scs_load_current
+
+       /* Switch back to the thread stack and restore ra and s0 */
+       addi    sp, s0, -STACKFRAME_SIZE_ON_STACK
+       REG_L   ra, STACKFRAME_RA(sp)
+       REG_L   s0, STACKFRAME_FP(sp)
+       addi    sp, sp, STACKFRAME_SIZE_ON_STACK
+
+       ret
+SYM_FUNC_END(call_on_irq_stack)
+#endif /* CONFIG_IRQ_STACKS */
+
 /*
  * Integer register context switch
  * The callee-saved registers must be saved and restored.
@@ -297,6 +291,8 @@ SYM_FUNC_START(__switch_to)
        REG_S s9,  TASK_THREAD_S9_RA(a3)
        REG_S s10, TASK_THREAD_S10_RA(a3)
        REG_S s11, TASK_THREAD_S11_RA(a3)
+       /* Save the kernel shadow call stack pointer */
+       scs_save_current
        /* Restore context from next->thread */
        REG_L ra,  TASK_THREAD_RA_RA(a4)
        REG_L sp,  TASK_THREAD_SP_RA(a4)
@@ -314,6 +310,8 @@ SYM_FUNC_START(__switch_to)
        REG_L s11, TASK_THREAD_S11_RA(a4)
        /* The offset of thread_info in task_struct is zero. */
        move tp, a1
+       /* Switch to the next shadow call stack */
+       scs_load_current
        ret
 SYM_FUNC_END(__switch_to)
 
@@ -324,7 +322,7 @@ SYM_FUNC_END(__switch_to)
        .section ".rodata"
        .align LGREG
        /* Exception vector table */
-SYM_CODE_START(excp_vect_table)
+SYM_DATA_START_LOCAL(excp_vect_table)
        RISCV_PTR do_trap_insn_misaligned
        ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
        RISCV_PTR do_trap_insn_illegal
@@ -342,12 +340,11 @@ SYM_CODE_START(excp_vect_table)
        RISCV_PTR do_page_fault   /* load page fault */
        RISCV_PTR do_trap_unknown
        RISCV_PTR do_page_fault   /* store page fault */
-excp_vect_table_end:
-SYM_CODE_END(excp_vect_table)
+SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)
 
 #ifndef CONFIG_MMU
-SYM_CODE_START(__user_rt_sigreturn)
+SYM_DATA_START(__user_rt_sigreturn)
        li a7, __NR_rt_sigreturn
        ecall
-SYM_CODE_END(__user_rt_sigreturn)
+SYM_DATA_END(__user_rt_sigreturn)
 #endif
index dd2205473de78571a5a4a4b68bd4b33302a77b31..2c543f130f9389aa4f1d9fee05f5027d99adaa78 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/csr.h>
 #include <asm/asm-offsets.h>
 
-ENTRY(__fstate_save)
+SYM_FUNC_START(__fstate_save)
        li  a2,  TASK_THREAD_F0
        add a0, a0, a2
        li t1, SR_FS
@@ -60,9 +60,9 @@ ENTRY(__fstate_save)
        sw t0, TASK_THREAD_FCSR_F0(a0)
        csrc CSR_STATUS, t1
        ret
-ENDPROC(__fstate_save)
+SYM_FUNC_END(__fstate_save)
 
-ENTRY(__fstate_restore)
+SYM_FUNC_START(__fstate_restore)
        li  a2,  TASK_THREAD_F0
        add a0, a0, a2
        li t1, SR_FS
@@ -103,4 +103,125 @@ ENTRY(__fstate_restore)
        fscsr t0
        csrc CSR_STATUS, t1
        ret
-ENDPROC(__fstate_restore)
+SYM_FUNC_END(__fstate_restore)
+
+#define get_f32(which) fmv.x.s a0, which; j 2f
+#define put_f32(which) fmv.s.x which, a1; j 2f
+#if __riscv_xlen == 64
+# define get_f64(which) fmv.x.d a0, which; j 2f
+# define put_f64(which) fmv.d.x which, a1; j 2f
+#else
+# define get_f64(which) fsd which, 0(a1); j 2f
+# define put_f64(which) fld which, 0(a1); j 2f
+#endif
+
+.macro fp_access_prologue
+       /*
+        * Compute jump offset to store the correct FP register since we don't
+        * have indirect FP register access
+        */
+       sll t0, a0, 3
+       la t2, 1f
+       add t0, t0, t2
+       li t1, SR_FS
+       csrs CSR_STATUS, t1
+       jr t0
+1:
+.endm
+
+.macro fp_access_epilogue
+2:
+       csrc CSR_STATUS, t1
+       ret
+.endm
+
+#define fp_access_body(__access_func) \
+       __access_func(f0); \
+       __access_func(f1); \
+       __access_func(f2); \
+       __access_func(f3); \
+       __access_func(f4); \
+       __access_func(f5); \
+       __access_func(f6); \
+       __access_func(f7); \
+       __access_func(f8); \
+       __access_func(f9); \
+       __access_func(f10); \
+       __access_func(f11); \
+       __access_func(f12); \
+       __access_func(f13); \
+       __access_func(f14); \
+       __access_func(f15); \
+       __access_func(f16); \
+       __access_func(f17); \
+       __access_func(f18); \
+       __access_func(f19); \
+       __access_func(f20); \
+       __access_func(f21); \
+       __access_func(f22); \
+       __access_func(f23); \
+       __access_func(f24); \
+       __access_func(f25); \
+       __access_func(f26); \
+       __access_func(f27); \
+       __access_func(f28); \
+       __access_func(f29); \
+       __access_func(f30); \
+       __access_func(f31)
+
+
+#ifdef CONFIG_RISCV_MISALIGNED
+
+/*
+ * Disable compressed instructions set to keep a constant offset between FP
+ * load/store/move instructions
+ */
+.option norvc
+/*
+ * put_f32_reg - Set a FP register from a register containing the value
+ * a0 = FP register index to be set
+ * a1 = value to be loaded in the FP register
+ */
+SYM_FUNC_START(put_f32_reg)
+       fp_access_prologue
+       fp_access_body(put_f32)
+       fp_access_epilogue
+SYM_FUNC_END(put_f32_reg)
+
+/*
+ * get_f32_reg - Get a FP register value and return it
+ * a0 = FP register index to be retrieved
+ */
+SYM_FUNC_START(get_f32_reg)
+       fp_access_prologue
+       fp_access_body(get_f32)
+       fp_access_epilogue
+SYM_FUNC_END(get_f32_reg)
+
+/*
+ * put_f64_reg - Set a 64 bits FP register from a value or a pointer.
+ * a0 = FP register index to be set
+ * a1 = value/pointer to be loaded in the FP register (when xlen == 32 bits, we
+ * load the value to a pointer).
+ */
+SYM_FUNC_START(put_f64_reg)
+       fp_access_prologue
+       fp_access_body(put_f64)
+       fp_access_epilogue
+SYM_FUNC_END(put_f64_reg)
+
+/*
+ * put_f64_reg - Get a 64 bits FP register value and returned it or store it to
+ *              a pointer.
+ * a0 = FP register index to be retrieved
+ * a1 = If xlen == 32, pointer which should be loaded with the FP register value
+ *     or unused if xlen == 64. In which case the FP register value is returned
+ *     through a0
+ */
+SYM_FUNC_START(get_f64_reg)
+       fp_access_prologue
+       fp_access_body(get_f64)
+       fp_access_epilogue
+SYM_FUNC_END(get_f64_reg)
+
+#endif /* CONFIG_RISCV_MISALIGNED */
index 3710ea5d160f30347ff1b8ebf79927ca466e6da1..b77397432403d9ef028fea6855cdc97aea143d00 100644 (file)
 #include <asm/cpu_ops_sbi.h>
 #include <asm/hwcap.h>
 #include <asm/image.h>
+#include <asm/scs.h>
 #include <asm/xip_fixup.h>
 #include "efi-header.S"
 
 __HEAD
-ENTRY(_start)
+SYM_CODE_START(_start)
        /*
         * Image header expected by Linux boot-loaders. The image header data
         * structure is described in asm/image.h.
@@ -110,10 +111,7 @@ relocate_enable_mmu:
        csrw CSR_TVEC, a0
 
        /* Reload the global pointer */
-.option push
-.option norelax
-       la gp, __global_pointer$
-.option pop
+       load_global_pointer
 
        /*
         * Switch to kernel page tables.  A full fence is necessary in order to
@@ -134,10 +132,7 @@ secondary_start_sbi:
        csrw CSR_IP, zero
 
        /* Load the global pointer */
-       .option push
-       .option norelax
-               la gp, __global_pointer$
-       .option pop
+       load_global_pointer
 
        /*
         * Disable FPU & VECTOR to detect illegal usage of
@@ -159,6 +154,7 @@ secondary_start_sbi:
        XIP_FIXUP_OFFSET a3
        add a3, a3, a1
        REG_L sp, (a3)
+       scs_load_current
 
 .Lsecondary_start_common:
 
@@ -168,12 +164,12 @@ secondary_start_sbi:
        XIP_FIXUP_OFFSET a0
        call relocate_enable_mmu
 #endif
-       call setup_trap_vector
+       call .Lsetup_trap_vector
        tail smp_callin
 #endif /* CONFIG_SMP */
 
 .align 2
-setup_trap_vector:
+.Lsetup_trap_vector:
        /* Set trap vector to exception handler */
        la a0, handle_exception
        csrw CSR_TVEC, a0
@@ -191,9 +187,9 @@ setup_trap_vector:
        wfi
        j .Lsecondary_park
 
-END(_start)
+SYM_CODE_END(_start)
 
-ENTRY(_start_kernel)
+SYM_CODE_START(_start_kernel)
        /* Mask all interrupts */
        csrw CSR_IE, zero
        csrw CSR_IP, zero
@@ -210,7 +206,7 @@ ENTRY(_start_kernel)
         * not implement PMPs, so we set up a quick trap handler to just skip
         * touching the PMPs on any trap.
         */
-       la a0, pmp_done
+       la a0, .Lpmp_done
        csrw CSR_TVEC, a0
 
        li a0, -1
@@ -218,7 +214,7 @@ ENTRY(_start_kernel)
        li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
        csrw CSR_PMPCFG0, a0
 .align 2
-pmp_done:
+.Lpmp_done:
 
        /*
         * The hartid in a0 is expected later on, and we have no firmware
@@ -228,10 +224,7 @@ pmp_done:
 #endif /* CONFIG_RISCV_M_MODE */
 
        /* Load the global pointer */
-.option push
-.option norelax
-       la gp, __global_pointer$
-.option pop
+       load_global_pointer
 
        /*
         * Disable FPU & VECTOR to detect illegal usage of
@@ -282,12 +275,12 @@ pmp_done:
        /* Clear BSS for flat non-ELF images */
        la a3, __bss_start
        la a4, __bss_stop
-       ble a4, a3, clear_bss_done
-clear_bss:
+       ble a4, a3, .Lclear_bss_done
+.Lclear_bss:
        REG_S zero, (a3)
        add a3, a3, RISCV_SZPTR
-       blt a3, a4, clear_bss
-clear_bss_done:
+       blt a3, a4, .Lclear_bss
+.Lclear_bss_done:
 #endif
        la a2, boot_cpu_hartid
        XIP_FIXUP_OFFSET a2
@@ -298,6 +291,7 @@ clear_bss_done:
        la sp, init_thread_union + THREAD_SIZE
        XIP_FIXUP_OFFSET sp
        addi sp, sp, -PT_SIZE_ON_STACK
+       scs_load_init_stack
 #ifdef CONFIG_BUILTIN_DTB
        la a0, __dtb_start
        XIP_FIXUP_OFFSET a0
@@ -311,11 +305,12 @@ clear_bss_done:
        call relocate_enable_mmu
 #endif /* CONFIG_MMU */
 
-       call setup_trap_vector
+       call .Lsetup_trap_vector
        /* Restore C environment */
        la tp, init_task
        la sp, init_thread_union + THREAD_SIZE
        addi sp, sp, -PT_SIZE_ON_STACK
+       scs_load_current
 
 #ifdef CONFIG_KASAN
        call kasan_early_init
@@ -353,10 +348,10 @@ clear_bss_done:
        tail .Lsecondary_start_common
 #endif /* CONFIG_RISCV_BOOT_SPINWAIT */
 
-END(_start_kernel)
+SYM_CODE_END(_start_kernel)
 
 #ifdef CONFIG_RISCV_M_MODE
-ENTRY(reset_regs)
+SYM_CODE_START_LOCAL(reset_regs)
        li      sp, 0
        li      gp, 0
        li      tp, 0
@@ -454,5 +449,5 @@ ENTRY(reset_regs)
 .Lreset_regs_done_vector:
 #endif /* CONFIG_RISCV_ISA_V */
        ret
-END(reset_regs)
+SYM_CODE_END(reset_regs)
 #endif /* CONFIG_RISCV_M_MODE */
index d698dd7df637ba8ad9263e2312f896c268a58b2d..d040dcf4add453dadaa328b85cd462ae43d6d31a 100644 (file)
@@ -21,7 +21,7 @@
  *
  * Always returns 0
  */
-ENTRY(__hibernate_cpu_resume)
+SYM_FUNC_START(__hibernate_cpu_resume)
        /* switch to hibernated image's page table. */
        csrw CSR_SATP, s0
        sfence.vma
@@ -34,7 +34,7 @@ ENTRY(__hibernate_cpu_resume)
        mv      a0, zero
 
        ret
-END(__hibernate_cpu_resume)
+SYM_FUNC_END(__hibernate_cpu_resume)
 
 /*
  * Prepare to restore the image.
@@ -42,7 +42,7 @@ END(__hibernate_cpu_resume)
  * a1: satp of temporary page tables.
  * a2: cpu_resume.
  */
-ENTRY(hibernate_restore_image)
+SYM_FUNC_START(hibernate_restore_image)
        mv      s0, a0
        mv      s1, a1
        mv      s2, a2
@@ -50,7 +50,7 @@ ENTRY(hibernate_restore_image)
        REG_L   a1, relocated_restore_code
 
        jr      a1
-END(hibernate_restore_image)
+SYM_FUNC_END(hibernate_restore_image)
 
 /*
  * The below code will be executed from a 'safe' page.
@@ -58,7 +58,7 @@ END(hibernate_restore_image)
  * back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
  * to restore the CPU context.
  */
-ENTRY(hibernate_core_restore_code)
+SYM_FUNC_START(hibernate_core_restore_code)
        /* switch to temp page table. */
        csrw satp, s1
        sfence.vma
@@ -73,4 +73,4 @@ ENTRY(hibernate_core_restore_code)
        bnez    s4, .Lcopy
 
        jr      s2
-END(hibernate_core_restore_code)
+SYM_FUNC_END(hibernate_core_restore_code)
index 9cc0a76692715ea6ff2ec56630f7b60ed0a37f92..9ceda02507cae9c73efac8c0589695b38d03e499 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 #include <linux/module.h>
+#include <linux/scs.h>
 #include <linux/seq_file.h>
 #include <asm/sbi.h>
 #include <asm/smp.h>
@@ -34,6 +35,24 @@ EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode);
 #ifdef CONFIG_IRQ_STACKS
 #include <asm/irq_stack.h>
 
+DECLARE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+DEFINE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
+#endif
+
+static void init_irq_scs(void)
+{
+       int cpu;
+
+       if (!scs_is_enabled())
+               return;
+
+       for_each_possible_cpu(cpu)
+               per_cpu(irq_shadow_call_stack_ptr, cpu) =
+                       scs_alloc(cpu_to_node(cpu));
+}
+
 DEFINE_PER_CPU(ulong *, irq_stack_ptr);
 
 #ifdef CONFIG_VMAP_STACK
@@ -61,40 +80,22 @@ static void init_irq_stacks(void)
 #endif /* CONFIG_VMAP_STACK */
 
 #ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
+static void ___do_softirq(struct pt_regs *regs)
+{
+       __do_softirq();
+}
+
 void do_softirq_own_stack(void)
 {
-#ifdef CONFIG_IRQ_STACKS
-       if (on_thread_stack()) {
-               ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
-                                       + IRQ_STACK_SIZE/sizeof(ulong);
-               __asm__ __volatile(
-               "addi   sp, sp, -"RISCV_SZPTR  "\n"
-               REG_S"  ra, (sp)                \n"
-               "addi   sp, sp, -"RISCV_SZPTR  "\n"
-               REG_S"  s0, (sp)                \n"
-               "addi   s0, sp, 2*"RISCV_SZPTR "\n"
-               "move   sp, %[sp]               \n"
-               "call   __do_softirq            \n"
-               "addi   sp, s0, -2*"RISCV_SZPTR"\n"
-               REG_L"  s0, (sp)                \n"
-               "addi   sp, sp, "RISCV_SZPTR   "\n"
-               REG_L"  ra, (sp)                \n"
-               "addi   sp, sp, "RISCV_SZPTR   "\n"
-               :
-               : [sp] "r" (sp)
-               : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
-                 "t0", "t1", "t2", "t3", "t4", "t5", "t6",
-#ifndef CONFIG_FRAME_POINTER
-                 "s0",
-#endif
-                 "memory");
-       } else
-#endif
+       if (on_thread_stack())
+               call_on_irq_stack(NULL, ___do_softirq);
+       else
                __do_softirq();
 }
 #endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */
 
 #else
+static void init_irq_scs(void) {}
 static void init_irq_stacks(void) {}
 #endif /* CONFIG_IRQ_STACKS */
 
@@ -106,6 +107,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 
 void __init init_IRQ(void)
 {
+       init_irq_scs();
        init_irq_stacks();
        irqchip_init();
        if (!handle_arch_irq)
index 059c5e216ae75958750474cc4e49ecc886213b73..de0a4b35d01efcc6b904fadb29eb8ae25da3df7c 100644 (file)
@@ -17,27 +17,17 @@ SYM_CODE_START(riscv_kexec_relocate)
         * s1: (const) Phys address to jump to after relocation
         * s2: (const) Phys address of the FDT image
         * s3: (const) The hartid of the current hart
-        * s4: Pointer to the destination address for the relocation
-        * s5: (const) Number of words per page
-        * s6: (const) 1, used for subtraction
-        * s7: (const) kernel_map.va_pa_offset, used when switching MMU off
-        * s8: (const) Physical address of the main loop
-        * s9: (debug) indirection page counter
-        * s10: (debug) entry counter
-        * s11: (debug) copied words counter
+        * s4: (const) kernel_map.va_pa_offset, used when switching MMU off
+        * s5: Pointer to the destination address for the relocation
+        * s6: (const) Physical address of the main loop
         */
        mv      s0, a0
        mv      s1, a1
        mv      s2, a2
        mv      s3, a3
-       mv      s4, zero
-       li      s5, (PAGE_SIZE / RISCV_SZPTR)
-       li      s6, 1
-       mv      s7, a4
-       mv      s8, zero
-       mv      s9, zero
-       mv      s10, zero
-       mv      s11, zero
+       mv      s4, a4
+       mv      s5, zero
+       mv      s6, zero
 
        /* Disable / cleanup interrupts */
        csrw    CSR_SIE, zero
@@ -52,21 +42,27 @@ SYM_CODE_START(riscv_kexec_relocate)
         * the start of the loop below so that we jump there in
         * any case.
         */
-       la      s8, 1f
-       sub     s8, s8, s7
-       csrw    CSR_STVEC, s8
+       la      s6, 1f
+       sub     s6, s6, s4
+       csrw    CSR_STVEC, s6
+
+       /*
+        * With C-extension, here we get 42 Bytes and the next
+        * .align directive would pad zeros here up to 44 Bytes.
+        * So manually put a nop here to avoid zeros padding.
+       */
+       nop
 
        /* Process entries in a loop */
 .align 2
 1:
-       addi    s10, s10, 1
        REG_L   t0, 0(s0)               /* t0 = *image->entry */
        addi    s0, s0, RISCV_SZPTR     /* image->entry++ */
 
        /* IND_DESTINATION entry ? -> save destination address */
        andi    t1, t0, 0x1
        beqz    t1, 2f
-       andi    s4, t0, ~0x1
+       andi    s5, t0, ~0x1
        j       1b
 
 2:
@@ -74,9 +70,8 @@ SYM_CODE_START(riscv_kexec_relocate)
        andi    t1, t0, 0x2
        beqz    t1, 2f
        andi    s0, t0, ~0x2
-       addi    s9, s9, 1
        csrw    CSR_SATP, zero
-       jalr    zero, s8, 0
+       jr      s6
 
 2:
        /* IND_DONE entry ? -> jump to done label */
@@ -92,14 +87,13 @@ SYM_CODE_START(riscv_kexec_relocate)
        andi    t1, t0, 0x8
        beqz    t1, 1b          /* Unknown entry type, ignore it */
        andi    t0, t0, ~0x8
-       mv      t3, s5          /* i = num words per page */
+       li      t3, (PAGE_SIZE / RISCV_SZPTR)   /* i = num words per page */
 3:     /* copy loop */
        REG_L   t1, (t0)        /* t1 = *src_ptr */
-       REG_S   t1, (s4)        /* *dst_ptr = *src_ptr */
+       REG_S   t1, (s5)        /* *dst_ptr = *src_ptr */
        addi    t0, t0, RISCV_SZPTR /* stc_ptr++ */
-       addi    s4, s4, RISCV_SZPTR /* dst_ptr++ */
-       sub     t3, t3, s6      /* i-- */
-       addi    s11, s11, 1     /* c++ */
+       addi    s5, s5, RISCV_SZPTR /* dst_ptr++ */
+       addi    t3, t3, -0x1    /* i-- */
        beqz    t3, 1b          /* copy done ? */
        j       3b
 
@@ -146,7 +140,7 @@ SYM_CODE_START(riscv_kexec_relocate)
         */
        fence.i
 
-       jalr    zero, a2, 0
+       jr      a2
 
 SYM_CODE_END(riscv_kexec_relocate)
 riscv_kexec_relocate_end:
index 669b8697aa38a5ed792ead53d73ce3057c62ab51..58dd96a2a15340ee83c473436a1b2cf25d407c1f 100644 (file)
@@ -82,7 +82,7 @@
        .endm
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
 
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
        SAVE_ABI
 
        addi    a0, t0, -FENTRY_RA_OFFSET
@@ -91,8 +91,7 @@ ENTRY(ftrace_caller)
        mv      a1, ra
        mv      a3, sp
 
-ftrace_call:
-       .global ftrace_call
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
        call    ftrace_stub
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,16 +101,15 @@ ftrace_call:
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        mv      a2, s0
 #endif
-ftrace_graph_call:
-       .global ftrace_graph_call
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
        call    ftrace_stub
 #endif
        RESTORE_ABI
        jr t0
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-ENTRY(ftrace_regs_caller)
+SYM_FUNC_START(ftrace_regs_caller)
        SAVE_ALL
 
        addi    a0, t0, -FENTRY_RA_OFFSET
@@ -120,8 +118,7 @@ ENTRY(ftrace_regs_caller)
        mv      a1, ra
        mv      a3, sp
 
-ftrace_regs_call:
-       .global ftrace_regs_call
+SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
        call    ftrace_stub
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -131,12 +128,11 @@ ftrace_regs_call:
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
        mv      a2, s0
 #endif
-ftrace_graph_regs_call:
-       .global ftrace_graph_regs_call
+SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL)
        call    ftrace_stub
 #endif
 
        RESTORE_ALL
        jr t0
-ENDPROC(ftrace_regs_caller)
+SYM_FUNC_END(ftrace_regs_caller)
 #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
index 8818a8fa9ff3af7b3427e2456f1650c5c6cb7168..b4dd9ed6849e30f13922a5ab4e398f87de984e9b 100644 (file)
@@ -61,7 +61,7 @@ SYM_TYPED_FUNC_START(ftrace_stub_graph)
        ret
 SYM_FUNC_END(ftrace_stub_graph)
 
-ENTRY(return_to_handler)
+SYM_FUNC_START(return_to_handler)
 /*
 * When implementing the frame pointer test, the ideal way is to compare the
  * s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
@@ -76,25 +76,25 @@ ENTRY(return_to_handler)
        mv      a2, a0
        RESTORE_RET_ABI_STATE
        jalr    a2
-ENDPROC(return_to_handler)
+SYM_FUNC_END(return_to_handler)
 #endif
 
 #ifndef CONFIG_DYNAMIC_FTRACE
-ENTRY(MCOUNT_NAME)
+SYM_FUNC_START(MCOUNT_NAME)
        la      t4, ftrace_stub
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        la      t0, ftrace_graph_return
        REG_L   t1, 0(t0)
-       bne     t1, t4, do_ftrace_graph_caller
+       bne     t1, t4, .Ldo_ftrace_graph_caller
 
        la      t3, ftrace_graph_entry
        REG_L   t2, 0(t3)
        la      t6, ftrace_graph_entry_stub
-       bne     t2, t6, do_ftrace_graph_caller
+       bne     t2, t6, .Ldo_ftrace_graph_caller
 #endif
        la      t3, ftrace_trace_function
        REG_L   t5, 0(t3)
-       bne     t5, t4, do_trace
+       bne     t5, t4, .Ldo_trace
        ret
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,7 +102,7 @@ ENTRY(MCOUNT_NAME)
  * A pseudo representation for the function graph tracer:
  * prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
  */
-do_ftrace_graph_caller:
+.Ldo_ftrace_graph_caller:
        addi    a0, s0, -SZREG
        mv      a1, ra
 #ifdef HAVE_FUNCTION_GRAPH_FP_TEST
@@ -118,7 +118,7 @@ do_ftrace_graph_caller:
  * A pseudo representation for the function tracer:
  * (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
  */
-do_trace:
+.Ldo_trace:
        REG_L   a1, -SZREG(s0)
        mv      a0, ra
 
@@ -126,6 +126,6 @@ do_trace:
        jalr    t5
        RESTORE_ABI_STATE
        ret
-ENDPROC(MCOUNT_NAME)
+SYM_FUNC_END(MCOUNT_NAME)
 #endif
 EXPORT_SYMBOL(MCOUNT_NAME)
index 7c651d55fcbd2ff402f18d3b7c8ebc470c492bfd..56a8c78e9e215eab146fac7ae9b645723f75a063 100644 (file)
@@ -7,6 +7,9 @@
 #include <linux/elf.h>
 #include <linux/err.h>
 #include <linux/errno.h>
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
 #include <linux/sizes.h>
 #include <asm/alternative.h>
 #include <asm/sections.h>
 
+struct used_bucket {
+       struct list_head head;
+       struct hlist_head *bucket;
+};
+
+struct relocation_head {
+       struct hlist_node node;
+       struct list_head *rel_entry;
+       void *location;
+};
+
+struct relocation_entry {
+       struct list_head head;
+       Elf_Addr value;
+       unsigned int type;
+};
+
+struct relocation_handlers {
+       int (*reloc_handler)(struct module *me, void *location, Elf_Addr v);
+       int (*accumulate_handler)(struct module *me, void *location,
+                                 long buffer);
+};
+
+unsigned int initialize_relocation_hashtable(unsigned int num_relocations);
+void process_accumulated_relocations(struct module *me);
+int add_relocation_to_accumulate(struct module *me, int type, void *location,
+                                unsigned int hashtable_bits, Elf_Addr v);
+
+struct hlist_head *relocation_hashtable;
+
+struct list_head used_buckets_list;
+
 /*
  * The auipc+jalr instruction pair can reach any PC-relative offset
  * in the range [-2^31 - 2^11, 2^31 - 2^11)
@@ -27,68 +62,90 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
 #endif
 }
 
-static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
+static int riscv_insn_rmw(void *location, u32 keep, u32 set)
+{
+       u16 *parcel = location;
+       u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
+
+       insn &= keep;
+       insn |= set;
+
+       parcel[0] = cpu_to_le16(insn);
+       parcel[1] = cpu_to_le16(insn >> 16);
+       return 0;
+}
+
+static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
+{
+       u16 *parcel = location;
+       u16 insn = le16_to_cpu(*parcel);
+
+       insn &= keep;
+       insn |= set;
+
+       *parcel = cpu_to_le16(insn);
+       return 0;
+}
+
+static int apply_r_riscv_32_rela(struct module *me, void *location, Elf_Addr v)
 {
        if (v != (u32)v) {
                pr_err("%s: value %016llx out of range for 32-bit field\n",
                       me->name, (long long)v);
                return -EINVAL;
        }
-       *location = v;
+       *(u32 *)location = v;
        return 0;
 }
 
-static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_riscv_64_rela(struct module *me, void *location, Elf_Addr v)
 {
        *(u64 *)location = v;
        return 0;
 }
 
-static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
+static int apply_r_riscv_branch_rela(struct module *me, void *location,
                                     Elf_Addr v)
 {
-       ptrdiff_t offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - location;
        u32 imm12 = (offset & 0x1000) << (31 - 12);
        u32 imm11 = (offset & 0x800) >> (11 - 7);
        u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
        u32 imm4_1 = (offset & 0x1e) << (11 - 4);
 
-       *location = (*location & 0x1fff07f) | imm12 | imm11 | imm10_5 | imm4_1;
-       return 0;
+       return riscv_insn_rmw(location, 0x1fff07f, imm12 | imm11 | imm10_5 | imm4_1);
 }
 
-static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
+static int apply_r_riscv_jal_rela(struct module *me, void *location,
                                  Elf_Addr v)
 {
-       ptrdiff_t offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - location;
        u32 imm20 = (offset & 0x100000) << (31 - 20);
        u32 imm19_12 = (offset & 0xff000);
        u32 imm11 = (offset & 0x800) << (20 - 11);
        u32 imm10_1 = (offset & 0x7fe) << (30 - 10);
 
-       *location = (*location & 0xfff) | imm20 | imm19_12 | imm11 | imm10_1;
-       return 0;
+       return riscv_insn_rmw(location, 0xfff, imm20 | imm19_12 | imm11 | imm10_1);
 }
 
-static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location,
+static int apply_r_riscv_rvc_branch_rela(struct module *me, void *location,
                                         Elf_Addr v)
 {
-       ptrdiff_t offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - location;
        u16 imm8 = (offset & 0x100) << (12 - 8);
        u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
        u16 imm5 = (offset & 0x20) >> (5 - 2);
        u16 imm4_3 = (offset & 0x18) << (12 - 5);
        u16 imm2_1 = (offset & 0x6) << (12 - 10);
 
-       *(u16 *)location = (*(u16 *)location & 0xe383) |
-                   imm8 | imm7_6 | imm5 | imm4_3 | imm2_1;
-       return 0;
+       return riscv_insn_rvc_rmw(location, 0xe383,
+                       imm8 | imm7_6 | imm5 | imm4_3 | imm2_1);
 }
 
-static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
+static int apply_r_riscv_rvc_jump_rela(struct module *me, void *location,
                                       Elf_Addr v)
 {
-       ptrdiff_t offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - location;
        u16 imm11 = (offset & 0x800) << (12 - 11);
        u16 imm10 = (offset & 0x400) >> (10 - 8);
        u16 imm9_8 = (offset & 0x300) << (12 - 11);
@@ -98,16 +155,14 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
        u16 imm4 = (offset & 0x10) << (12 - 5);
        u16 imm3_1 = (offset & 0xe) << (12 - 10);
 
-       *(u16 *)location = (*(u16 *)location & 0xe003) |
-                   imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1;
-       return 0;
+       return riscv_insn_rvc_rmw(location, 0xe003,
+                       imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1);
 }
 
-static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
+static int apply_r_riscv_pcrel_hi20_rela(struct module *me, void *location,
                                         Elf_Addr v)
 {
-       ptrdiff_t offset = (void *)v - (void *)location;
-       s32 hi20;
+       ptrdiff_t offset = (void *)v - location;
 
        if (!riscv_insn_valid_32bit_offset(offset)) {
                pr_err(
@@ -116,23 +171,20 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
                return -EINVAL;
        }
 
-       hi20 = (offset + 0x800) & 0xfffff000;
-       *location = (*location & 0xfff) | hi20;
-       return 0;
+       return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
 }
 
-static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, u32 *location,
+static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, void *location,
                                           Elf_Addr v)
 {
        /*
         * v is the lo12 value to fill. It is calculated before calling this
         * handler.
         */
-       *location = (*location & 0xfffff) | ((v & 0xfff) << 20);
-       return 0;
+       return riscv_insn_rmw(location, 0xfffff, (v & 0xfff) << 20);
 }
 
-static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
+static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, void *location,
                                           Elf_Addr v)
 {
        /*
@@ -142,15 +194,12 @@ static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
        u32 imm11_5 = (v & 0xfe0) << (31 - 11);
        u32 imm4_0 = (v & 0x1f) << (11 - 4);
 
-       *location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
-       return 0;
+       return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
 }
 
-static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
+static int apply_r_riscv_hi20_rela(struct module *me, void *location,
                                   Elf_Addr v)
 {
-       s32 hi20;
-
        if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
                pr_err(
                  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
@@ -158,22 +207,20 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
                return -EINVAL;
        }
 
-       hi20 = ((s32)v + 0x800) & 0xfffff000;
-       *location = (*location & 0xfff) | hi20;
-       return 0;
+       return riscv_insn_rmw(location, 0xfff, ((s32)v + 0x800) & 0xfffff000);
 }
 
-static int apply_r_riscv_lo12_i_rela(struct module *me, u32 *location,
+static int apply_r_riscv_lo12_i_rela(struct module *me, void *location,
                                     Elf_Addr v)
 {
        /* Skip medlow checking because of filtering by HI20 already */
        s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
        s32 lo12 = ((s32)v - hi20);
-       *location = (*location & 0xfffff) | ((lo12 & 0xfff) << 20);
-       return 0;
+
+       return riscv_insn_rmw(location, 0xfffff, (lo12 & 0xfff) << 20);
 }
 
-static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
+static int apply_r_riscv_lo12_s_rela(struct module *me, void *location,
                                     Elf_Addr v)
 {
        /* Skip medlow checking because of filtering by HI20 already */
@@ -181,20 +228,18 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
        s32 lo12 = ((s32)v - hi20);
        u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
        u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);
-       *location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
-       return 0;
+
+       return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
 }
 
-static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
+static int apply_r_riscv_got_hi20_rela(struct module *me, void *location,
                                       Elf_Addr v)
 {
-       ptrdiff_t offset = (void *)v - (void *)location;
-       s32 hi20;
+       ptrdiff_t offset = (void *)v - location;
 
        /* Always emit the got entry */
        if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
-               offset = module_emit_got_entry(me, v);
-               offset = (void *)offset - (void *)location;
+               offset = (void *)module_emit_got_entry(me, v) - location;
        } else {
                pr_err(
                  "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
@@ -202,22 +247,19 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
                return -EINVAL;
        }
 
-       hi20 = (offset + 0x800) & 0xfffff000;
-       *location = (*location & 0xfff) | hi20;
-       return 0;
+       return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
 }
 
-static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
+static int apply_r_riscv_call_plt_rela(struct module *me, void *location,
                                       Elf_Addr v)
 {
-       ptrdiff_t offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - location;
        u32 hi20, lo12;
 
        if (!riscv_insn_valid_32bit_offset(offset)) {
                /* Only emit the plt entry if offset over 32-bit range */
                if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
-                       offset = module_emit_plt_entry(me, v);
-                       offset = (void *)offset - (void *)location;
+                       offset = (void *)module_emit_plt_entry(me, v) - location;
                } else {
                        pr_err(
                          "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
@@ -228,15 +270,14 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
 
        hi20 = (offset + 0x800) & 0xfffff000;
        lo12 = (offset - hi20) & 0xfff;
-       *location = (*location & 0xfff) | hi20;
-       *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
-       return 0;
+       riscv_insn_rmw(location, 0xfff, hi20);
+       return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
 }
 
-static int apply_r_riscv_call_rela(struct module *me, u32 *location,
+static int apply_r_riscv_call_rela(struct module *me, void *location,
                                   Elf_Addr v)
 {
-       ptrdiff_t offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - location;
        u32 hi20, lo12;
 
        if (!riscv_insn_valid_32bit_offset(offset)) {
@@ -248,18 +289,17 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
 
        hi20 = (offset + 0x800) & 0xfffff000;
        lo12 = (offset - hi20) & 0xfff;
-       *location = (*location & 0xfff) | hi20;
-       *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
-       return 0;
+       riscv_insn_rmw(location, 0xfff, hi20);
+       return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
 }
 
-static int apply_r_riscv_relax_rela(struct module *me, u32 *location,
+static int apply_r_riscv_relax_rela(struct module *me, void *location,
                                    Elf_Addr v)
 {
        return 0;
 }
 
-static int apply_r_riscv_align_rela(struct module *me, u32 *location,
+static int apply_r_riscv_align_rela(struct module *me, void *location,
                                    Elf_Addr v)
 {
        pr_err(
@@ -268,91 +308,446 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location,
        return -EINVAL;
 }
 
-static int apply_r_riscv_add16_rela(struct module *me, u32 *location,
+static int apply_r_riscv_add8_rela(struct module *me, void *location, Elf_Addr v)
+{
+       *(u8 *)location += (u8)v;
+       return 0;
+}
+
+static int apply_r_riscv_add16_rela(struct module *me, void *location,
                                    Elf_Addr v)
 {
        *(u16 *)location += (u16)v;
        return 0;
 }
 
-static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
+static int apply_r_riscv_add32_rela(struct module *me, void *location,
                                    Elf_Addr v)
 {
        *(u32 *)location += (u32)v;
        return 0;
 }
 
-static int apply_r_riscv_add64_rela(struct module *me, u32 *location,
+static int apply_r_riscv_add64_rela(struct module *me, void *location,
                                    Elf_Addr v)
 {
        *(u64 *)location += (u64)v;
        return 0;
 }
 
-static int apply_r_riscv_sub16_rela(struct module *me, u32 *location,
+static int apply_r_riscv_sub8_rela(struct module *me, void *location, Elf_Addr v)
+{
+       *(u8 *)location -= (u8)v;
+       return 0;
+}
+
+static int apply_r_riscv_sub16_rela(struct module *me, void *location,
                                    Elf_Addr v)
 {
        *(u16 *)location -= (u16)v;
        return 0;
 }
 
-static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
+static int apply_r_riscv_sub32_rela(struct module *me, void *location,
                                    Elf_Addr v)
 {
        *(u32 *)location -= (u32)v;
        return 0;
 }
 
-static int apply_r_riscv_sub64_rela(struct module *me, u32 *location,
+static int apply_r_riscv_sub64_rela(struct module *me, void *location,
                                    Elf_Addr v)
 {
        *(u64 *)location -= (u64)v;
        return 0;
 }
 
-static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
-                               Elf_Addr v) = {
-       [R_RISCV_32]                    = apply_r_riscv_32_rela,
-       [R_RISCV_64]                    = apply_r_riscv_64_rela,
-       [R_RISCV_BRANCH]                = apply_r_riscv_branch_rela,
-       [R_RISCV_JAL]                   = apply_r_riscv_jal_rela,
-       [R_RISCV_RVC_BRANCH]            = apply_r_riscv_rvc_branch_rela,
-       [R_RISCV_RVC_JUMP]              = apply_r_riscv_rvc_jump_rela,
-       [R_RISCV_PCREL_HI20]            = apply_r_riscv_pcrel_hi20_rela,
-       [R_RISCV_PCREL_LO12_I]          = apply_r_riscv_pcrel_lo12_i_rela,
-       [R_RISCV_PCREL_LO12_S]          = apply_r_riscv_pcrel_lo12_s_rela,
-       [R_RISCV_HI20]                  = apply_r_riscv_hi20_rela,
-       [R_RISCV_LO12_I]                = apply_r_riscv_lo12_i_rela,
-       [R_RISCV_LO12_S]                = apply_r_riscv_lo12_s_rela,
-       [R_RISCV_GOT_HI20]              = apply_r_riscv_got_hi20_rela,
-       [R_RISCV_CALL_PLT]              = apply_r_riscv_call_plt_rela,
-       [R_RISCV_CALL]                  = apply_r_riscv_call_rela,
-       [R_RISCV_RELAX]                 = apply_r_riscv_relax_rela,
-       [R_RISCV_ALIGN]                 = apply_r_riscv_align_rela,
-       [R_RISCV_ADD16]                 = apply_r_riscv_add16_rela,
-       [R_RISCV_ADD32]                 = apply_r_riscv_add32_rela,
-       [R_RISCV_ADD64]                 = apply_r_riscv_add64_rela,
-       [R_RISCV_SUB16]                 = apply_r_riscv_sub16_rela,
-       [R_RISCV_SUB32]                 = apply_r_riscv_sub32_rela,
-       [R_RISCV_SUB64]                 = apply_r_riscv_sub64_rela,
+static int dynamic_linking_not_supported(struct module *me, void *location,
+                                        Elf_Addr v)
+{
+       pr_err("%s: Dynamic linking not supported in kernel modules PC = %p\n",
+              me->name, location);
+       return -EINVAL;
+}
+
+static int tls_not_supported(struct module *me, void *location, Elf_Addr v)
+{
+       pr_err("%s: Thread local storage not supported in kernel modules PC = %p\n",
+              me->name, location);
+       return -EINVAL;
+}
+
+static int apply_r_riscv_sub6_rela(struct module *me, void *location, Elf_Addr v)
+{
+       u8 *byte = location;
+       u8 value = v;
+
+       *byte = (*byte - (value & 0x3f)) & 0x3f;
+       return 0;
+}
+
+static int apply_r_riscv_set6_rela(struct module *me, void *location, Elf_Addr v)
+{
+       u8 *byte = location;
+       u8 value = v;
+
+       *byte = (*byte & 0xc0) | (value & 0x3f);
+       return 0;
+}
+
+static int apply_r_riscv_set8_rela(struct module *me, void *location, Elf_Addr v)
+{
+       *(u8 *)location = (u8)v;
+       return 0;
+}
+
+static int apply_r_riscv_set16_rela(struct module *me, void *location,
+                                   Elf_Addr v)
+{
+       *(u16 *)location = (u16)v;
+       return 0;
+}
+
+static int apply_r_riscv_set32_rela(struct module *me, void *location,
+                                   Elf_Addr v)
+{
+       *(u32 *)location = (u32)v;
+       return 0;
+}
+
+static int apply_r_riscv_32_pcrel_rela(struct module *me, void *location,
+                                      Elf_Addr v)
+{
+       *(u32 *)location = v - (uintptr_t)location;
+       return 0;
+}
+
+static int apply_r_riscv_plt32_rela(struct module *me, void *location,
+                                   Elf_Addr v)
+{
+       ptrdiff_t offset = (void *)v - location;
+
+       if (!riscv_insn_valid_32bit_offset(offset)) {
+               /* Only emit the plt entry if offset over 32-bit range */
+               if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
+                       offset = (void *)module_emit_plt_entry(me, v) - location;
+               } else {
+                       pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+                              me->name, (long long)v, location);
+                       return -EINVAL;
+               }
+       }
+
+       *(u32 *)location = (u32)offset;
+       return 0;
+}
+
+static int apply_r_riscv_set_uleb128(struct module *me, void *location, Elf_Addr v)
+{
+       *(long *)location = v;
+       return 0;
+}
+
+static int apply_r_riscv_sub_uleb128(struct module *me, void *location, Elf_Addr v)
+{
+       *(long *)location -= v;
+       return 0;
+}
+
+static int apply_6_bit_accumulation(struct module *me, void *location, long buffer)
+{
+       u8 *byte = location;
+       u8 value = buffer;
+
+       if (buffer > 0x3f) {
+               pr_err("%s: value %ld out of range for 6-bit relocation.\n",
+                      me->name, buffer);
+               return -EINVAL;
+       }
+
+       *byte = (*byte & 0xc0) | (value & 0x3f);
+       return 0;
+}
+
+static int apply_8_bit_accumulation(struct module *me, void *location, long buffer)
+{
+       if (buffer > U8_MAX) {
+               pr_err("%s: value %ld out of range for 8-bit relocation.\n",
+                      me->name, buffer);
+               return -EINVAL;
+       }
+       *(u8 *)location = (u8)buffer;
+       return 0;
+}
+
+static int apply_16_bit_accumulation(struct module *me, void *location, long buffer)
+{
+       if (buffer > U16_MAX) {
+               pr_err("%s: value %ld out of range for 16-bit relocation.\n",
+                      me->name, buffer);
+               return -EINVAL;
+       }
+       *(u16 *)location = (u16)buffer;
+       return 0;
+}
+
+static int apply_32_bit_accumulation(struct module *me, void *location, long buffer)
+{
+       if (buffer > U32_MAX) {
+               pr_err("%s: value %ld out of range for 32-bit relocation.\n",
+                      me->name, buffer);
+               return -EINVAL;
+       }
+       *(u32 *)location = (u32)buffer;
+       return 0;
+}
+
+static int apply_64_bit_accumulation(struct module *me, void *location, long buffer)
+{
+       *(u64 *)location = (u64)buffer;
+       return 0;
+}
+
+static int apply_uleb128_accumulation(struct module *me, void *location, long buffer)
+{
+       /*
+        * ULEB128 is a variable length encoding. Encode the buffer into
+        * the ULEB128 data format.
+        */
+       u8 *p = location;
+
+       while (buffer != 0) {
+               u8 value = buffer & 0x7f;
+
+               buffer >>= 7;
+               value |= (!!buffer) << 7;
+
+               *p++ = value;
+       }
+       return 0;
+}
+
+/*
+ * Relocations defined in the riscv-elf-psabi-doc.
+ * This handles static linking only.
+ */
+static const struct relocation_handlers reloc_handlers[] = {
+       [R_RISCV_32]            = { .reloc_handler = apply_r_riscv_32_rela },
+       [R_RISCV_64]            = { .reloc_handler = apply_r_riscv_64_rela },
+       [R_RISCV_RELATIVE]      = { .reloc_handler = dynamic_linking_not_supported },
+       [R_RISCV_COPY]          = { .reloc_handler = dynamic_linking_not_supported },
+       [R_RISCV_JUMP_SLOT]     = { .reloc_handler = dynamic_linking_not_supported },
+       [R_RISCV_TLS_DTPMOD32]  = { .reloc_handler = dynamic_linking_not_supported },
+       [R_RISCV_TLS_DTPMOD64]  = { .reloc_handler = dynamic_linking_not_supported },
+       [R_RISCV_TLS_DTPREL32]  = { .reloc_handler = dynamic_linking_not_supported },
+       [R_RISCV_TLS_DTPREL64]  = { .reloc_handler = dynamic_linking_not_supported },
+       [R_RISCV_TLS_TPREL32]   = { .reloc_handler = dynamic_linking_not_supported },
+       [R_RISCV_TLS_TPREL64]   = { .reloc_handler = dynamic_linking_not_supported },
+       /* 12-15 undefined */
+       [R_RISCV_BRANCH]        = { .reloc_handler = apply_r_riscv_branch_rela },
+       [R_RISCV_JAL]           = { .reloc_handler = apply_r_riscv_jal_rela },
+       [R_RISCV_CALL]          = { .reloc_handler = apply_r_riscv_call_rela },
+       [R_RISCV_CALL_PLT]      = { .reloc_handler = apply_r_riscv_call_plt_rela },
+       [R_RISCV_GOT_HI20]      = { .reloc_handler = apply_r_riscv_got_hi20_rela },
+       [R_RISCV_TLS_GOT_HI20]  = { .reloc_handler = tls_not_supported },
+       [R_RISCV_TLS_GD_HI20]   = { .reloc_handler = tls_not_supported },
+       [R_RISCV_PCREL_HI20]    = { .reloc_handler = apply_r_riscv_pcrel_hi20_rela },
+       [R_RISCV_PCREL_LO12_I]  = { .reloc_handler = apply_r_riscv_pcrel_lo12_i_rela },
+       [R_RISCV_PCREL_LO12_S]  = { .reloc_handler = apply_r_riscv_pcrel_lo12_s_rela },
+       [R_RISCV_HI20]          = { .reloc_handler = apply_r_riscv_hi20_rela },
+       [R_RISCV_LO12_I]        = { .reloc_handler = apply_r_riscv_lo12_i_rela },
+       [R_RISCV_LO12_S]        = { .reloc_handler = apply_r_riscv_lo12_s_rela },
+       [R_RISCV_TPREL_HI20]    = { .reloc_handler = tls_not_supported },
+       [R_RISCV_TPREL_LO12_I]  = { .reloc_handler = tls_not_supported },
+       [R_RISCV_TPREL_LO12_S]  = { .reloc_handler = tls_not_supported },
+       [R_RISCV_TPREL_ADD]     = { .reloc_handler = tls_not_supported },
+       [R_RISCV_ADD8]          = { .reloc_handler = apply_r_riscv_add8_rela,
+                                   .accumulate_handler = apply_8_bit_accumulation },
+       [R_RISCV_ADD16]         = { .reloc_handler = apply_r_riscv_add16_rela,
+                                   .accumulate_handler = apply_16_bit_accumulation },
+       [R_RISCV_ADD32]         = { .reloc_handler = apply_r_riscv_add32_rela,
+                                   .accumulate_handler = apply_32_bit_accumulation },
+       [R_RISCV_ADD64]         = { .reloc_handler = apply_r_riscv_add64_rela,
+                                   .accumulate_handler = apply_64_bit_accumulation },
+       [R_RISCV_SUB8]          = { .reloc_handler = apply_r_riscv_sub8_rela,
+                                   .accumulate_handler = apply_8_bit_accumulation },
+       [R_RISCV_SUB16]         = { .reloc_handler = apply_r_riscv_sub16_rela,
+                                   .accumulate_handler = apply_16_bit_accumulation },
+       [R_RISCV_SUB32]         = { .reloc_handler = apply_r_riscv_sub32_rela,
+                                   .accumulate_handler = apply_32_bit_accumulation },
+       [R_RISCV_SUB64]         = { .reloc_handler = apply_r_riscv_sub64_rela,
+                                   .accumulate_handler = apply_64_bit_accumulation },
+       /* 41-42 reserved for future standard use */
+       [R_RISCV_ALIGN]         = { .reloc_handler = apply_r_riscv_align_rela },
+       [R_RISCV_RVC_BRANCH]    = { .reloc_handler = apply_r_riscv_rvc_branch_rela },
+       [R_RISCV_RVC_JUMP]      = { .reloc_handler = apply_r_riscv_rvc_jump_rela },
+       /* 46-50 reserved for future standard use */
+       [R_RISCV_RELAX]         = { .reloc_handler = apply_r_riscv_relax_rela },
+       [R_RISCV_SUB6]          = { .reloc_handler = apply_r_riscv_sub6_rela,
+                                   .accumulate_handler = apply_6_bit_accumulation },
+       [R_RISCV_SET6]          = { .reloc_handler = apply_r_riscv_set6_rela,
+                                   .accumulate_handler = apply_6_bit_accumulation },
+       [R_RISCV_SET8]          = { .reloc_handler = apply_r_riscv_set8_rela,
+                                   .accumulate_handler = apply_8_bit_accumulation },
+       [R_RISCV_SET16]         = { .reloc_handler = apply_r_riscv_set16_rela,
+                                   .accumulate_handler = apply_16_bit_accumulation },
+       [R_RISCV_SET32]         = { .reloc_handler = apply_r_riscv_set32_rela,
+                                   .accumulate_handler = apply_32_bit_accumulation },
+       [R_RISCV_32_PCREL]      = { .reloc_handler = apply_r_riscv_32_pcrel_rela },
+       [R_RISCV_IRELATIVE]     = { .reloc_handler = dynamic_linking_not_supported },
+       [R_RISCV_PLT32]         = { .reloc_handler = apply_r_riscv_plt32_rela },
+       [R_RISCV_SET_ULEB128]   = { .reloc_handler = apply_r_riscv_set_uleb128,
+                                   .accumulate_handler = apply_uleb128_accumulation },
+       [R_RISCV_SUB_ULEB128]   = { .reloc_handler = apply_r_riscv_sub_uleb128,
+                                   .accumulate_handler = apply_uleb128_accumulation },
+       /* 62-191 reserved for future standard use */
+       /* 192-255 nonstandard ABI extensions  */
 };
 
+void process_accumulated_relocations(struct module *me)
+{
+       /*
+        * Only ADD/SUB/SET/ULEB128 should end up here.
+        *
+        * Each bucket may have more than one relocation location. All
+        * relocations for a location are stored in a list in a bucket.
+        *
+        * Relocations are applied to a temp variable before being stored to the
+        * provided location to check for overflow. This also allows ULEB128 to
+        * properly decide how many entries are needed before storing to
+        * location. The final value is stored into location using the handler
+        * for the last relocation to an address.
+        *
+        * Three layers of indexing:
+        *      - Each of the buckets in use
+        *      - Groups of relocations in each bucket by location address
+        *      - Each relocation entry for a location address
+        */
+       struct used_bucket *bucket_iter;
+       struct relocation_head *rel_head_iter;
+       struct relocation_entry *rel_entry_iter;
+       int curr_type;
+       void *location;
+       long buffer;
+
+       list_for_each_entry(bucket_iter, &used_buckets_list, head) {
+               hlist_for_each_entry(rel_head_iter, bucket_iter->bucket, node) {
+                       buffer = 0;
+                       location = rel_head_iter->location;
+                       list_for_each_entry(rel_entry_iter,
+                                           rel_head_iter->rel_entry, head) {
+                               curr_type = rel_entry_iter->type;
+                               reloc_handlers[curr_type].reloc_handler(
+                                       me, &buffer, rel_entry_iter->value);
+                               kfree(rel_entry_iter);
+                       }
+                       reloc_handlers[curr_type].accumulate_handler(
+                               me, location, buffer);
+                       kfree(rel_head_iter);
+               }
+               kfree(bucket_iter);
+       }
+
+       kfree(relocation_hashtable);
+}
+
+int add_relocation_to_accumulate(struct module *me, int type, void *location,
+                                unsigned int hashtable_bits, Elf_Addr v)
+{
+       struct relocation_entry *entry;
+       struct relocation_head *rel_head;
+       struct hlist_head *current_head;
+       struct used_bucket *bucket;
+       unsigned long hash;
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       INIT_LIST_HEAD(&entry->head);
+       entry->type = type;
+       entry->value = v;
+
+       hash = hash_min((uintptr_t)location, hashtable_bits);
+
+       current_head = &relocation_hashtable[hash];
+
+       /* Find matching location (if any) */
+       bool found = false;
+       struct relocation_head *rel_head_iter;
+
+       hlist_for_each_entry(rel_head_iter, current_head, node) {
+               if (rel_head_iter->location == location) {
+                       found = true;
+                       rel_head = rel_head_iter;
+                       break;
+               }
+       }
+
+       if (!found) {
+               rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);
+               rel_head->rel_entry =
+                       kmalloc(sizeof(struct list_head), GFP_KERNEL);
+               INIT_LIST_HEAD(rel_head->rel_entry);
+               rel_head->location = location;
+               INIT_HLIST_NODE(&rel_head->node);
+               if (!current_head->first) {
+                       bucket =
+                               kmalloc(sizeof(struct used_bucket), GFP_KERNEL);
+                       INIT_LIST_HEAD(&bucket->head);
+                       bucket->bucket = current_head;
+                       list_add(&bucket->head, &used_buckets_list);
+               }
+               hlist_add_head(&rel_head->node, current_head);
+       }
+
+       /* Add relocation to head of discovered rel_head */
+       list_add_tail(&entry->head, rel_head->rel_entry);
+
+       return 0;
+}
+
+unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
+{
+       /* Can safely assume that bits is not greater than sizeof(long) */
+       unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
+       unsigned int hashtable_bits = ilog2(hashtable_size);
+
+       /*
+        * Double size of hashtable if num_relocations * 1.25 is greater than
+        * hashtable_size.
+        */
+       int should_double_size = ((num_relocations + (num_relocations >> 2)) > (hashtable_size));
+
+       hashtable_bits += should_double_size;
+
+       hashtable_size <<= should_double_size;
+
+       relocation_hashtable = kmalloc_array(hashtable_size,
+                                            sizeof(*relocation_hashtable),
+                                            GFP_KERNEL);
+       __hash_init(relocation_hashtable, hashtable_size);
+
+       INIT_LIST_HEAD(&used_buckets_list);
+
+       return hashtable_bits;
+}
+
 int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                       unsigned int symindex, unsigned int relsec,
                       struct module *me)
 {
        Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
-       int (*handler)(struct module *me, u32 *location, Elf_Addr v);
+       int (*handler)(struct module *me, void *location, Elf_Addr v);
        Elf_Sym *sym;
-       u32 *location;
+       void *location;
        unsigned int i, type;
        Elf_Addr v;
        int res;
+       unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
+       unsigned int hashtable_bits = initialize_relocation_hashtable(num_relocations);
 
        pr_debug("Applying relocate section %u to %u\n", relsec,
               sechdrs[relsec].sh_info);
 
-       for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+       for (i = 0; i < num_relocations; i++) {
                /* This is where to make the change */
                location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;
@@ -370,8 +765,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
 
                type = ELF_RISCV_R_TYPE(rel[i].r_info);
 
-               if (type < ARRAY_SIZE(reloc_handlers_rela))
-                       handler = reloc_handlers_rela[type];
+               if (type < ARRAY_SIZE(reloc_handlers))
+                       handler = reloc_handlers[type].reloc_handler;
                else
                        handler = NULL;
 
@@ -427,11 +822,16 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                        }
                }
 
-               res = handler(me, location, v);
+               if (reloc_handlers[type].accumulate_handler)
+                       res = add_relocation_to_accumulate(me, type, location, hashtable_bits, v);
+               else
+                       res = handler(me, location, v);
                if (res)
                        return res;
        }
 
+       process_accumulated_relocations(me);
+
        return 0;
 }
 
index 21bac92a170a9bd43be973f213cb8be3329175ef..f2cd83d9b0f004e0fe6ae64db9425d343a394bf9 100644 (file)
@@ -75,7 +75,7 @@
        REG_L x31, PT_T6(sp)
        .endm
 
-ENTRY(arch_rethook_trampoline)
+SYM_CODE_START(arch_rethook_trampoline)
        addi sp, sp, -(PT_SIZE_ON_STACK)
        save_all_base_regs
 
@@ -90,4 +90,4 @@ ENTRY(arch_rethook_trampoline)
        addi sp, sp, PT_SIZE_ON_STACK
 
        ret
-ENDPROC(arch_rethook_trampoline)
+SYM_CODE_END(arch_rethook_trampoline)
index d3099d67816d054b4abcc938a7e775fbe2a79212..6c166029079c42bf71f0d0a5b0c28308027680c6 100644 (file)
@@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index,
                                       unsigned long val)
 {
        if (index == 0)
-               return false;
+               return true;
        else if (index <= 31)
                *((unsigned long *)regs + index) = val;
        else
index 194f166b2cc40e77b3bcef5dabe6ca9c01d86bd9..4b3dc8beaf77d31d0c828a7a04cc81e2d78ef2e0 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
 #include <linux/uprobes.h>
+#include <asm/insn.h>
 
 #include "decode-insn.h"
 
@@ -17,6 +18,11 @@ bool is_swbp_insn(uprobe_opcode_t *insn)
 #endif
 }
 
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+       return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn);
+}
+
 unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
 {
        return instruction_pointer(regs);
index e32d737e039fd477c33861da55705b86dbe5f53f..4f21d970a1292b06be357b8b33ed541751bbb091 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/thread_info.h>
 #include <asm/cpuidle.h>
 #include <asm/vector.h>
+#include <asm/cpufeature.h>
 
 register unsigned long gp_in_global __asm__("gp");
 
@@ -41,6 +42,23 @@ void arch_cpu_idle(void)
        cpu_do_idle();
 }
 
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+       if (!unaligned_ctl_available())
+               return -EINVAL;
+
+       tsk->thread.align_ctl = val;
+       return 0;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
+{
+       if (!unaligned_ctl_available())
+               return -EINVAL;
+
+       return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
        show_regs_print_info(KERN_DEFAULT);
index c672c8ba9a2a6b45450993b56c41f48e3514249b..5a62ed1da45332c85820fdfdd7e90046b1ae3380 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/reboot.h>
 #include <asm/sbi.h>
 #include <asm/smp.h>
+#include <asm/tlbflush.h>
 
 /* default SBI version is 0.1 */
 unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
@@ -376,32 +377,15 @@ int sbi_remote_fence_i(const struct cpumask *cpu_mask)
 }
 EXPORT_SYMBOL(sbi_remote_fence_i);
 
-/**
- * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
- *                          harts for the specified virtual address range.
- * @cpu_mask: A cpu mask containing all the target harts.
- * @start: Start of the virtual address
- * @size: Total size of the virtual address range.
- *
- * Return: 0 on success, appropriate linux error code otherwise.
- */
-int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
-                          unsigned long start,
-                          unsigned long size)
-{
-       return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
-                           cpu_mask, start, size, 0, 0);
-}
-EXPORT_SYMBOL(sbi_remote_sfence_vma);
-
 /**
  * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
- * remote harts for a virtual address range belonging to a specific ASID.
+ * remote harts for a virtual address range belonging to a specific ASID or not.
  *
  * @cpu_mask: A cpu mask containing all the target harts.
  * @start: Start of the virtual address
  * @size: Total size of the virtual address range.
- * @asid: The value of address space identifier (ASID).
+ * @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID
+ * for flushing all address spaces.
  *
  * Return: 0 on success, appropriate linux error code otherwise.
  */
@@ -410,8 +394,12 @@ int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
                                unsigned long size,
                                unsigned long asid)
 {
-       return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
-                           cpu_mask, start, size, asid, 0);
+       if (asid == FLUSH_TLB_NO_ASID)
+               return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
+                                   cpu_mask, start, size, 0, 0);
+       else
+               return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
+                                   cpu_mask, start, size, asid, 0);
 }
 EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);
 
index 0624f44d43eca13ed1a0619fae0c4922fd52d384..535a837de55d1ba3aa8a45fe4123404ce1a9430f 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/acpi.h>
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
 #include <asm/early_ioremap.h>
 #include <asm/pgtable.h>
@@ -289,10 +290,13 @@ void __init setup_arch(char **cmdline_p)
        riscv_fill_hwcap();
        init_rt_signal_env();
        apply_boot_alternatives();
+
        if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
            riscv_isa_extension_available(NULL, ZICBOM))
                riscv_noncoherent_supported();
        riscv_set_dma_cache_alignment();
+
+       riscv_user_isa_enable();
 }
 
 static int __init topology_init(void)
index 21a4d0e111bc5f151f9ef4a6205e7da17a87ec10..88b6220b260879ee75ac6a6824def025b004041b 100644 (file)
@@ -384,30 +384,6 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
        sigset_t *oldset = sigmask_to_save();
        int ret;
 
-       /* Are we from a system call? */
-       if (regs->cause == EXC_SYSCALL) {
-               /* Avoid additional syscall restarting via ret_from_exception */
-               regs->cause = -1UL;
-               /* If so, check system call restarting.. */
-               switch (regs->a0) {
-               case -ERESTART_RESTARTBLOCK:
-               case -ERESTARTNOHAND:
-                       regs->a0 = -EINTR;
-                       break;
-
-               case -ERESTARTSYS:
-                       if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
-                               regs->a0 = -EINTR;
-                               break;
-                       }
-                       fallthrough;
-               case -ERESTARTNOINTR:
-                        regs->a0 = regs->orig_a0;
-                       regs->epc -= 0x4;
-                       break;
-               }
-       }
-
        rseq_signal_deliver(ksig, regs);
 
        /* Set up the stack frame */
@@ -421,35 +397,66 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 
 void arch_do_signal_or_restart(struct pt_regs *regs)
 {
+       unsigned long continue_addr = 0, restart_addr = 0;
+       int retval = 0;
        struct ksignal ksig;
+       bool syscall = (regs->cause == EXC_SYSCALL);
 
-       if (get_signal(&ksig)) {
-               /* Actually deliver the signal */
-               handle_signal(&ksig, regs);
-               return;
-       }
+       /* If we were from a system call, check for system call restarting */
+       if (syscall) {
+               continue_addr = regs->epc;
+               restart_addr = continue_addr - 4;
+               retval = regs->a0;
 
-       /* Did we come from a system call? */
-       if (regs->cause == EXC_SYSCALL) {
                /* Avoid additional syscall restarting via ret_from_exception */
                regs->cause = -1UL;
 
-               /* Restart the system call - no handlers present */
-               switch (regs->a0) {
+               /*
+                * Prepare for system call restart. We do this here so that a
+                * debugger will see the already changed PC.
+                */
+               switch (retval) {
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
-                        regs->a0 = regs->orig_a0;
-                       regs->epc -= 0x4;
-                       break;
                case -ERESTART_RESTARTBLOCK:
-                        regs->a0 = regs->orig_a0;
-                       regs->a7 = __NR_restart_syscall;
-                       regs->epc -= 0x4;
+                       regs->a0 = regs->orig_a0;
+                       regs->epc = restart_addr;
                        break;
                }
        }
 
+       /*
+        * Get the signal to deliver. When running under ptrace, at this point
+        * the debugger may change all of our registers.
+        */
+       if (get_signal(&ksig)) {
+               /*
+                * Depending on the signal settings, we may need to revert the
+                * decision to restart the system call, but skip this if a
+                * debugger has chosen to restart at a different PC.
+                */
+               if (regs->epc == restart_addr &&
+                   (retval == -ERESTARTNOHAND ||
+                    retval == -ERESTART_RESTARTBLOCK ||
+                    (retval == -ERESTARTSYS &&
+                     !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
+                       regs->a0 = -EINTR;
+                       regs->epc = continue_addr;
+               }
+
+               /* Actually deliver the signal */
+               handle_signal(&ksig, regs);
+               return;
+       }
+
+       /*
+        * Handle restarting a different system call. As above, if a debugger
+        * has chosen to restart at a different PC, ignore the restart.
+        */
+       if (syscall && regs->epc == restart_addr && retval == -ERESTART_RESTARTBLOCK)
+               regs->a7 = __NR_restart_syscall;
+
        /*
         * If there is no signal to deliver, we just put the saved
         * sigmask back.
index 1b8da4e40a4d6e979293e4734fb7d4b2e7407723..d162bf339beb16e3e4ffcb2b7d755ded9712e895 100644 (file)
@@ -25,6 +25,8 @@
 #include <linux/of.h>
 #include <linux/sched/task_stack.h>
 #include <linux/sched/mm.h>
+
+#include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
 #include <asm/cpufeature.h>
 #include <asm/irq.h>
@@ -246,13 +248,14 @@ asmlinkage __visible void smp_callin(void)
 
        numa_add_cpu(curr_cpuid);
        set_cpu_online(curr_cpuid, 1);
-       check_unaligned_access(curr_cpuid);
 
        if (has_vector()) {
                if (riscv_v_setup_vsize())
                        elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
        }
 
+       riscv_user_isa_enable();
+
        /*
         * Remote TLB flushes are ignored while the CPU is offline, so emit
         * a local TLB flush right now just in case.
index f7960c7c5f9e25081424f3edbe526896c1692303..2d54f309c14059ad901f80448df2ff257f047388 100644 (file)
@@ -16,7 +16,7 @@
        .altmacro
        .option norelax
 
-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
        /* Save registers (except A0 and T0-T6) */
        REG_S   ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
        REG_S   sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
@@ -57,14 +57,11 @@ ENTRY(__cpu_suspend_enter)
 
        /* Return to C code */
        ret
-END(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)
 
 SYM_TYPED_FUNC_START(__cpu_resume_enter)
        /* Load the global pointer */
-       .option push
-       .option norelax
-               la gp, __global_pointer$
-       .option pop
+       load_global_pointer
 
 #ifdef CONFIG_MMU
        /* Save A0 and A1 */
index b651ec698a91b11ff8f75ab402f33a1addbc66b5..c712037dbe10ec88b9ee8f5d8559f9b73c6608fa 100644 (file)
@@ -145,26 +145,38 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
        for_each_cpu(cpu, cpus) {
                struct riscv_isainfo *isainfo = &hart_isa[cpu];
 
-               if (riscv_isa_extension_available(isainfo->isa, ZBA))
-                       pair->value |= RISCV_HWPROBE_EXT_ZBA;
-               else
-                       missing |= RISCV_HWPROBE_EXT_ZBA;
-
-               if (riscv_isa_extension_available(isainfo->isa, ZBB))
-                       pair->value |= RISCV_HWPROBE_EXT_ZBB;
-               else
-                       missing |= RISCV_HWPROBE_EXT_ZBB;
-
-               if (riscv_isa_extension_available(isainfo->isa, ZBS))
-                       pair->value |= RISCV_HWPROBE_EXT_ZBS;
-               else
-                       missing |= RISCV_HWPROBE_EXT_ZBS;
+#define EXT_KEY(ext)                                                                   \
+       do {                                                                            \
+               if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
+                       pair->value |= RISCV_HWPROBE_EXT_##ext;                         \
+               else                                                                    \
+                       missing |= RISCV_HWPROBE_EXT_##ext;                             \
+       } while (false)
+
+               /*
+                * Only use EXT_KEY() for extensions which can be exposed to userspace,
+                * regardless of the kernel's configuration, as no other checks, besides
+                * presence in the hart_isa bitmap, are made.
+                */
+               EXT_KEY(ZBA);
+               EXT_KEY(ZBB);
+               EXT_KEY(ZBS);
+               EXT_KEY(ZICBOZ);
+#undef EXT_KEY
        }
 
        /* Now turn off reporting features if any CPU is missing it. */
        pair->value &= ~missing;
 }
 
+static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
+{
+       struct riscv_hwprobe pair;
+
+       hwprobe_isa_ext0(&pair, cpus);
+       return (pair.value & ext);
+}
+
 static u64 hwprobe_misaligned(const struct cpumask *cpus)
 {
        int cpu;
@@ -215,6 +227,12 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
                pair->value = hwprobe_misaligned(cpus);
                break;
 
+       case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
+               pair->value = 0;
+               if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
+                       pair->value = riscv_cboz_block_size;
+               break;
+
        /*
         * For forward compatibility, unknown keys don't fail the whole
         * call, but get their element key set to -1 and value set to 0
diff --git a/arch/riscv/kernel/tests/Kconfig.debug b/arch/riscv/kernel/tests/Kconfig.debug
new file mode 100644 (file)
index 0000000..5dba64e
--- /dev/null
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "arch/riscv/kernel Testing and Coverage"
+
+config AS_HAS_ULEB128
+       def_bool $(as-instr,.reloc label$(comma) R_RISCV_SET_ULEB128$(comma) 127\n.reloc label$(comma) R_RISCV_SUB_ULEB128$(comma) 127\nlabel:\n.word 0)
+
+menuconfig RUNTIME_KERNEL_TESTING_MENU
+       bool "arch/riscv/kernel runtime Testing"
+       def_bool y
+       help
+         Enable riscv kernel runtime testing.
+
+if RUNTIME_KERNEL_TESTING_MENU
+
+config RISCV_MODULE_LINKING_KUNIT
+       bool "KUnit test riscv module linking at runtime" if !KUNIT_ALL_TESTS
+       depends on KUNIT
+       default KUNIT_ALL_TESTS
+       help
+         Enable this option to test riscv module linking at boot. This will
+         enable a module called "test_module_linking".
+
+         KUnit tests run during boot and output the results to the debug log
+         in TAP format (http://testanything.org/). Only useful for kernel devs
+         running the KUnit test harness, and not intended for inclusion into a
+         production build.
+
+         For more information on KUnit and unit tests in general please refer
+         to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+         If unsure, say N.
+
+endif # RUNTIME_KERNEL_TESTING_MENU
+
+endmenu # "arch/riscv/kernel runtime Testing"
diff --git a/arch/riscv/kernel/tests/Makefile b/arch/riscv/kernel/tests/Makefile
new file mode 100644 (file)
index 0000000..7d6c76c
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_RISCV_MODULE_LINKING_KUNIT)       += module_test/
diff --git a/arch/riscv/kernel/tests/module_test/Makefile b/arch/riscv/kernel/tests/module_test/Makefile
new file mode 100644 (file)
index 0000000..d7a6fd8
--- /dev/null
@@ -0,0 +1,15 @@
+obj-m += test_module_linking.o
+
+test_sub := test_sub6.o test_sub8.o test_sub16.o test_sub32.o test_sub64.o
+
+test_set := test_set6.o test_set8.o test_set16.o test_set32.o
+
+test_module_linking-objs += $(test_sub)
+
+test_module_linking-objs += $(test_set)
+
+ifeq ($(CONFIG_AS_HAS_ULEB128),y)
+test_module_linking-objs += test_uleb128.o
+endif
+
+test_module_linking-objs += test_module_linking_main.o
diff --git a/arch/riscv/kernel/tests/module_test/test_module_linking_main.c b/arch/riscv/kernel/tests/module_test/test_module_linking_main.c
new file mode 100644 (file)
index 0000000..8df5fa5
--- /dev/null
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <kunit/test.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test module linking");
+
+extern int test_set32(void);
+extern int test_set16(void);
+extern int test_set8(void);
+extern int test_set6(void);
+extern long test_sub64(void);
+extern int test_sub32(void);
+extern int test_sub16(void);
+extern int test_sub8(void);
+extern int test_sub6(void);
+
+#ifdef CONFIG_AS_HAS_ULEB128
+extern int test_uleb_basic(void);
+extern int test_uleb_large(void);
+#endif
+
+#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, lhs, rhs)
+
+void run_test_set(struct kunit *test);
+void run_test_sub(struct kunit *test);
+void run_test_uleb(struct kunit *test);
+
+void run_test_set(struct kunit *test)
+{
+       int val32 = test_set32();
+       int val16 = test_set16();
+       int val8 = test_set8();
+       int val6 = test_set6();
+
+       CHECK_EQ(val32, 0);
+       CHECK_EQ(val16, 0);
+       CHECK_EQ(val8, 0);
+       CHECK_EQ(val6, 0);
+}
+
+void run_test_sub(struct kunit *test)
+{
+       int val64 = test_sub64();
+       int val32 = test_sub32();
+       int val16 = test_sub16();
+       int val8 = test_sub8();
+       int val6 = test_sub6();
+
+       CHECK_EQ(val64, 0);
+       CHECK_EQ(val32, 0);
+       CHECK_EQ(val16, 0);
+       CHECK_EQ(val8, 0);
+       CHECK_EQ(val6, 0);
+}
+
+#ifdef CONFIG_AS_HAS_ULEB128
+void run_test_uleb(struct kunit *test)
+{
+       int val_uleb = test_uleb_basic();
+       int val_uleb2 = test_uleb_large();
+
+       CHECK_EQ(val_uleb, 0);
+       CHECK_EQ(val_uleb2, 0);
+}
+#endif
+
+static struct kunit_case __refdata riscv_module_linking_test_cases[] = {
+       KUNIT_CASE(run_test_set),
+       KUNIT_CASE(run_test_sub),
+#ifdef CONFIG_AS_HAS_ULEB128
+       KUNIT_CASE(run_test_uleb),
+#endif
+       {}
+};
+
+static struct kunit_suite riscv_module_linking_test_suite = {
+       .name = "riscv_module_linking",
+       .test_cases = riscv_module_linking_test_cases,
+};
+
+kunit_test_suites(&riscv_module_linking_test_suite);
diff --git a/arch/riscv/kernel/tests/module_test/test_set16.S b/arch/riscv/kernel/tests/module_test/test_set16.S
new file mode 100644 (file)
index 0000000..2be0e44
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set16
+test_set16:
+       lw      a0, set16
+       la      t0, set16
+#ifdef CONFIG_32BIT
+       slli    t0, t0, 16
+       srli    t0, t0, 16
+#else
+       slli    t0, t0, 48
+       srli    t0, t0, 48
+#endif
+       sub     a0, a0, t0
+       ret
+.data
+set16:
+       .reloc set16, R_RISCV_SET16, set16
+       .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_set32.S b/arch/riscv/kernel/tests/module_test/test_set32.S
new file mode 100644 (file)
index 0000000..de04445
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set32
+test_set32:
+       lw      a0, set32
+       la      t0, set32
+#ifndef CONFIG_32BIT
+       slli    t0, t0, 32
+       srli    t0, t0, 32
+#endif
+       sub     a0, a0, t0
+       ret
+.data
+set32:
+       .reloc set32, R_RISCV_SET32, set32
+       .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_set6.S b/arch/riscv/kernel/tests/module_test/test_set6.S
new file mode 100644 (file)
index 0000000..c39ce4c
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set6
+test_set6:
+       lw      a0, set6
+       la      t0, set6
+#ifdef CONFIG_32BIT
+       slli    t0, t0, 26
+       srli    t0, t0, 26
+#else
+       slli    t0, t0, 58
+       srli    t0, t0, 58
+#endif
+       sub     a0, a0, t0
+       ret
+.data
+set6:
+       .reloc set6, R_RISCV_SET6, set6
+       .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_set8.S b/arch/riscv/kernel/tests/module_test/test_set8.S
new file mode 100644 (file)
index 0000000..a656173
--- /dev/null
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set8
+test_set8:
+       lw      a0, set8
+       la      t0, set8
+#ifdef CONFIG_32BIT
+       slli    t0, t0, 24
+       srli    t0, t0, 24
+#else
+       slli    t0, t0, 56
+       srli    t0, t0, 56
+#endif
+       sub     a0, a0, t0
+       ret
+.data
+set8:
+       .reloc set8, R_RISCV_SET8, set8
+       .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub16.S b/arch/riscv/kernel/tests/module_test/test_sub16.S
new file mode 100644 (file)
index 0000000..80f731d
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub16
+test_sub16:
+       lh      a0, sub16
+       addi    a0, a0, -32
+       ret
+first:
+       .space 32
+second:
+
+.data
+sub16:
+       .reloc          sub16, R_RISCV_ADD16, second
+       .reloc          sub16, R_RISCV_SUB16, first
+       .half           0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub32.S b/arch/riscv/kernel/tests/module_test/test_sub32.S
new file mode 100644 (file)
index 0000000..a341686
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub32
+test_sub32:
+       lw      a0, sub32
+       addi    a0, a0, -32
+       ret
+first:
+       .space 32
+second:
+
+.data
+sub32:
+       .reloc          sub32, R_RISCV_ADD32, second
+       .reloc          sub32, R_RISCV_SUB32, first
+       .word           0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub6.S b/arch/riscv/kernel/tests/module_test/test_sub6.S
new file mode 100644 (file)
index 0000000..e8b61c1
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub6
+test_sub6:
+       lb      a0, sub6
+       addi    a0, a0, -32
+       ret
+first:
+       .space 32
+second:
+
+.data
+sub6:
+       .reloc          sub6, R_RISCV_SET6, second
+       .reloc          sub6, R_RISCV_SUB6, first
+       .byte           0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub64.S b/arch/riscv/kernel/tests/module_test/test_sub64.S
new file mode 100644 (file)
index 0000000..a59e8af
--- /dev/null
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub64
+test_sub64:
+#ifdef CONFIG_32BIT
+       lw      a0, sub64
+#else
+       ld      a0, sub64
+#endif
+       addi    a0, a0, -32
+       ret
+first:
+       .space 32
+second:
+
+.data
+sub64:
+       .reloc          sub64, R_RISCV_ADD64, second
+       .reloc          sub64, R_RISCV_SUB64, first
+       .word           0
+       .word           0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub8.S b/arch/riscv/kernel/tests/module_test/test_sub8.S
new file mode 100644 (file)
index 0000000..ac5d0ec
--- /dev/null
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub8
+test_sub8:
+       lb      a0, sub8
+       addi    a0, a0, -32
+       ret
+first:
+       .space 32
+second:
+
+.data
+sub8:
+       .reloc          sub8, R_RISCV_ADD8, second
+       .reloc          sub8, R_RISCV_SUB8, first
+       .byte           0
diff --git a/arch/riscv/kernel/tests/module_test/test_uleb128.S b/arch/riscv/kernel/tests/module_test/test_uleb128.S
new file mode 100644 (file)
index 0000000..90f2204
--- /dev/null
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_uleb_basic
+test_uleb_basic:
+       ld      a0, second
+       addi    a0, a0, -127
+       ret
+
+.global test_uleb_large
+test_uleb_large:
+       ld      a0, fourth
+       addi    a0, a0, -0x07e8
+       ret
+
+.data
+first:
+       .space 127
+second:
+       .reloc second, R_RISCV_SET_ULEB128, second
+       .reloc second, R_RISCV_SUB_ULEB128, first
+       .dword 0
+third:
+       .space 1000
+fourth:
+       .reloc fourth, R_RISCV_SET_ULEB128, fourth
+       .reloc fourth, R_RISCV_SUB_ULEB128, third
+       .dword 0
index fae8f610d867fd7f34ec19ed24e7cb3da06b968e..a1b9be3c4332d97f08b50beebfcadba5adaa02be 100644 (file)
@@ -36,7 +36,21 @@ int show_unhandled_signals = 1;
 
 static DEFINE_SPINLOCK(die_lock);
 
-static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs)
+static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
+{
+       const void __user *uaddr = (__force const void __user *)insns;
+
+       if (!user_mode(regs))
+               return get_kernel_nofault(*val, insns);
+
+       /* The user space code from other tasks cannot be accessed. */
+       if (regs != task_pt_regs(current))
+               return -EPERM;
+
+       return copy_from_user_nofault(val, uaddr, sizeof(*val));
+}
+
+static void dump_instr(const char *loglvl, struct pt_regs *regs)
 {
        char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
        const u16 *insns = (u16 *)instruction_pointer(regs);
@@ -45,7 +59,7 @@ static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs)
        int i;
 
        for (i = -10; i < 2; i++) {
-               bad = get_kernel_nofault(val, &insns[i]);
+               bad = copy_code(regs, &val, &insns[i]);
                if (!bad) {
                        p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
                } else {
@@ -74,7 +88,7 @@ void die(struct pt_regs *regs, const char *str)
        print_modules();
        if (regs) {
                show_regs(regs);
-               dump_kernel_instr(KERN_EMERG, regs);
+               dump_instr(KERN_EMERG, regs);
        }
 
        cause = regs ? regs->cause : -1;
@@ -107,6 +121,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
                print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
                pr_cont("\n");
                __show_regs(regs);
+               dump_instr(KERN_EMERG, regs);
        }
 
        force_sig_fault(signo, code, (void __user *)addr);
@@ -181,14 +196,6 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
 
 DO_ERROR_INFO(do_trap_load_fault,
        SIGSEGV, SEGV_ACCERR, "load access fault");
-#ifndef CONFIG_RISCV_M_MODE
-DO_ERROR_INFO(do_trap_load_misaligned,
-       SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
-DO_ERROR_INFO(do_trap_store_misaligned,
-       SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
-#else
-int handle_misaligned_load(struct pt_regs *regs);
-int handle_misaligned_store(struct pt_regs *regs);
 
 asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
 {
@@ -231,7 +238,6 @@ asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs
                irqentry_nmi_exit(regs, state);
        }
 }
-#endif
 DO_ERROR_INFO(do_trap_store_fault,
        SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
 DO_ERROR_INFO(do_trap_ecall_s,
@@ -360,34 +366,10 @@ static void noinstr handle_riscv_irq(struct pt_regs *regs)
 asmlinkage void noinstr do_irq(struct pt_regs *regs)
 {
        irqentry_state_t state = irqentry_enter(regs);
-#ifdef CONFIG_IRQ_STACKS
-       if (on_thread_stack()) {
-               ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
-                                       + IRQ_STACK_SIZE/sizeof(ulong);
-               __asm__ __volatile(
-               "addi   sp, sp, -"RISCV_SZPTR  "\n"
-               REG_S"  ra, (sp)                \n"
-               "addi   sp, sp, -"RISCV_SZPTR  "\n"
-               REG_S"  s0, (sp)                \n"
-               "addi   s0, sp, 2*"RISCV_SZPTR "\n"
-               "move   sp, %[sp]               \n"
-               "move   a0, %[regs]             \n"
-               "call   handle_riscv_irq        \n"
-               "addi   sp, s0, -2*"RISCV_SZPTR"\n"
-               REG_L"  s0, (sp)                \n"
-               "addi   sp, sp, "RISCV_SZPTR   "\n"
-               REG_L"  ra, (sp)                \n"
-               "addi   sp, sp, "RISCV_SZPTR   "\n"
-               :
-               : [sp] "r" (sp), [regs] "r" (regs)
-               : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
-                 "t0", "t1", "t2", "t3", "t4", "t5", "t6",
-#ifndef CONFIG_FRAME_POINTER
-                 "s0",
-#endif
-                 "memory");
-       } else
-#endif
+
+       if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
+               call_on_irq_stack(regs, handle_riscv_irq);
+       else
                handle_riscv_irq(regs);
 
        irqentry_exit(regs, state);
@@ -410,48 +392,14 @@ int is_valid_bugaddr(unsigned long pc)
 #endif /* CONFIG_GENERIC_BUG */
 
 #ifdef CONFIG_VMAP_STACK
-/*
- * Extra stack space that allows us to provide panic messages when the kernel
- * has overflowed its stack.
- */
-static DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
+DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)],
                overflow_stack)__aligned(16);
-/*
- * A temporary stack for use by handle_kernel_stack_overflow.  This is used so
- * we can call into C code to get the per-hart overflow stack.  Usage of this
- * stack must be protected by spin_shadow_stack.
- */
-long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE/sizeof(long)] __aligned(16);
-
-/*
- * A pseudo spinlock to protect the shadow stack from being used by multiple
- * harts concurrently.  This isn't a real spinlock because the lock side must
- * be taken without a valid stack and only a single register, it's only taken
- * while in the process of panicing anyway so the performance and error
- * checking a proper spinlock gives us doesn't matter.
- */
-unsigned long spin_shadow_stack;
-
-asmlinkage unsigned long get_overflow_stack(void)
-{
-       return (unsigned long)this_cpu_ptr(overflow_stack) +
-               OVERFLOW_STACK_SIZE;
-}
 
 asmlinkage void handle_bad_stack(struct pt_regs *regs)
 {
        unsigned long tsk_stk = (unsigned long)current->stack;
        unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
 
-       /*
-        * We're done with the shadow stack by this point, as we're on the
-        * overflow stack.  Tell any other concurrent overflowing harts that
-        * they can proceed with panicing by releasing the pseudo-spinlock.
-        *
-        * This pairs with an amoswap.aq in handle_kernel_stack_overflow.
-        */
-       smp_store_release(&spin_shadow_stack, 0);
-
        console_verbose();
 
        pr_emerg("Insufficient stack space to handle exception!\n");
index 378f5b151443564020e775edfc13e6e90e557152..5eba37147caa96c077eb9ffb89233e1f679fed6d 100644 (file)
@@ -6,12 +6,16 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/perf_event.h>
 #include <linux/irq.h>
 #include <linux/stringify.h>
 
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/csr.h>
+#include <asm/entry-common.h>
+#include <asm/hwprobe.h>
+#include <asm/cpufeature.h>
 
 #define INSN_MATCH_LB                  0x3
 #define INSN_MASK_LB                   0x707f
 #define PRECISION_S 0
 #define PRECISION_D 1
 
-#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn)                 \
-static inline type load_##type(const type *addr)                       \
-{                                                                      \
-       type val;                                                       \
-       asm (#insn " %0, %1"                                            \
-       : "=&r" (val) : "m" (*addr));                                   \
-       return val;                                                     \
+#ifdef CONFIG_FPU
+
+#define FP_GET_RD(insn)                (insn >> 7 & 0x1F)
+
+extern void put_f32_reg(unsigned long fp_reg, unsigned long value);
+
+static int set_f32_rd(unsigned long insn, struct pt_regs *regs,
+                     unsigned long val)
+{
+       unsigned long fp_reg = FP_GET_RD(insn);
+
+       put_f32_reg(fp_reg, val);
+       regs->status |= SR_FS_DIRTY;
+
+       return 0;
 }
 
-#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn)                        \
-static inline void store_##type(type *addr, type val)                  \
-{                                                                      \
-       asm volatile (#insn " %0, %1\n"                                 \
-       : : "r" (val), "m" (*addr));                                    \
+extern void put_f64_reg(unsigned long fp_reg, unsigned long value);
+
+static int set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val)
+{
+       unsigned long fp_reg = FP_GET_RD(insn);
+       unsigned long value;
+
+#if __riscv_xlen == 32
+       value = (unsigned long) &val;
+#else
+       value = val;
+#endif
+       put_f64_reg(fp_reg, value);
+       regs->status |= SR_FS_DIRTY;
+
+       return 0;
 }
 
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw)
-#if defined(CONFIG_64BIT)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld)
+#if __riscv_xlen == 32
+extern void get_f64_reg(unsigned long fp_reg, u64 *value);
+
+static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset,
+                     struct pt_regs *regs)
+{
+       unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+       u64 val;
+
+       get_f64_reg(fp_reg, &val);
+       regs->status |= SR_FS_DIRTY;
+
+       return val;
+}
 #else
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw)
 
-static inline u64 load_u64(const u64 *addr)
+extern unsigned long get_f64_reg(unsigned long fp_reg);
+
+static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
+                               struct pt_regs *regs)
 {
-       return load_u32((u32 *)addr)
-               + ((u64)load_u32((u32 *)addr + 1) << 32);
+       unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+       unsigned long val;
+
+       val = get_f64_reg(fp_reg);
+       regs->status |= SR_FS_DIRTY;
+
+       return val;
 }
 
-static inline void store_u64(u64 *addr, u64 val)
+#endif
+
+extern unsigned long get_f32_reg(unsigned long fp_reg);
+
+static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
+                               struct pt_regs *regs)
 {
-       store_u32((u32 *)addr, val);
-       store_u32((u32 *)addr + 1, val >> 32);
+       unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+       unsigned long val;
+
+       val = get_f32_reg(fp_reg);
+       regs->status |= SR_FS_DIRTY;
+
+       return val;
 }
+
+#else /* CONFIG_FPU */
+static void set_f32_rd(unsigned long insn, struct pt_regs *regs,
+                      unsigned long val) {}
+
+static void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) {}
+
+static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
+                               struct pt_regs *regs)
+{
+       return 0;
+}
+
+static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
+                               struct pt_regs *regs)
+{
+       return 0;
+}
+
 #endif
 
-static inline ulong get_insn(ulong mepc)
+#define GET_F64_RS2(insn, regs) (get_f64_rs(insn, 20, regs))
+#define GET_F64_RS2C(insn, regs) (get_f64_rs(insn, 2, regs))
+#define GET_F64_RS2S(insn, regs) (get_f64_rs(RVC_RS2S(insn), 0, regs))
+
+#define GET_F32_RS2(insn, regs) (get_f32_rs(insn, 20, regs))
+#define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs))
+#define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs))
+
+#ifdef CONFIG_RISCV_M_MODE
+static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
+{
+       u8 val;
+
+       asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
+       *r_val = val;
+
+       return 0;
+}
+
+static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
+{
+       asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
+
+       return 0;
+}
+
+static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
 {
        register ulong __mepc asm ("a2") = mepc;
        ulong val, rvc_mask = 3, tmp;
@@ -226,23 +311,119 @@ static inline ulong get_insn(ulong mepc)
        : [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
          [xlen_minus_16] "i" (XLEN_MINUS_16));
 
-       return val;
+       *r_insn = val;
+
+       return 0;
+}
+#else
+static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
+{
+       if (user_mode(regs)) {
+               return __get_user(*r_val, addr);
+       } else {
+               *r_val = *addr;
+               return 0;
+       }
+}
+
+static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
+{
+       if (user_mode(regs)) {
+               return __put_user(val, addr);
+       } else {
+               *addr = val;
+               return 0;
+       }
 }
 
+#define __read_insn(regs, insn, insn_addr)             \
+({                                                     \
+       int __ret;                                      \
+                                                       \
+       if (user_mode(regs)) {                          \
+               __ret = __get_user(insn, insn_addr);    \
+       } else {                                        \
+               insn = *insn_addr;                      \
+               __ret = 0;                              \
+       }                                               \
+                                                       \
+       __ret;                                          \
+})
+
+static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
+{
+       ulong insn = 0;
+
+       if (epc & 0x2) {
+               ulong tmp = 0;
+               u16 __user *insn_addr = (u16 __user *)epc;
+
+               if (__read_insn(regs, insn, insn_addr))
+                       return -EFAULT;
+               /* __get_user() uses regular "lw" which sign extend the loaded
+                * value make sure to clear higher order bits in case we "or" it
+                * below with the upper 16 bits half.
+                */
+               insn &= GENMASK(15, 0);
+               if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
+                       *r_insn = insn;
+                       return 0;
+               }
+               insn_addr++;
+               if (__read_insn(regs, tmp, insn_addr))
+                       return -EFAULT;
+               *r_insn = (tmp << 16) | insn;
+
+               return 0;
+       } else {
+               u32 __user *insn_addr = (u32 __user *)epc;
+
+               if (__read_insn(regs, insn, insn_addr))
+                       return -EFAULT;
+               if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
+                       *r_insn = insn;
+                       return 0;
+               }
+               insn &= GENMASK(15, 0);
+               *r_insn = insn;
+
+               return 0;
+       }
+}
+#endif
+
 union reg_data {
        u8 data_bytes[8];
        ulong data_ulong;
        u64 data_u64;
 };
 
+static bool unaligned_ctl __read_mostly;
+
+/* sysctl hooks */
+int unaligned_enabled __read_mostly = 1;       /* Enabled by default */
+
 int handle_misaligned_load(struct pt_regs *regs)
 {
        union reg_data val;
        unsigned long epc = regs->epc;
-       unsigned long insn = get_insn(epc);
-       unsigned long addr = csr_read(mtval);
+       unsigned long insn;
+       unsigned long addr = regs->badaddr;
        int i, fp = 0, shift = 0, len = 0;
 
+       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
+
+       *this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;
+
+       if (!unaligned_enabled)
+               return -1;
+
+       if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
+               return -1;
+
+       if (get_insn(regs, epc, &insn))
+               return -1;
+
        regs->epc = 0;
 
        if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
@@ -305,13 +486,21 @@ int handle_misaligned_load(struct pt_regs *regs)
                return -1;
        }
 
+       if (!IS_ENABLED(CONFIG_FPU) && fp)
+               return -EOPNOTSUPP;
+
        val.data_u64 = 0;
-       for (i = 0; i < len; i++)
-               val.data_bytes[i] = load_u8((void *)(addr + i));
+       for (i = 0; i < len; i++) {
+               if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i]))
+                       return -1;
+       }
 
-       if (fp)
-               return -1;
-       SET_RD(insn, regs, val.data_ulong << shift >> shift);
+       if (!fp)
+               SET_RD(insn, regs, val.data_ulong << shift >> shift);
+       else if (len == 8)
+               set_f64_rd(insn, regs, val.data_u64);
+       else
+               set_f32_rd(insn, regs, val.data_ulong);
 
        regs->epc = epc + INSN_LEN(insn);
 
@@ -322,9 +511,20 @@ int handle_misaligned_store(struct pt_regs *regs)
 {
        union reg_data val;
        unsigned long epc = regs->epc;
-       unsigned long insn = get_insn(epc);
-       unsigned long addr = csr_read(mtval);
-       int i, len = 0;
+       unsigned long insn;
+       unsigned long addr = regs->badaddr;
+       int i, len = 0, fp = 0;
+
+       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
+
+       if (!unaligned_enabled)
+               return -1;
+
+       if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
+               return -1;
+
+       if (get_insn(regs, epc, &insn))
+               return -1;
 
        regs->epc = 0;
 
@@ -336,6 +536,14 @@ int handle_misaligned_store(struct pt_regs *regs)
        } else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
                len = 8;
 #endif
+       } else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
+               fp = 1;
+               len = 8;
+               val.data_u64 = GET_F64_RS2(insn, regs);
+       } else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
+               fp = 1;
+               len = 4;
+               val.data_ulong = GET_F32_RS2(insn, regs);
        } else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
                len = 2;
 #if defined(CONFIG_64BIT)
@@ -354,15 +562,88 @@ int handle_misaligned_store(struct pt_regs *regs)
                   ((insn >> SH_RD) & 0x1f)) {
                len = 4;
                val.data_ulong = GET_RS2C(insn, regs);
+       } else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
+               fp = 1;
+               len = 8;
+               val.data_u64 = GET_F64_RS2S(insn, regs);
+       } else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
+               fp = 1;
+               len = 8;
+               val.data_u64 = GET_F64_RS2C(insn, regs);
+#if !defined(CONFIG_64BIT)
+       } else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
+               fp = 1;
+               len = 4;
+               val.data_ulong = GET_F32_RS2S(insn, regs);
+       } else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
+               fp = 1;
+               len = 4;
+               val.data_ulong = GET_F32_RS2C(insn, regs);
+#endif
        } else {
                regs->epc = epc;
                return -1;
        }
 
-       for (i = 0; i < len; i++)
-               store_u8((void *)(addr + i), val.data_bytes[i]);
+       if (!IS_ENABLED(CONFIG_FPU) && fp)
+               return -EOPNOTSUPP;
+
+       for (i = 0; i < len; i++) {
+               if (store_u8(regs, (void *)(addr + i), val.data_bytes[i]))
+                       return -1;
+       }
 
        regs->epc = epc + INSN_LEN(insn);
 
        return 0;
 }
+
+bool check_unaligned_access_emulated(int cpu)
+{
+       long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+       unsigned long tmp_var, tmp_val;
+       bool misaligned_emu_detected;
+
+       *mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+
+       __asm__ __volatile__ (
+               "       "REG_L" %[tmp], 1(%[ptr])\n"
+               : [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+
+       misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
+       /*
+        * If unaligned_ctl is already set, this means that we detected that all
+        * CPUS uses emulated misaligned access at boot time. If that changed
+        * when hotplugging the new cpu, this is something we don't handle.
+        */
+       if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
+               pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
+               while (true)
+                       cpu_relax();
+       }
+
+       return misaligned_emu_detected;
+}
+
+void unaligned_emulation_finish(void)
+{
+       int cpu;
+
+       /*
+        * We can only support PR_UNALIGN controls if all CPUs have misaligned
+        * accesses emulated since tasks requesting such control can run on any
+        * CPU.
+        */
+       for_each_present_cpu(cpu) {
+               if (per_cpu(misaligned_access_speed, cpu) !=
+                                       RISCV_HWPROBE_MISALIGNED_EMULATED) {
+                       return;
+               }
+       }
+       unaligned_ctl = true;
+}
+
+bool unaligned_ctl_available(void)
+{
+       return unaligned_ctl;
+}
index e8aa7c38000755d4984573467174df4b20f22dac..9b517fe1b8a8ecfddfae487dc9e829cc622334f2 100644 (file)
@@ -36,7 +36,7 @@ CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY
 endif
 
 # Disable -pg to prevent insert call site
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS)
 
 # Disable profiling and instrumentation for VDSO code
 GCOV_PROFILE := n
index 82f97d67c23e9bdde94b0d2f655f52d32c8fd6d1..8f884227e8bca7fd3634217e71d4ee4ed122559a 100644 (file)
@@ -8,7 +8,7 @@
 
        .text
 /* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
-ENTRY(__vdso_flush_icache)
+SYM_FUNC_START(__vdso_flush_icache)
        .cfi_startproc
 #ifdef CONFIG_SMP
        li a7, __NR_riscv_flush_icache
@@ -19,4 +19,4 @@ ENTRY(__vdso_flush_icache)
 #endif
        ret
        .cfi_endproc
-ENDPROC(__vdso_flush_icache)
+SYM_FUNC_END(__vdso_flush_icache)
index bb0c05e2ffbae3d6aa3609fc5b5de84630aaa37d..9c1bd531907f2fefda1d0778191073ca6b70df1a 100644 (file)
@@ -8,11 +8,11 @@
 
        .text
 /* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
-ENTRY(__vdso_getcpu)
+SYM_FUNC_START(__vdso_getcpu)
        .cfi_startproc
        /* For now, just do the syscall. */
        li a7, __NR_getcpu
        ecall
        ret
        .cfi_endproc
-ENDPROC(__vdso_getcpu)
+SYM_FUNC_END(__vdso_getcpu)
index d40bec6ac0786690374b8fde5eaddda928c788d2..cadf725ef798370bbe248f80dcf207c7fd439e7b 100644 (file)
@@ -37,7 +37,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
 
        /* This is something we can handle, fill out the pairs. */
        while (p < end) {
-               if (p->key <= RISCV_HWPROBE_MAX_KEY) {
+               if (riscv_hwprobe_key_is_valid(p->key)) {
                        p->value = avd->all_cpu_hwprobe_values[p->key];
 
                } else {
index 10438c7c626acc8034fa22d6765422fbc7b67f0b..3dc022aa8931ad3b3798f4cc492ce795dd7b7bf5 100644 (file)
@@ -7,10 +7,10 @@
 #include <asm/unistd.h>
 
        .text
-ENTRY(__vdso_rt_sigreturn)
+SYM_FUNC_START(__vdso_rt_sigreturn)
        .cfi_startproc
        .cfi_signal_frame
        li a7, __NR_rt_sigreturn
        ecall
        .cfi_endproc
-ENDPROC(__vdso_rt_sigreturn)
+SYM_FUNC_END(__vdso_rt_sigreturn)
index 4e704146c77a092e481b8b532c19b11e3efa82e4..77e57f8305216c466f51979c91899754b4a7b382 100644 (file)
@@ -5,11 +5,11 @@
 #include <asm/unistd.h>
 
 .text
-ENTRY(riscv_hwprobe)
+SYM_FUNC_START(riscv_hwprobe)
        .cfi_startproc
        li a7, __NR_riscv_hwprobe
        ecall
        ret
 
        .cfi_endproc
-ENDPROC(riscv_hwprobe)
+SYM_FUNC_END(riscv_hwprobe)
index 82ce64900f3d7e7af48a211b8e24aeec0952d504..cbe2a179331d2511a8b4a26c06383e46131661b1 100644 (file)
@@ -23,35 +23,31 @@ SECTIONS
        .gnu.version_d  : { *(.gnu.version_d) }
        .gnu.version_r  : { *(.gnu.version_r) }
 
-       .note           : { *(.note.*) }                :text   :note
        .dynamic        : { *(.dynamic) }               :text   :dynamic
 
+       .rodata         : {
+               *(.rodata .rodata.* .gnu.linkonce.r.*)
+               *(.got.plt) *(.got)
+               *(.data .data.* .gnu.linkonce.d.*)
+               *(.dynbss)
+               *(.bss .bss.* .gnu.linkonce.b.*)
+       }
+
+       .note           : { *(.note.*) }                :text   :note
+
        .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text   :eh_frame_hdr
        .eh_frame       : { KEEP (*(.eh_frame)) }       :text
 
-       .rodata         : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
-
        /*
-        * This linker script is used both with -r and with -shared.
-        * For the layouts to match, we need to skip more than enough
-        * space for the dynamic symbol table, etc. If this amount is
-        * insufficient, ld -shared will error; simply increase it here.
+        * Text is well-separated from actual data: there's plenty of
+        * stuff that isn't used at runtime in between.
         */
-       . = 0x800;
+       . = ALIGN(16);
        .text           : { *(.text .text.*) }          :text
 
        . = ALIGN(4);
        .alternative : {
-               __alt_start = .;
                *(.alternative)
-               __alt_end = .;
-       }
-
-       .data           : {
-               *(.got.plt) *(.got)
-               *(.data .data.* .gnu.linkonce.d.*)
-               *(.dynbss)
-               *(.bss .bss.* .gnu.linkonce.b.*)
        }
 }
 
index 74bb27440527b3bafa1418fc75337162e9bc3589..a944294f6f23a70335070dc877588321429da0de 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/kvm_host.h>
 #include <linux/percpu.h>
 #include <linux/spinlock.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm_aia_imsic.h>
 
 struct aia_hgei_control {
index 48ae0d4b3932457f642760b01c831bf84f5fb3bc..225a435d9c9a9c25b8cf24f4501a2e9e3bb94d1b 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/module.h>
 #include <linux/kvm_host.h>
 #include <asm/csr.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/sbi.h>
 
 long kvm_arch_dev_ioctl(struct file *filp,
index 44bc324aeeb08d824804fd1138e4ab2b0d5e2d8e..23c0e82b5103cdd950b2da266258260292c0cea5 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/kvm_host.h>
 #include <asm/cacheflush.h>
 #include <asm/csr.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/insn-def.h>
 
 #define has_svinval()  riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
index 08ba48a395aa2a232bd00755f02355e6770d0ee7..030904d82b583e1ce3f4e44cdabe4e61e708e616 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/err.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 
 #ifdef CONFIG_FPU
 void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
index c6ebce6126b55006a2714a43d5cd3f123636d654..f8c9fa0c03c5abbd8a8035f255455e7d5d1c9288 100644 (file)
@@ -13,7 +13,7 @@
 #include <linux/uaccess.h>
 #include <linux/kvm_host.h>
 #include <asm/cacheflush.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm_vcpu_vector.h>
 #include <asm/vector.h>
 
index b430cbb695214da2a500d46c58c7113faa04237c..b339a2682f252bb8c0ac6d3803a8eab46e1e1443 100644 (file)
@@ -11,7 +11,7 @@
 #include <linux/err.h>
 #include <linux/kvm_host.h>
 #include <linux/uaccess.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/kvm_vcpu_vector.h>
 #include <asm/vector.h>
 
index d7a256eb53f404feaf8ebc750453968bc5352188..b22de1231144c29758d3fae335a5d727f44b51bb 100644 (file)
@@ -29,41 +29,41 @@ SYM_FUNC_START(clear_page)
        lw      a1, riscv_cboz_block_size
        add     a2, a0, a2
 .Lzero_loop:
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
        CBOZ_ALT(11, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
        CBOZ_ALT(10, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
        CBOZ_ALT(9, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
        CBOZ_ALT(8, "bltu a0, a2, .Lzero_loop; ret", "nop; nop")
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
-       CBO_zero(a0)
+       CBO_ZERO(a0)
        add     a0, a0, a1
        bltu    a0, a2, .Lzero_loop
        ret
index 1a40d01a95439e1592b673ad76e5f5b964bbbed3..44e009ec5fef683a3290d57f31239661a5352f9e 100644 (file)
@@ -7,8 +7,7 @@
 #include <asm/asm.h>
 
 /* void *memcpy(void *, const void *, size_t) */
-ENTRY(__memcpy)
-WEAK(memcpy)
+SYM_FUNC_START(__memcpy)
        move t6, a0  /* Preserve return value */
 
        /* Defer to byte-oriented copy for small sizes */
@@ -105,6 +104,7 @@ WEAK(memcpy)
        bltu a1, a3, 5b
 6:
        ret
-END(__memcpy)
+SYM_FUNC_END(__memcpy)
+SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
 SYM_FUNC_ALIAS(__pi_memcpy, __memcpy)
 SYM_FUNC_ALIAS(__pi___memcpy, __memcpy)
index 838ff2022fe32d8e16769ef0e5962d9c294b0ed3..cb3e2e7ef0baa248d906717523a6f848f591eaf9 100644 (file)
@@ -7,7 +7,6 @@
 #include <asm/asm.h>
 
 SYM_FUNC_START(__memmove)
-SYM_FUNC_START_WEAK(memmove)
        /*
         * Returns
         *   a0 - dest
@@ -26,8 +25,8 @@ SYM_FUNC_START_WEAK(memmove)
         */
 
        /* Return if nothing to do */
-       beq a0, a1, return_from_memmove
-       beqz a2, return_from_memmove
+       beq a0, a1, .Lreturn_from_memmove
+       beqz a2, .Lreturn_from_memmove
 
        /*
         * Register Uses
@@ -60,7 +59,7 @@ SYM_FUNC_START_WEAK(memmove)
         * small enough not to bother.
         */
        andi t0, a2, -(2 * SZREG)
-       beqz t0, byte_copy
+       beqz t0, .Lbyte_copy
 
        /*
         * Now solve for t5 and t6.
@@ -87,14 +86,14 @@ SYM_FUNC_START_WEAK(memmove)
         */
        xor  t0, a0, a1
        andi t1, t0, (SZREG - 1)
-       beqz t1, coaligned_copy
+       beqz t1, .Lcoaligned_copy
        /* Fall through to misaligned fixup copy */
 
-misaligned_fixup_copy:
-       bltu a1, a0, misaligned_fixup_copy_reverse
+.Lmisaligned_fixup_copy:
+       bltu a1, a0, .Lmisaligned_fixup_copy_reverse
 
-misaligned_fixup_copy_forward:
-       jal  t0, byte_copy_until_aligned_forward
+.Lmisaligned_fixup_copy_forward:
+       jal  t0, .Lbyte_copy_until_aligned_forward
 
        andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */
        slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -153,10 +152,10 @@ misaligned_fixup_copy_forward:
        mv    t3, t6 /* Fix the dest pointer in case the loop was broken */
 
        add  a1, t3, a5 /* Restore the src pointer */
-       j byte_copy_forward /* Copy any remaining bytes */
+       j .Lbyte_copy_forward /* Copy any remaining bytes */
 
-misaligned_fixup_copy_reverse:
-       jal  t0, byte_copy_until_aligned_reverse
+.Lmisaligned_fixup_copy_reverse:
+       jal  t0, .Lbyte_copy_until_aligned_reverse
 
        andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */
        slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */
@@ -215,18 +214,18 @@ misaligned_fixup_copy_reverse:
        mv    t4, t5 /* Fix the dest pointer in case the loop was broken */
 
        add  a4, t4, a5 /* Restore the src pointer */
-       j byte_copy_reverse /* Copy any remaining bytes */
+       j .Lbyte_copy_reverse /* Copy any remaining bytes */
 
 /*
  * Simple copy loops for SZREG co-aligned memory locations.
  * These also make calls to do byte copies for any unaligned
  * data at their terminations.
  */
-coaligned_copy:
-       bltu a1, a0, coaligned_copy_reverse
+.Lcoaligned_copy:
+       bltu a1, a0, .Lcoaligned_copy_reverse
 
-coaligned_copy_forward:
-       jal t0, byte_copy_until_aligned_forward
+.Lcoaligned_copy_forward:
+       jal t0, .Lbyte_copy_until_aligned_forward
 
        1:
        REG_L t1, ( 0 * SZREG)(a1)
@@ -235,10 +234,10 @@ coaligned_copy_forward:
        REG_S t1, (-1 * SZREG)(t3)
        bne   t3, t6, 1b
 
-       j byte_copy_forward /* Copy any remaining bytes */
+       j .Lbyte_copy_forward /* Copy any remaining bytes */
 
-coaligned_copy_reverse:
-       jal t0, byte_copy_until_aligned_reverse
+.Lcoaligned_copy_reverse:
+       jal t0, .Lbyte_copy_until_aligned_reverse
 
        1:
        REG_L t1, (-1 * SZREG)(a4)
@@ -247,7 +246,7 @@ coaligned_copy_reverse:
        REG_S t1, ( 0 * SZREG)(t4)
        bne   t4, t5, 1b
 
-       j byte_copy_reverse /* Copy any remaining bytes */
+       j .Lbyte_copy_reverse /* Copy any remaining bytes */
 
 /*
  * These are basically sub-functions within the function.  They
@@ -258,7 +257,7 @@ coaligned_copy_reverse:
  * up from where they were left and we avoid code duplication
  * without any overhead except the call in and return jumps.
  */
-byte_copy_until_aligned_forward:
+.Lbyte_copy_until_aligned_forward:
        beq  t3, t5, 2f
        1:
        lb   t1,  0(a1)
@@ -269,7 +268,7 @@ byte_copy_until_aligned_forward:
        2:
        jalr zero, 0x0(t0) /* Return to multibyte copy loop */
 
-byte_copy_until_aligned_reverse:
+.Lbyte_copy_until_aligned_reverse:
        beq  t4, t6, 2f
        1:
        lb   t1, -1(a4)
@@ -285,10 +284,10 @@ byte_copy_until_aligned_reverse:
  * These will byte copy until they reach the end of data to copy.
  * At that point, they will call to return from memmove.
  */
-byte_copy:
-       bltu a1, a0, byte_copy_reverse
+.Lbyte_copy:
+       bltu a1, a0, .Lbyte_copy_reverse
 
-byte_copy_forward:
+.Lbyte_copy_forward:
        beq  t3, t4, 2f
        1:
        lb   t1,  0(a1)
@@ -299,7 +298,7 @@ byte_copy_forward:
        2:
        ret
 
-byte_copy_reverse:
+.Lbyte_copy_reverse:
        beq  t4, t3, 2f
        1:
        lb   t1, -1(a4)
@@ -309,10 +308,10 @@ byte_copy_reverse:
        bne  t4, t3, 1b
        2:
 
-return_from_memmove:
+.Lreturn_from_memmove:
        ret
 
-SYM_FUNC_END(memmove)
 SYM_FUNC_END(__memmove)
+SYM_FUNC_ALIAS_WEAK(memmove, __memmove)
 SYM_FUNC_ALIAS(__pi_memmove, __memmove)
 SYM_FUNC_ALIAS(__pi___memmove, __memmove)
index 34c5360c6705c56466cf9890f1a7261e6383a5e3..35f358e70bdb6bf79bda0f366e34c98c6ef5bbc3 100644 (file)
@@ -8,8 +8,7 @@
 #include <asm/asm.h>
 
 /* void *memset(void *, int, size_t) */
-ENTRY(__memset)
-WEAK(memset)
+SYM_FUNC_START(__memset)
        move t0, a0  /* Preserve return value */
 
        /* Defer to byte-oriented fill for small sizes */
@@ -110,4 +109,5 @@ WEAK(memset)
        bltu t0, a3, 5b
 6:
        ret
-END(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_ALIAS_WEAK(memset, __memset)
index 09b47ebacf2e8743bc1cf77be0693b4499748afb..3ab438f30d1328707862134f819e8a74598c6dce 100644 (file)
@@ -10,8 +10,7 @@
        _asm_extable    100b, \lbl
        .endm
 
-ENTRY(__asm_copy_to_user)
-ENTRY(__asm_copy_from_user)
+SYM_FUNC_START(__asm_copy_to_user)
 
        /* Enable access to user memory */
        li t6, SR_SUM
@@ -181,13 +180,13 @@ ENTRY(__asm_copy_from_user)
        csrc CSR_STATUS, t6
        sub a0, t5, a0
        ret
-ENDPROC(__asm_copy_to_user)
-ENDPROC(__asm_copy_from_user)
+SYM_FUNC_END(__asm_copy_to_user)
 EXPORT_SYMBOL(__asm_copy_to_user)
+SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user)
 EXPORT_SYMBOL(__asm_copy_from_user)
 
 
-ENTRY(__clear_user)
+SYM_FUNC_START(__clear_user)
 
        /* Enable access to user memory */
        li t6, SR_SUM
@@ -233,5 +232,5 @@ ENTRY(__clear_user)
        csrc CSR_STATUS, t6
        sub a0, a3, a0
        ret
-ENDPROC(__clear_user)
+SYM_FUNC_END(__clear_user)
 EXPORT_SYMBOL(__clear_user)
index 9c454f90fd3da21200d180da8a463d45a15da178..3a4dfc8babcf8c3ef4cf2d4c39731b0e9067eb14 100644 (file)
@@ -36,3 +36,4 @@ endif
 
 obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
 obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o
+obj-$(CONFIG_RISCV_NONSTANDARD_CACHE_OPS) += cache-ops.o
diff --git a/arch/riscv/mm/cache-ops.c b/arch/riscv/mm/cache-ops.c
new file mode 100644 (file)
index 0000000..a993ad1
--- /dev/null
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#include <asm/dma-noncoherent.h>
+
+struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init;
+
+void
+riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
+{
+       if (!ops)
+               return;
+       noncoherent_cache_ops = *ops;
+}
+EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
index f1387272a5512d14a8facdb6a97b208109468fc5..55a34f2020a85a895932c92d94a7577bf410f8dc 100644 (file)
@@ -3,7 +3,9 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/acpi.h>
 #include <linux/of.h>
+#include <asm/acpi.h>
 #include <asm/cacheflush.h>
 
 #ifdef CONFIG_SMP
@@ -124,13 +126,24 @@ void __init riscv_init_cbo_blocksizes(void)
        unsigned long cbom_hartid, cboz_hartid;
        u32 cbom_block_size = 0, cboz_block_size = 0;
        struct device_node *node;
+       struct acpi_table_header *rhct;
+       acpi_status status;
+
+       if (acpi_disabled) {
+               for_each_of_cpu_node(node) {
+                       /* set block-size for cbom and/or cboz extension if available */
+                       cbo_get_block_size(node, "riscv,cbom-block-size",
+                                          &cbom_block_size, &cbom_hartid);
+                       cbo_get_block_size(node, "riscv,cboz-block-size",
+                                          &cboz_block_size, &cboz_hartid);
+               }
+       } else {
+               status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
+               if (ACPI_FAILURE(status))
+                       return;
 
-       for_each_of_cpu_node(node) {
-               /* set block-size for cbom and/or cboz extension if available */
-               cbo_get_block_size(node, "riscv,cbom-block-size",
-                                  &cbom_block_size, &cbom_hartid);
-               cbo_get_block_size(node, "riscv,cboz-block-size",
-                                  &cboz_block_size, &cboz_hartid);
+               acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL);
+               acpi_put_table((struct acpi_table_header *)rhct);
        }
 
        if (cbom_block_size)
index b76e7e192eb183460c3caf897b36f5a0ec59b30b..4e4e469b8dd66cfdf3e24346a514db2d3dd55773 100644 (file)
@@ -15,12 +15,6 @@ static bool noncoherent_supported __ro_after_init;
 int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN;
 EXPORT_SYMBOL_GPL(dma_cache_alignment);
 
-struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
-       .wback = NULL,
-       .inv = NULL,
-       .wback_inv = NULL,
-};
-
 static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
 {
        void *vaddr = phys_to_virt(paddr);
@@ -31,7 +25,7 @@ static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
                return;
        }
 #endif
-       ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+       ALT_CMO_OP(CLEAN, vaddr, size, riscv_cbom_block_size);
 }
 
 static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
@@ -45,7 +39,7 @@ static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
        }
 #endif
 
-       ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
+       ALT_CMO_OP(INVAL, vaddr, size, riscv_cbom_block_size);
 }
 
 static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
@@ -59,7 +53,7 @@ static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
        }
 #endif
 
-       ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+       ALT_CMO_OP(FLUSH, vaddr, size, riscv_cbom_block_size);
 }
 
 static inline bool arch_sync_dma_clean_before_fromdevice(void)
@@ -131,7 +125,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
        }
 #endif
 
-       ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
+       ALT_CMO_OP(FLUSH, flush_addr, size, riscv_cbom_block_size);
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
@@ -162,12 +156,3 @@ void __init riscv_set_dma_cache_alignment(void)
        if (!noncoherent_supported)
                dma_cache_alignment = 1;
 }
-
-void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
-{
-       if (!ops)
-               return;
-
-       noncoherent_cache_ops = *ops;
-}
-EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
index d9a4e87028644f962f1c51ad3a89c8b475063e05..2e011cbddf3af373ea68d703e7e5736c5b04155a 100644 (file)
@@ -49,10 +49,12 @@ u64 satp_mode __ro_after_init = SATP_MODE_32;
 #endif
 EXPORT_SYMBOL(satp_mode);
 
+#ifdef CONFIG_64BIT
 bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
 bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
 EXPORT_SYMBOL(pgtable_l4_enabled);
 EXPORT_SYMBOL(pgtable_l5_enabled);
+#endif
 
 phys_addr_t phys_ram_base __ro_after_init;
 EXPORT_SYMBOL(phys_ram_base);
@@ -664,16 +666,16 @@ void __init create_pgd_mapping(pgd_t *pgdp,
 static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
                                      phys_addr_t size)
 {
-       if (!(pa & (PGDIR_SIZE - 1)) && !(va & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
-               return PGDIR_SIZE;
-
-       if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
+       if (pgtable_l5_enabled &&
+           !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
                return P4D_SIZE;
 
-       if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
+       if (pgtable_l4_enabled &&
+           !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
                return PUD_SIZE;
 
-       if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
+       if (IS_ENABLED(CONFIG_64BIT) &&
+           !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
                return PMD_SIZE;
 
        return PAGE_SIZE;
index 161d0b34c2cb28dbc9962d2ec7c4db64fbe08a34..fc5fc4f785c481c20acec4b68ba1a75d278ee150 100644 (file)
@@ -5,6 +5,7 @@
 
 #include <linux/pagewalk.h>
 #include <linux/pgtable.h>
+#include <linux/vmalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/bitops.h>
 #include <asm/set_memory.h>
@@ -25,19 +26,6 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
        return new_val;
 }
 
-static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
-                             unsigned long next, struct mm_walk *walk)
-{
-       pgd_t val = READ_ONCE(*pgd);
-
-       if (pgd_leaf(val)) {
-               val = __pgd(set_pageattr_masks(pgd_val(val), walk));
-               set_pgd(pgd, val);
-       }
-
-       return 0;
-}
-
 static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
 {
@@ -96,7 +84,6 @@ static int pageattr_pte_hole(unsigned long addr, unsigned long next,
 }
 
 static const struct mm_walk_ops pageattr_ops = {
-       .pgd_entry = pageattr_pgd_entry,
        .p4d_entry = pageattr_p4d_entry,
        .pud_entry = pageattr_pud_entry,
        .pmd_entry = pageattr_pmd_entry,
@@ -105,12 +92,181 @@ static const struct mm_walk_ops pageattr_ops = {
        .walk_lock = PGWALK_RDLOCK,
 };
 
+#ifdef CONFIG_64BIT
+static int __split_linear_mapping_pmd(pud_t *pudp,
+                                     unsigned long vaddr, unsigned long end)
+{
+       pmd_t *pmdp;
+       unsigned long next;
+
+       pmdp = pmd_offset(pudp, vaddr);
+
+       do {
+               next = pmd_addr_end(vaddr, end);
+
+               if (next - vaddr >= PMD_SIZE &&
+                   vaddr <= (vaddr & PMD_MASK) && end >= next)
+                       continue;
+
+               if (pmd_leaf(*pmdp)) {
+                       struct page *pte_page;
+                       unsigned long pfn = _pmd_pfn(*pmdp);
+                       pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
+                       pte_t *ptep_new;
+                       int i;
+
+                       pte_page = alloc_page(GFP_KERNEL);
+                       if (!pte_page)
+                               return -ENOMEM;
+
+                       ptep_new = (pte_t *)page_address(pte_page);
+                       for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new)
+                               set_pte(ptep_new, pfn_pte(pfn + i, prot));
+
+                       smp_wmb();
+
+                       set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE));
+               }
+       } while (pmdp++, vaddr = next, vaddr != end);
+
+       return 0;
+}
+
+static int __split_linear_mapping_pud(p4d_t *p4dp,
+                                     unsigned long vaddr, unsigned long end)
+{
+       pud_t *pudp;
+       unsigned long next;
+       int ret;
+
+       pudp = pud_offset(p4dp, vaddr);
+
+       do {
+               next = pud_addr_end(vaddr, end);
+
+               if (next - vaddr >= PUD_SIZE &&
+                   vaddr <= (vaddr & PUD_MASK) && end >= next)
+                       continue;
+
+               if (pud_leaf(*pudp)) {
+                       struct page *pmd_page;
+                       unsigned long pfn = _pud_pfn(*pudp);
+                       pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
+                       pmd_t *pmdp_new;
+                       int i;
+
+                       pmd_page = alloc_page(GFP_KERNEL);
+                       if (!pmd_page)
+                               return -ENOMEM;
+
+                       pmdp_new = (pmd_t *)page_address(pmd_page);
+                       for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new)
+                               set_pmd(pmdp_new,
+                                       pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot));
+
+                       smp_wmb();
+
+                       set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE));
+               }
+
+               ret = __split_linear_mapping_pmd(pudp, vaddr, next);
+               if (ret)
+                       return ret;
+       } while (pudp++, vaddr = next, vaddr != end);
+
+       return 0;
+}
+
+static int __split_linear_mapping_p4d(pgd_t *pgdp,
+                                     unsigned long vaddr, unsigned long end)
+{
+       p4d_t *p4dp;
+       unsigned long next;
+       int ret;
+
+       p4dp = p4d_offset(pgdp, vaddr);
+
+       do {
+               next = p4d_addr_end(vaddr, end);
+
+               /*
+                * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't
+                * need to split, we'll change the protections on the whole P4D.
+                */
+               if (next - vaddr >= P4D_SIZE &&
+                   vaddr <= (vaddr & P4D_MASK) && end >= next)
+                       continue;
+
+               if (p4d_leaf(*p4dp)) {
+                       struct page *pud_page;
+                       unsigned long pfn = _p4d_pfn(*p4dp);
+                       pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
+                       pud_t *pudp_new;
+                       int i;
+
+                       pud_page = alloc_page(GFP_KERNEL);
+                       if (!pud_page)
+                               return -ENOMEM;
+
+                       /*
+                        * Fill the pud level with leaf puds that have the same
+                        * protections as the leaf p4d.
+                        */
+                       pudp_new = (pud_t *)page_address(pud_page);
+                       for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new)
+                               set_pud(pudp_new,
+                                       pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot));
+
+                       /*
+                        * Make sure the pud filling is not reordered with the
+                        * p4d store which could result in seeing a partially
+                        * filled pud level.
+                        */
+                       smp_wmb();
+
+                       set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE));
+               }
+
+               ret = __split_linear_mapping_pud(p4dp, vaddr, next);
+               if (ret)
+                       return ret;
+       } while (p4dp++, vaddr = next, vaddr != end);
+
+       return 0;
+}
+
+static int __split_linear_mapping_pgd(pgd_t *pgdp,
+                                     unsigned long vaddr,
+                                     unsigned long end)
+{
+       unsigned long next;
+       int ret;
+
+       do {
+               next = pgd_addr_end(vaddr, end);
+               /* We never use PGD mappings for the linear mapping */
+               ret = __split_linear_mapping_p4d(pgdp, vaddr, next);
+               if (ret)
+                       return ret;
+       } while (pgdp++, vaddr = next, vaddr != end);
+
+       return 0;
+}
+
+static int split_linear_mapping(unsigned long start, unsigned long end)
+{
+       return __split_linear_mapping_pgd(pgd_offset_k(start), start, end);
+}
+#endif /* CONFIG_64BIT */
+
 static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
                        pgprot_t clear_mask)
 {
        int ret;
        unsigned long start = addr;
        unsigned long end = start + PAGE_SIZE * numpages;
+       unsigned long __maybe_unused lm_start;
+       unsigned long __maybe_unused lm_end;
        struct pageattr_masks masks = {
                .set_mask = set_mask,
                .clear_mask = clear_mask
@@ -120,11 +276,67 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
                return 0;
 
        mmap_write_lock(&init_mm);
+
+#ifdef CONFIG_64BIT
+       /*
+        * We are about to change the permissions of a kernel mapping, we must
+        * apply the same changes to its linear mapping alias, which may imply
+        * splitting a huge mapping.
+        */
+
+       if (is_vmalloc_or_module_addr((void *)start)) {
+               struct vm_struct *area = NULL;
+               int i, page_start;
+
+               area = find_vm_area((void *)start);
+               page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT;
+
+               for (i = page_start; i < page_start + numpages; ++i) {
+                       lm_start = (unsigned long)page_address(area->pages[i]);
+                       lm_end = lm_start + PAGE_SIZE;
+
+                       ret = split_linear_mapping(lm_start, lm_end);
+                       if (ret)
+                               goto unlock;
+
+                       ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
+                                                   &pageattr_ops, NULL, &masks);
+                       if (ret)
+                               goto unlock;
+               }
+       } else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
+               lm_start = (unsigned long)lm_alias(start);
+               lm_end = (unsigned long)lm_alias(end);
+
+               ret = split_linear_mapping(lm_start, lm_end);
+               if (ret)
+                       goto unlock;
+
+               ret = walk_page_range_novma(&init_mm, lm_start, lm_end,
+                                           &pageattr_ops, NULL, &masks);
+               if (ret)
+                       goto unlock;
+       }
+
        ret =  walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
                                     &masks);
+
+unlock:
+       mmap_write_unlock(&init_mm);
+
+       /*
+        * We can't use flush_tlb_kernel_range() here as we may have split a
+        * hugepage that is larger than that, so let's flush everything.
+        */
+       flush_tlb_all();
+#else
+       ret =  walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
+                                    &masks);
+
        mmap_write_unlock(&init_mm);
 
        flush_tlb_kernel_range(start, end);
+#endif
 
        return ret;
 }
@@ -159,36 +371,14 @@ int set_memory_nx(unsigned long addr, int numpages)
 
 int set_direct_map_invalid_noflush(struct page *page)
 {
-       int ret;
-       unsigned long start = (unsigned long)page_address(page);
-       unsigned long end = start + PAGE_SIZE;
-       struct pageattr_masks masks = {
-               .set_mask = __pgprot(0),
-               .clear_mask = __pgprot(_PAGE_PRESENT)
-       };
-
-       mmap_read_lock(&init_mm);
-       ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
-       mmap_read_unlock(&init_mm);
-
-       return ret;
+       return __set_memory((unsigned long)page_address(page), 1,
+                           __pgprot(0), __pgprot(_PAGE_PRESENT));
 }
 
 int set_direct_map_default_noflush(struct page *page)
 {
-       int ret;
-       unsigned long start = (unsigned long)page_address(page);
-       unsigned long end = start + PAGE_SIZE;
-       struct pageattr_masks masks = {
-               .set_mask = PAGE_KERNEL,
-               .clear_mask = __pgprot(0)
-       };
-
-       mmap_read_lock(&init_mm);
-       ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
-       mmap_read_unlock(&init_mm);
-
-       return ret;
+       return __set_memory((unsigned long)page_address(page), 1,
+                           PAGE_KERNEL, __pgprot(0));
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
index c5fc5ec96f6d4b1fc71b2949ed970c561b7fcb14..370a422ede1101b768f4849e946d6af4fdfc8bd5 100644 (file)
@@ -17,7 +17,7 @@ void arch_wb_cache_pmem(void *addr, size_t size)
                return;
        }
 #endif
-       ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size);
+       ALT_CMO_OP(CLEAN, addr, size, riscv_cbom_block_size);
 }
 EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
 
@@ -29,6 +29,6 @@ void arch_invalidate_pmem(void *addr, size_t size)
                return;
        }
 #endif
-       ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size);
+       ALT_CMO_OP(INVAL, addr, size, riscv_cbom_block_size);
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
index 20a9f991a6d7461be7d177723cc3b27284865cd6..657c27bc07a7694edbb70795c4b2bd102b8780d0 100644 (file)
@@ -129,55 +129,55 @@ static struct ptd_mm_info efi_ptd_info = {
 /* Page Table Entry */
 struct prot_bits {
        u64 mask;
-       u64 val;
        const char *set;
        const char *clear;
 };
 
 static const struct prot_bits pte_bits[] = {
        {
+#ifdef CONFIG_64BIT
+               .mask = _PAGE_NAPOT,
+               .set = "N",
+               .clear = ".",
+       }, {
+               .mask = _PAGE_MTMASK_SVPBMT,
+               .set = "MT(%s)",
+               .clear = "  ..  ",
+       }, {
+#endif
                .mask = _PAGE_SOFT,
-               .val = _PAGE_SOFT,
-               .set = "RSW",
-               .clear = "   ",
+               .set = "RSW(%d)",
+               .clear = "  ..  ",
        }, {
                .mask = _PAGE_DIRTY,
-               .val = _PAGE_DIRTY,
                .set = "D",
                .clear = ".",
        }, {
                .mask = _PAGE_ACCESSED,
-               .val = _PAGE_ACCESSED,
                .set = "A",
                .clear = ".",
        }, {
                .mask = _PAGE_GLOBAL,
-               .val = _PAGE_GLOBAL,
                .set = "G",
                .clear = ".",
        }, {
                .mask = _PAGE_USER,
-               .val = _PAGE_USER,
                .set = "U",
                .clear = ".",
        }, {
                .mask = _PAGE_EXEC,
-               .val = _PAGE_EXEC,
                .set = "X",
                .clear = ".",
        }, {
                .mask = _PAGE_WRITE,
-               .val = _PAGE_WRITE,
                .set = "W",
                .clear = ".",
        }, {
                .mask = _PAGE_READ,
-               .val = _PAGE_READ,
                .set = "R",
                .clear = ".",
        }, {
                .mask = _PAGE_PRESENT,
-               .val = _PAGE_PRESENT,
                .set = "V",
                .clear = ".",
        }
@@ -208,15 +208,30 @@ static void dump_prot(struct pg_state *st)
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(pte_bits); i++) {
-               const char *s;
+               char s[7];
+               unsigned long val;
 
-               if ((st->current_prot & pte_bits[i].mask) == pte_bits[i].val)
-                       s = pte_bits[i].set;
-               else
-                       s = pte_bits[i].clear;
+               val = st->current_prot & pte_bits[i].mask;
+               if (val) {
+                       if (pte_bits[i].mask == _PAGE_SOFT)
+                               sprintf(s, pte_bits[i].set, val >> 8);
+#ifdef CONFIG_64BIT
+                       else if (pte_bits[i].mask == _PAGE_MTMASK_SVPBMT) {
+                               if (val == _PAGE_NOCACHE_SVPBMT)
+                                       sprintf(s, pte_bits[i].set, "NC");
+                               else if (val == _PAGE_IO_SVPBMT)
+                                       sprintf(s, pte_bits[i].set, "IO");
+                               else
+                                       sprintf(s, pte_bits[i].set, "??");
+                       }
+#endif
+                       else
+                               sprintf(s, "%s", pte_bits[i].set);
+               } else {
+                       sprintf(s, "%s", pte_bits[i].clear);
+               }
 
-               if (s)
-                       pt_dump_seq_printf(st->seq, " %s", s);
+               pt_dump_seq_printf(st->seq, " %s", s);
        }
 }
 
@@ -384,6 +399,9 @@ static int __init ptdump_init(void)
 
        kernel_ptd_info.base_addr = KERN_VIRT_START;
 
+       pg_level[1].name = pgtable_l5_enabled ? "P4D" : "PGD";
+       pg_level[2].name = pgtable_l4_enabled ? "PUD" : "PGD";
+
        for (i = 0; i < ARRAY_SIZE(pg_level); i++)
                for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
                        pg_level[i].mask |= pte_bits[j].mask;
index 77be59aadc735ea9979473fd2d4dd8ffa04394e2..e6659d7368b35403d1b91739080496bfc45442af 100644 (file)
@@ -3,33 +3,56 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
+#include <linux/hugetlb.h>
 #include <asm/sbi.h>
 #include <asm/mmu_context.h>
 
 static inline void local_flush_tlb_all_asid(unsigned long asid)
 {
-       __asm__ __volatile__ ("sfence.vma x0, %0"
-                       :
-                       : "r" (asid)
-                       : "memory");
+       if (asid != FLUSH_TLB_NO_ASID)
+               __asm__ __volatile__ ("sfence.vma x0, %0"
+                               :
+                               : "r" (asid)
+                               : "memory");
+       else
+               local_flush_tlb_all();
 }
 
 static inline void local_flush_tlb_page_asid(unsigned long addr,
                unsigned long asid)
 {
-       __asm__ __volatile__ ("sfence.vma %0, %1"
-                       :
-                       : "r" (addr), "r" (asid)
-                       : "memory");
+       if (asid != FLUSH_TLB_NO_ASID)
+               __asm__ __volatile__ ("sfence.vma %0, %1"
+                               :
+                               : "r" (addr), "r" (asid)
+                               : "memory");
+       else
+               local_flush_tlb_page(addr);
 }
 
-static inline void local_flush_tlb_range(unsigned long start,
-               unsigned long size, unsigned long stride)
+/*
+ * Flush entire TLB if number of entries to be flushed is greater
+ * than the threshold below.
+ */
+static unsigned long tlb_flush_all_threshold __read_mostly = 64;
+
+static void local_flush_tlb_range_threshold_asid(unsigned long start,
+                                                unsigned long size,
+                                                unsigned long stride,
+                                                unsigned long asid)
 {
-       if (size <= stride)
-               local_flush_tlb_page(start);
-       else
-               local_flush_tlb_all();
+       unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
+       int i;
+
+       if (nr_ptes_in_range > tlb_flush_all_threshold) {
+               local_flush_tlb_all_asid(asid);
+               return;
+       }
+
+       for (i = 0; i < nr_ptes_in_range; ++i) {
+               local_flush_tlb_page_asid(start, asid);
+               start += stride;
+       }
 }
 
 static inline void local_flush_tlb_range_asid(unsigned long start,
@@ -37,8 +60,10 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
 {
        if (size <= stride)
                local_flush_tlb_page_asid(start, asid);
-       else
+       else if (size == FLUSH_TLB_MAX_SIZE)
                local_flush_tlb_all_asid(asid);
+       else
+               local_flush_tlb_range_threshold_asid(start, size, stride, asid);
 }
 
 static void __ipi_flush_tlb_all(void *info)
@@ -51,7 +76,7 @@ void flush_tlb_all(void)
        if (riscv_use_ipi_for_rfence())
                on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
        else
-               sbi_remote_sfence_vma(NULL, 0, -1);
+               sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
 }
 
 struct flush_tlb_range_data {
@@ -68,68 +93,62 @@ static void __ipi_flush_tlb_range_asid(void *info)
        local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __ipi_flush_tlb_range(void *info)
-{
-       struct flush_tlb_range_data *d = info;
-
-       local_flush_tlb_range(d->start, d->size, d->stride);
-}
-
 static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
                              unsigned long size, unsigned long stride)
 {
        struct flush_tlb_range_data ftd;
-       struct cpumask *cmask = mm_cpumask(mm);
-       unsigned int cpuid;
+       const struct cpumask *cmask;
+       unsigned long asid = FLUSH_TLB_NO_ASID;
        bool broadcast;
 
-       if (cpumask_empty(cmask))
-               return;
+       if (mm) {
+               unsigned int cpuid;
 
-       cpuid = get_cpu();
-       /* check if the tlbflush needs to be sent to other CPUs */
-       broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
-       if (static_branch_unlikely(&use_asid_allocator)) {
-               unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
-
-               if (broadcast) {
-                       if (riscv_use_ipi_for_rfence()) {
-                               ftd.asid = asid;
-                               ftd.start = start;
-                               ftd.size = size;
-                               ftd.stride = stride;
-                               on_each_cpu_mask(cmask,
-                                                __ipi_flush_tlb_range_asid,
-                                                &ftd, 1);
-                       } else
-                               sbi_remote_sfence_vma_asid(cmask,
-                                                          start, size, asid);
-               } else {
-                       local_flush_tlb_range_asid(start, size, stride, asid);
-               }
+               cmask = mm_cpumask(mm);
+               if (cpumask_empty(cmask))
+                       return;
+
+               cpuid = get_cpu();
+               /* check if the tlbflush needs to be sent to other CPUs */
+               broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
+
+               if (static_branch_unlikely(&use_asid_allocator))
+                       asid = atomic_long_read(&mm->context.id) & asid_mask;
        } else {
-               if (broadcast) {
-                       if (riscv_use_ipi_for_rfence()) {
-                               ftd.asid = 0;
-                               ftd.start = start;
-                               ftd.size = size;
-                               ftd.stride = stride;
-                               on_each_cpu_mask(cmask,
-                                                __ipi_flush_tlb_range,
-                                                &ftd, 1);
-                       } else
-                               sbi_remote_sfence_vma(cmask, start, size);
-               } else {
-                       local_flush_tlb_range(start, size, stride);
-               }
+               cmask = cpu_online_mask;
+               broadcast = true;
        }
 
-       put_cpu();
+       if (broadcast) {
+               if (riscv_use_ipi_for_rfence()) {
+                       ftd.asid = asid;
+                       ftd.start = start;
+                       ftd.size = size;
+                       ftd.stride = stride;
+                       on_each_cpu_mask(cmask,
+                                        __ipi_flush_tlb_range_asid,
+                                        &ftd, 1);
+               } else
+                       sbi_remote_sfence_vma_asid(cmask,
+                                                  start, size, asid);
+       } else {
+               local_flush_tlb_range_asid(start, size, stride, asid);
+       }
+
+       if (mm)
+               put_cpu();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-       __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
+       __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+}
+
+void flush_tlb_mm_range(struct mm_struct *mm,
+                       unsigned long start, unsigned long end,
+                       unsigned int page_size)
+{
+       __flush_tlb_range(mm, start, end - start, page_size);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
@@ -140,8 +159,40 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
 {
-       __flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
+       unsigned long stride_size;
+
+       if (!is_vm_hugetlb_page(vma)) {
+               stride_size = PAGE_SIZE;
+       } else {
+               stride_size = huge_page_size(hstate_vma(vma));
+
+               /*
+                * As stated in the privileged specification, every PTE in a
+                * NAPOT region must be invalidated, so reset the stride in that
+                * case.
+                */
+               if (has_svnapot()) {
+                       if (stride_size >= PGDIR_SIZE)
+                               stride_size = PGDIR_SIZE;
+                       else if (stride_size >= P4D_SIZE)
+                               stride_size = P4D_SIZE;
+                       else if (stride_size >= PUD_SIZE)
+                               stride_size = PUD_SIZE;
+                       else if (stride_size >= PMD_SIZE)
+                               stride_size = PMD_SIZE;
+                       else
+                               stride_size = PAGE_SIZE;
+               }
+       }
+
+       __flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       __flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
 }
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
                        unsigned long end)
index 9e6476719abbbb4e9206cd89292be7f25261a5b6..280b0eb352b8b1025b29711451f828fd50ce7631 100644 (file)
@@ -81,6 +81,14 @@ ifdef CONFIG_CFI_CLANG
 PURGATORY_CFLAGS_REMOVE                += $(CC_FLAGS_CFI)
 endif
 
+ifdef CONFIG_RELOCATABLE
+PURGATORY_CFLAGS_REMOVE                += -fPIE
+endif
+
+ifdef CONFIG_SHADOW_CALL_STACK
+PURGATORY_CFLAGS_REMOVE                += $(CC_FLAGS_SCS)
+endif
+
 CFLAGS_REMOVE_purgatory.o      += $(PURGATORY_CFLAGS_REMOVE)
 CFLAGS_purgatory.o             += $(PURGATORY_CFLAGS)
 
index 0194f4554130ae6b89cf5db97a069a65bc2c6fc1..5bcf3af903daa2f9fb2aaf1e57d79121bfcda988 100644 (file)
@@ -7,15 +7,11 @@
  * Author: Li Zhengyu (lizhengyu3@huawei.com)
  *
  */
-
-.macro size, sym:req
-       .size \sym, . - \sym
-.endm
+#include <linux/linkage.h>
 
 .text
 
-.globl purgatory_start
-purgatory_start:
+SYM_CODE_START(purgatory_start)
 
        lla     sp, .Lstack
        mv      s0, a0  /* The hartid of the current hart */
@@ -28,8 +24,7 @@ purgatory_start:
        mv      a1, s1
        ld      a2, riscv_kernel_entry
        jr      a2
-
-size purgatory_start
+SYM_CODE_END(purgatory_start)
 
 .align 4
        .rept   256
@@ -39,9 +34,6 @@ size purgatory_start
 
 .data
 
-.globl riscv_kernel_entry
-riscv_kernel_entry:
-       .quad   0
-size riscv_kernel_entry
+SYM_DATA(riscv_kernel_entry, .quad 0)
 
 .end
index b0d67ac8695f9f232072c324a6b408c8f3b657bb..3bec98d20283b4eee33f946111ed35e3f8897bff 100644 (file)
@@ -236,6 +236,7 @@ config S390
        select THREAD_INFO_IN_TASK
        select TRACE_IRQFLAGS_SUPPORT
        select TTY
+       select USER_STACKTRACE_SUPPORT
        select VIRT_CPU_ACCOUNTING
        select ZONE_DMA
        # Note: keep the above list sorted alphabetically
index 7b7521762633f9a75a1a4a09070436c7e2782ffc..2ab4872fbee1c9ab6938c2bf973d9590ccea2d06 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/init.h>
 #include <linux/ctype.h>
 #include <linux/pgtable.h>
+#include <asm/page-states.h>
 #include <asm/ebcdic.h>
 #include <asm/sclp.h>
 #include <asm/sections.h>
@@ -24,6 +25,7 @@ unsigned int __bootdata_preserved(zlib_dfltcc_support) = ZLIB_DFLTCC_FULL;
 struct ipl_parameter_block __bootdata_preserved(ipl_block);
 int __bootdata_preserved(ipl_block_valid);
 int __bootdata_preserved(__kaslr_enabled);
+int __bootdata_preserved(cmma_flag) = 1;
 
 unsigned long vmalloc_size = VMALLOC_DEFAULT_SIZE;
 unsigned long memory_limit;
@@ -295,6 +297,12 @@ void parse_boot_command_line(void)
                if (!strcmp(param, "nokaslr"))
                        __kaslr_enabled = 0;
 
+               if (!strcmp(param, "cmma")) {
+                       rc = kstrtobool(val, &enabled);
+                       if (!rc && !enabled)
+                               cmma_flag = 0;
+               }
+
 #if IS_ENABLED(CONFIG_KVM)
                if (!strcmp(param, "prot_virt")) {
                        rc = kstrtobool(val, &enabled);
index 8826c4f1864595c96a6073940f0371d4487b4945..8104e0e3d188d8e58b3c2824672d84ea780f0d7f 100644 (file)
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/string.h>
 #include <linux/elf.h>
+#include <asm/page-states.h>
 #include <asm/boot_data.h>
 #include <asm/sections.h>
 #include <asm/maccess.h>
@@ -57,6 +58,48 @@ static void detect_facilities(void)
                machine.has_nx = 1;
 }
 
+static int cmma_test_essa(void)
+{
+       unsigned long reg1, reg2, tmp = 0;
+       int rc = 1;
+       psw_t old;
+
+       /* Test ESSA_GET_STATE */
+       asm volatile(
+               "       mvc     0(16,%[psw_old]),0(%[psw_pgm])\n"
+               "       epsw    %[reg1],%[reg2]\n"
+               "       st      %[reg1],0(%[psw_pgm])\n"
+               "       st      %[reg2],4(%[psw_pgm])\n"
+               "       larl    %[reg1],1f\n"
+               "       stg     %[reg1],8(%[psw_pgm])\n"
+               "       .insn   rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
+               "       la      %[rc],0\n"
+               "1:     mvc     0(16,%[psw_pgm]),0(%[psw_old])\n"
+               : [reg1] "=&d" (reg1),
+                 [reg2] "=&a" (reg2),
+                 [rc] "+&d" (rc),
+                 [tmp] "=&d" (tmp),
+                 "+Q" (S390_lowcore.program_new_psw),
+                 "=Q" (old)
+               : [psw_old] "a" (&old),
+                 [psw_pgm] "a" (&S390_lowcore.program_new_psw),
+                 [cmd] "i" (ESSA_GET_STATE)
+               : "cc", "memory");
+       return rc;
+}
+
+static void cmma_init(void)
+{
+       if (!cmma_flag)
+               return;
+       if (cmma_test_essa()) {
+               cmma_flag = 0;
+               return;
+       }
+       if (test_facility(147))
+               cmma_flag = 2;
+}
+
 static void setup_lpp(void)
 {
        S390_lowcore.current_pid = 0;
@@ -306,6 +349,7 @@ void startup_kernel(void)
        setup_boot_command_line();
        parse_boot_command_line();
        detect_facilities();
+       cmma_init();
        sanitize_prot_virt_host();
        max_physmem_end = detect_max_physmem_end();
        setup_ident_map_size(max_physmem_end);
index 3075d65e112c1cac41767187ba3eaa58f390ceea..e3a4500a5a75714370285f46f8c5a97518b6a31d 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/sched/task.h>
 #include <linux/pgtable.h>
 #include <linux/kasan.h>
+#include <asm/page-states.h>
 #include <asm/pgalloc.h>
 #include <asm/facility.h>
 #include <asm/sections.h>
@@ -70,6 +71,10 @@ static void kasan_populate_shadow(void)
        crst_table_init((unsigned long *)kasan_early_shadow_pud, pud_val(pud_z));
        crst_table_init((unsigned long *)kasan_early_shadow_pmd, pmd_val(pmd_z));
        memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);
+       __arch_set_page_dat(kasan_early_shadow_p4d, 1UL << CRST_ALLOC_ORDER);
+       __arch_set_page_dat(kasan_early_shadow_pud, 1UL << CRST_ALLOC_ORDER);
+       __arch_set_page_dat(kasan_early_shadow_pmd, 1UL << CRST_ALLOC_ORDER);
+       __arch_set_page_dat(kasan_early_shadow_pte, 1);
 
        /*
         * Current memory layout:
@@ -223,6 +228,7 @@ static void *boot_crst_alloc(unsigned long val)
 
        table = (unsigned long *)physmem_alloc_top_down(RR_VMEM, size, size);
        crst_table_init(table, val);
+       __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
        return table;
 }
 
@@ -238,6 +244,7 @@ static pte_t *boot_pte_alloc(void)
        if (!pte_leftover) {
                pte_leftover = (void *)physmem_alloc_top_down(RR_VMEM, PAGE_SIZE, PAGE_SIZE);
                pte = pte_leftover + _PAGE_TABLE_SIZE;
+               __arch_set_page_dat(pte, 1);
        } else {
                pte = pte_leftover;
                pte_leftover = NULL;
@@ -418,6 +425,14 @@ void setup_vmem(unsigned long asce_limit)
        unsigned long asce_bits;
        int i;
 
+       /*
+        * Mark whole memory as no-dat. This must be done before any
+        * page tables are allocated, or kernel image builtin pages
+        * are marked as dat tables.
+        */
+       for_each_physmem_online_range(i, &start, &end)
+               __arch_set_page_nodat((void *)start, (end - start) >> PAGE_SHIFT);
+
        if (asce_limit == _REGION1_SIZE) {
                asce_type = _REGION2_ENTRY_EMPTY;
                asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
@@ -429,6 +444,8 @@ void setup_vmem(unsigned long asce_limit)
 
        crst_table_init((unsigned long *)swapper_pg_dir, asce_type);
        crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
+       __arch_set_page_dat((void *)swapper_pg_dir, 1UL << CRST_ALLOC_ORDER);
+       __arch_set_page_dat((void *)invalid_pg_dir, 1UL << CRST_ALLOC_ORDER);
 
        /*
         * To allow prefixing the lowcore must be mapped with 4KB pages.
index 21b9e5290c0488d3ba9ed1db83a81c4e906f17af..01f1682a73b7614388ff6d71c35d83c998d7932c 100644 (file)
@@ -73,8 +73,6 @@ struct kprobe_ctlblk {
 void arch_remove_kprobe(struct kprobe *p);
 
 int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
-int kprobe_exceptions_notify(struct notifier_block *self,
-       unsigned long val, void *data);
 
 #define flush_insn_slot(p)     do { } while (0)
 
index 829d68e2c68582430b29e3c154cccb02df6ea130..bb1b4bef1878b2bf9359ea41943113c729c5d594 100644 (file)
@@ -11,7 +11,6 @@ typedef struct {
        cpumask_t cpu_attach_mask;
        atomic_t flush_count;
        unsigned int flush_mm;
-       struct list_head pgtable_list;
        struct list_head gmap_list;
        unsigned long gmap_asce;
        unsigned long asce;
@@ -39,7 +38,6 @@ typedef struct {
 
 #define INIT_MM_CONTEXT(name)                                             \
        .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),           \
-       .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
        .context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
 
 #endif
index 757fe6f0d802c0b2b137e1a79946ec00c1cef002..929af18b09081a0893559aec76f2ac869bf79fff 100644 (file)
@@ -22,7 +22,6 @@ static inline int init_new_context(struct task_struct *tsk,
        unsigned long asce_type, init_entry;
 
        spin_lock_init(&mm->context.lock);
-       INIT_LIST_HEAD(&mm->context.pgtable_list);
        INIT_LIST_HEAD(&mm->context.gmap_list);
        cpumask_clear(&mm->context.cpu_attach_mask);
        atomic_set(&mm->context.flush_count, 0);
index c33c4deb545ffda9078bd6d1f5bb22804488060f..08fcbd6281208ef89151f4dbcd41737270b9a56e 100644 (file)
@@ -7,6 +7,9 @@
 #ifndef PAGE_STATES_H
 #define PAGE_STATES_H
 
+#include <asm/sections.h>
+#include <asm/page.h>
+
 #define ESSA_GET_STATE                 0
 #define ESSA_SET_STABLE                        1
 #define ESSA_SET_UNUSED                        2
 
 #define ESSA_MAX       ESSA_SET_STABLE_NODAT
 
+extern int __bootdata_preserved(cmma_flag);
+
+static __always_inline unsigned long essa(unsigned long paddr, unsigned char cmd)
+{
+       unsigned long rc;
+
+       asm volatile(
+               "       .insn   rrf,0xb9ab0000,%[rc],%[paddr],%[cmd],0"
+               : [rc] "=d" (rc)
+               : [paddr] "d" (paddr),
+                 [cmd] "i" (cmd));
+       return rc;
+}
+
+static __always_inline void __set_page_state(void *addr, unsigned long num_pages, unsigned char cmd)
+{
+       unsigned long paddr = __pa(addr) & PAGE_MASK;
+
+       while (num_pages--) {
+               essa(paddr, cmd);
+               paddr += PAGE_SIZE;
+       }
+}
+
+static inline void __set_page_unused(void *addr, unsigned long num_pages)
+{
+       __set_page_state(addr, num_pages, ESSA_SET_UNUSED);
+}
+
+static inline void __set_page_stable_dat(void *addr, unsigned long num_pages)
+{
+       __set_page_state(addr, num_pages, ESSA_SET_STABLE);
+}
+
+static inline void __set_page_stable_nodat(void *addr, unsigned long num_pages)
+{
+       __set_page_state(addr, num_pages, ESSA_SET_STABLE_NODAT);
+}
+
+static inline void __arch_set_page_nodat(void *addr, unsigned long num_pages)
+{
+       if (!cmma_flag)
+               return;
+       if (cmma_flag < 2)
+               __set_page_stable_dat(addr, num_pages);
+       else
+               __set_page_stable_nodat(addr, num_pages);
+}
+
+static inline void __arch_set_page_dat(void *addr, unsigned long num_pages)
+{
+       if (!cmma_flag)
+               return;
+       __set_page_stable_dat(addr, num_pages);
+}
+
 #endif
index cfec0743314eaa507999c36fe346d84fdbe0f95d..73b9c3bf377f886411b687992024217e4371e08b 100644 (file)
@@ -164,7 +164,6 @@ static inline int page_reset_referenced(unsigned long addr)
 struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
-void arch_set_page_dat(struct page *page, int order);
 
 static inline int devmem_is_allowed(unsigned long pfn)
 {
index b248694e00247b57b5be3338535038e9910335d1..e91cd6bbc330d960996eb0b0f9148e0a818ba3ef 100644 (file)
@@ -159,13 +159,6 @@ struct zpci_dev {
        unsigned long   *dma_table;
        int             tlb_refresh;
 
-       spinlock_t      iommu_bitmap_lock;
-       unsigned long   *iommu_bitmap;
-       unsigned long   *lazy_bitmap;
-       unsigned long   iommu_size;
-       unsigned long   iommu_pages;
-       unsigned int    next_bit;
-
        struct iommu_device iommu_dev;  /* IOMMU core handle */
 
        char res_name[16];
@@ -180,10 +173,6 @@ struct zpci_dev {
        struct zpci_fmb *fmb;
        u16             fmb_update;     /* update interval */
        u16             fmb_length;
-       /* software counters */
-       atomic64_t allocated_pages;
-       atomic64_t mapped_pages;
-       atomic64_t unmapped_pages;
 
        u8              version;
        enum pci_bus_speed max_bus_speed;
index d6189ed14f84874ac1f135d1480377d64f6f7383..f0c677ddd270606df61e7fd6ccd9b6c17f89f6b9 100644 (file)
@@ -50,6 +50,9 @@ struct clp_fh_list_entry {
 #define CLP_UTIL_STR_LEN       64
 #define CLP_PFIP_NR_SEGMENTS   4
 
+/* PCI function type numbers */
+#define PCI_FUNC_TYPE_ISM      0x5     /* ISM device */
+
 extern bool zpci_unique_uid;
 
 struct clp_rsp_slpc_pci {
index 7119c04c51c5c864677de1ed928b33a8b74b6d74..42d7cc4262ca48d1368cc31ab804f07bf558a7b7 100644 (file)
@@ -82,117 +82,16 @@ enum zpci_ioat_dtype {
 #define ZPCI_TABLE_VALID_MASK          0x20
 #define ZPCI_TABLE_PROT_MASK           0x200
 
-static inline unsigned int calc_rtx(dma_addr_t ptr)
-{
-       return ((unsigned long) ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
-}
-
-static inline unsigned int calc_sx(dma_addr_t ptr)
-{
-       return ((unsigned long) ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
-}
-
-static inline unsigned int calc_px(dma_addr_t ptr)
-{
-       return ((unsigned long) ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
-}
-
-static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
-{
-       *entry &= ZPCI_PTE_FLAG_MASK;
-       *entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
-}
-
-static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
-{
-       *entry &= ZPCI_RTE_FLAG_MASK;
-       *entry |= (sto & ZPCI_RTE_ADDR_MASK);
-       *entry |= ZPCI_TABLE_TYPE_RTX;
-}
-
-static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
-{
-       *entry &= ZPCI_STE_FLAG_MASK;
-       *entry |= (pto & ZPCI_STE_ADDR_MASK);
-       *entry |= ZPCI_TABLE_TYPE_SX;
-}
-
-static inline void validate_rt_entry(unsigned long *entry)
-{
-       *entry &= ~ZPCI_TABLE_VALID_MASK;
-       *entry &= ~ZPCI_TABLE_OFFSET_MASK;
-       *entry |= ZPCI_TABLE_VALID;
-       *entry |= ZPCI_TABLE_LEN_RTX;
-}
-
-static inline void validate_st_entry(unsigned long *entry)
-{
-       *entry &= ~ZPCI_TABLE_VALID_MASK;
-       *entry |= ZPCI_TABLE_VALID;
-}
-
-static inline void invalidate_pt_entry(unsigned long *entry)
-{
-       WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
-       *entry &= ~ZPCI_PTE_VALID_MASK;
-       *entry |= ZPCI_PTE_INVALID;
-}
-
-static inline void validate_pt_entry(unsigned long *entry)
-{
-       WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
-       *entry &= ~ZPCI_PTE_VALID_MASK;
-       *entry |= ZPCI_PTE_VALID;
-}
-
-static inline void entry_set_protected(unsigned long *entry)
-{
-       *entry &= ~ZPCI_TABLE_PROT_MASK;
-       *entry |= ZPCI_TABLE_PROTECTED;
-}
-
-static inline void entry_clr_protected(unsigned long *entry)
-{
-       *entry &= ~ZPCI_TABLE_PROT_MASK;
-       *entry |= ZPCI_TABLE_UNPROTECTED;
-}
-
-static inline int reg_entry_isvalid(unsigned long entry)
-{
-       return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
-}
-
-static inline int pt_entry_isvalid(unsigned long entry)
-{
-       return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
-}
-
-static inline unsigned long *get_rt_sto(unsigned long entry)
-{
-       if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
-               return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
-       else
-               return NULL;
-
-}
-
-static inline unsigned long *get_st_pto(unsigned long entry)
-{
-       if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
-               return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
-       else
-               return NULL;
-}
-
-/* Prototypes */
-void dma_free_seg_table(unsigned long);
-unsigned long *dma_alloc_cpu_table(gfp_t gfp);
-void dma_cleanup_tables(unsigned long *);
-unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
-                                 gfp_t gfp);
-void dma_update_cpu_trans(unsigned long *entry, phys_addr_t page_addr, int flags);
-
-extern const struct dma_map_ops s390_pci_dma_ops;
+struct zpci_iommu_ctrs {
+       atomic64_t              mapped_pages;
+       atomic64_t              unmapped_pages;
+       atomic64_t              global_rpcits;
+       atomic64_t              sync_map_rpcits;
+       atomic64_t              sync_rpcits;
+};
+
+struct zpci_dev;
 
+struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev);
 
 #endif
index 376b4b23bdaa349dff7065b8b64295302b046c09..502d655fe6ae6650dd24fded14215f903c0311e3 100644 (file)
@@ -25,7 +25,6 @@ void crst_table_free(struct mm_struct *, unsigned long *);
 unsigned long *page_table_alloc(struct mm_struct *);
 struct page *page_table_alloc_pgste(struct mm_struct *mm);
 void page_table_free(struct mm_struct *, unsigned long *);
-void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
 void page_table_free_pgste(struct page *page);
 extern int page_table_allocate_pgste;
 
index 25cadc2b9cff277e46ec688bd20b288fb095f77e..df316436d2e140a4c4e04ef19f938e7e194d0b2c 100644 (file)
@@ -125,9 +125,6 @@ static inline void vmcp_cma_reserve(void) { }
 
 void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault);
 
-void cmma_init(void);
-void cmma_init_nodat(void);
-
 extern void (*_machine_restart)(char *command);
 extern void (*_machine_halt)(void);
 extern void (*_machine_power_off)(void);
index 78f7b729b65f105efbb6dd6dfeb56fd97552b05b..31ec4f545e036a26bfc0d8cc7fb8163fdd2c93a5 100644 (file)
@@ -6,6 +6,13 @@
 #include <linux/ptrace.h>
 #include <asm/switch_to.h>
 
+struct stack_frame_user {
+       unsigned long back_chain;
+       unsigned long empty1[5];
+       unsigned long gprs[10];
+       unsigned long empty2[4];
+};
+
 enum stack_type {
        STACK_TYPE_UNKNOWN,
        STACK_TYPE_TASK,
index 383b1f91442c997b21af066a287318020a88dea2..d1455a601adcad03a6bd7ec2f467cec47ed55bd8 100644 (file)
@@ -69,12 +69,9 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
        tlb->mm->context.flush_mm = 1;
        tlb->freed_tables = 1;
        tlb->cleared_pmds = 1;
-       /*
-        * page_table_free_rcu takes care of the allocation bit masks
-        * of the 2K table fragments in the 4K page table page,
-        * then calls tlb_remove_table.
-        */
-       page_table_free_rcu(tlb, (unsigned long *) pte, address);
+       if (mm_alloc_pgste(tlb->mm))
+               gmap_unlink(tlb->mm, (unsigned long *)pte, address);
+       tlb_remove_ptdesc(tlb, pte);
 }
 
 /*
@@ -112,7 +109,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
        __tlb_adjust_range(tlb, address, PAGE_SIZE);
        tlb->mm->context.flush_mm = 1;
        tlb->freed_tables = 1;
-       tlb_remove_table(tlb, p4d);
+       tlb_remove_ptdesc(tlb, p4d);
 }
 
 /*
@@ -130,7 +127,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
        tlb->mm->context.flush_mm = 1;
        tlb->freed_tables = 1;
        tlb->cleared_p4ds = 1;
-       tlb_remove_table(tlb, pud);
+       tlb_remove_ptdesc(tlb, pud);
 }
 
 
index ff1f02b54771cbe26a8c3c45b75def84a0e157f7..eb43e5922a25d75489d7fe1cfe715449ac55c0c8 100644 (file)
@@ -46,6 +46,7 @@ decompressor_handled_param(vmalloc);
 decompressor_handled_param(dfltcc);
 decompressor_handled_param(facilities);
 decompressor_handled_param(nokaslr);
+decompressor_handled_param(cmma);
 #if IS_ENABLED(CONFIG_KVM)
 decompressor_handled_param(prot_virt);
 #endif
index c27321cb0969fbe6b3c6e9ec8a99db344000285e..dfa77da2fd2ec5413b6358c853f5a8532d250419 100644 (file)
 #include <linux/export.h>
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
 #include <linux/sysfs.h>
+#include <asm/stacktrace.h>
 #include <asm/irq.h>
 #include <asm/cpu_mf.h>
 #include <asm/lowcore.h>
@@ -212,6 +215,44 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
        }
 }
 
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+                        struct pt_regs *regs)
+{
+       struct stack_frame_user __user *sf;
+       unsigned long ip, sp;
+       bool first = true;
+
+       if (is_compat_task())
+               return;
+       perf_callchain_store(entry, instruction_pointer(regs));
+       sf = (void __user *)user_stack_pointer(regs);
+       pagefault_disable();
+       while (entry->nr < entry->max_stack) {
+               if (__get_user(sp, &sf->back_chain))
+                       break;
+               if (__get_user(ip, &sf->gprs[8]))
+                       break;
+               if (ip & 0x1) {
+                       /*
+                        * If the instruction address is invalid, and this
+                        * is the first stack frame, assume r14 has not
+                        * been written to the stack yet. Otherwise exit.
+                        */
+                       if (first && !(regs->gprs[14] & 0x1))
+                               ip = regs->gprs[14];
+                       else
+                               break;
+               }
+               perf_callchain_store(entry, ip);
+               /* Sanity check: ABI requires SP to be aligned 8 bytes. */
+               if (!sp || sp & 0x7)
+                       break;
+               sf = (void __user *)sp;
+               first = false;
+       }
+       pagefault_enable();
+}
+
 /* Perf definitions for PMU event attributes in sysfs */
 ssize_t cpumf_events_sysfs_show(struct device *dev,
                                struct device_attribute *attr, char *page)
index 0787010139f7769aa8c98020d5d69c5fa720247d..94f440e38303191d64097f12a274cb46e5a10e2e 100644 (file)
@@ -6,9 +6,12 @@
  */
 
 #include <linux/stacktrace.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
 #include <asm/stacktrace.h>
 #include <asm/unwind.h>
 #include <asm/kprobes.h>
+#include <asm/ptrace.h>
 
 void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
                     struct task_struct *task, struct pt_regs *regs)
@@ -58,3 +61,43 @@ int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
                return -EINVAL;
        return 0;
 }
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+                         const struct pt_regs *regs)
+{
+       struct stack_frame_user __user *sf;
+       unsigned long ip, sp;
+       bool first = true;
+
+       if (is_compat_task())
+               return;
+       if (!consume_entry(cookie, instruction_pointer(regs)))
+               return;
+       sf = (void __user *)user_stack_pointer(regs);
+       pagefault_disable();
+       while (1) {
+               if (__get_user(sp, &sf->back_chain))
+                       break;
+               if (__get_user(ip, &sf->gprs[8]))
+                       break;
+               if (ip & 0x1) {
+                       /*
+                        * If the instruction address is invalid, and this
+                        * is the first stack frame, assume r14 has not
+                        * been written to the stack yet. Otherwise exit.
+                        */
+                       if (first && !(regs->gprs[14] & 0x1))
+                               ip = regs->gprs[14];
+                       else
+                               break;
+               }
+               if (!consume_entry(cookie, ip))
+                       break;
+               /* Sanity check: ABI requires SP to be aligned 8 bytes. */
+               if (!sp || sp & 0x7)
+                       break;
+               sf = (void __user *)sp;
+               first = false;
+       }
+       pagefault_enable();
+}
index 20786f6883b299f1af02411b7e8dc2151e762433..6f96b5a71c6383d07eb447cb80df70214bdd1910 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/ksm.h>
 #include <linux/mman.h>
 #include <linux/pgtable.h>
-
+#include <asm/page-states.h>
 #include <asm/pgalloc.h>
 #include <asm/gmap.h>
 #include <asm/page.h>
@@ -33,7 +33,7 @@ static struct page *gmap_alloc_crst(void)
        page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
        if (!page)
                return NULL;
-       arch_set_page_dat(page, CRST_ALLOC_ORDER);
+       __arch_set_page_dat(page_to_virt(page), 1UL << CRST_ALLOC_ORDER);
        return page;
 }
 
index 7eca10c32caa5bab87bc822a21b71b91fce32ef0..43e612bc2bcd34524a08903c8e07710492da3fb8 100644 (file)
@@ -164,14 +164,10 @@ void __init mem_init(void)
 
        pv_init();
        kfence_split_mapping();
-       /* Setup guest page hinting */
-       cmma_init();
 
        /* this will put all low memory onto the freelists */
        memblock_free_all();
        setup_zero_pages();     /* Setup zeroed pages. */
-
-       cmma_init_nodat();
 }
 
 void free_initmem(void)
index a31acb2c4ef24c387ed5536457de5a61a03fef4a..01f9b39e65f5b05376f7dce1471b05aae00df7b7 100644 (file)
  * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/memblock.h>
-#include <linux/gfp.h>
-#include <linux/init.h>
-#include <asm/asm-extable.h>
-#include <asm/facility.h>
 #include <asm/page-states.h>
+#include <asm/sections.h>
+#include <asm/page.h>
 
-static int cmma_flag = 1;
-
-static int __init cmma(char *str)
-{
-       bool enabled;
-
-       if (!kstrtobool(str, &enabled))
-               cmma_flag = enabled;
-       return 1;
-}
-__setup("cmma=", cmma);
-
-static inline int cmma_test_essa(void)
-{
-       unsigned long tmp = 0;
-       int rc = -EOPNOTSUPP;
-
-       /* test ESSA_GET_STATE */
-       asm volatile(
-               "       .insn   rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n"
-               "0:     la      %[rc],0\n"
-               "1:\n"
-               EX_TABLE(0b,1b)
-               : [rc] "+&d" (rc), [tmp] "+&d" (tmp)
-               : [cmd] "i" (ESSA_GET_STATE));
-       return rc;
-}
-
-void __init cmma_init(void)
-{
-       if (!cmma_flag)
-               return;
-       if (cmma_test_essa()) {
-               cmma_flag = 0;
-               return;
-       }
-       if (test_facility(147))
-               cmma_flag = 2;
-}
-
-static inline void set_page_unused(struct page *page, int order)
-{
-       int i, rc;
-
-       for (i = 0; i < (1 << order); i++)
-               asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
-                            : "=&d" (rc)
-                            : "a" (page_to_phys(page + i)),
-                              "i" (ESSA_SET_UNUSED));
-}
-
-static inline void set_page_stable_dat(struct page *page, int order)
-{
-       int i, rc;
-
-       for (i = 0; i < (1 << order); i++)
-               asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
-                            : "=&d" (rc)
-                            : "a" (page_to_phys(page + i)),
-                              "i" (ESSA_SET_STABLE));
-}
-
-static inline void set_page_stable_nodat(struct page *page, int order)
-{
-       int i, rc;
-
-       for (i = 0; i < (1 << order); i++)
-               asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
-                            : "=&d" (rc)
-                            : "a" (page_to_phys(page + i)),
-                              "i" (ESSA_SET_STABLE_NODAT));
-}
-
-static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end)
-{
-       unsigned long next;
-       struct page *page;
-       pmd_t *pmd;
-
-       pmd = pmd_offset(pud, addr);
-       do {
-               next = pmd_addr_end(addr, end);
-               if (pmd_none(*pmd) || pmd_large(*pmd))
-                       continue;
-               page = phys_to_page(pmd_val(*pmd));
-               set_bit(PG_arch_1, &page->flags);
-       } while (pmd++, addr = next, addr != end);
-}
-
-static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
-{
-       unsigned long next;
-       struct page *page;
-       pud_t *pud;
-       int i;
-
-       pud = pud_offset(p4d, addr);
-       do {
-               next = pud_addr_end(addr, end);
-               if (pud_none(*pud) || pud_large(*pud))
-                       continue;
-               if (!pud_folded(*pud)) {
-                       page = phys_to_page(pud_val(*pud));
-                       for (i = 0; i < 4; i++)
-                               set_bit(PG_arch_1, &page[i].flags);
-               }
-               mark_kernel_pmd(pud, addr, next);
-       } while (pud++, addr = next, addr != end);
-}
-
-static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
-{
-       unsigned long next;
-       struct page *page;
-       p4d_t *p4d;
-       int i;
-
-       p4d = p4d_offset(pgd, addr);
-       do {
-               next = p4d_addr_end(addr, end);
-               if (p4d_none(*p4d))
-                       continue;
-               if (!p4d_folded(*p4d)) {
-                       page = phys_to_page(p4d_val(*p4d));
-                       for (i = 0; i < 4; i++)
-                               set_bit(PG_arch_1, &page[i].flags);
-               }
-               mark_kernel_pud(p4d, addr, next);
-       } while (p4d++, addr = next, addr != end);
-}
-
-static void mark_kernel_pgd(void)
-{
-       unsigned long addr, next, max_addr;
-       struct page *page;
-       pgd_t *pgd;
-       int i;
-
-       addr = 0;
-       /*
-        * Figure out maximum virtual address accessible with the
-        * kernel ASCE. This is required to keep the page table walker
-        * from accessing non-existent entries.
-        */
-       max_addr = (S390_lowcore.kernel_asce.val & _ASCE_TYPE_MASK) >> 2;
-       max_addr = 1UL << (max_addr * 11 + 31);
-       pgd = pgd_offset_k(addr);
-       do {
-               next = pgd_addr_end(addr, max_addr);
-               if (pgd_none(*pgd))
-                       continue;
-               if (!pgd_folded(*pgd)) {
-                       page = phys_to_page(pgd_val(*pgd));
-                       for (i = 0; i < 4; i++)
-                               set_bit(PG_arch_1, &page[i].flags);
-               }
-               mark_kernel_p4d(pgd, addr, next);
-       } while (pgd++, addr = next, addr != max_addr);
-}
-
-void __init cmma_init_nodat(void)
-{
-       struct page *page;
-       unsigned long start, end, ix;
-       int i;
-
-       if (cmma_flag < 2)
-               return;
-       /* Mark pages used in kernel page tables */
-       mark_kernel_pgd();
-       page = virt_to_page(&swapper_pg_dir);
-       for (i = 0; i < 4; i++)
-               set_bit(PG_arch_1, &page[i].flags);
-       page = virt_to_page(&invalid_pg_dir);
-       for (i = 0; i < 4; i++)
-               set_bit(PG_arch_1, &page[i].flags);
-
-       /* Set all kernel pages not used for page tables to stable/no-dat */
-       for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
-               page = pfn_to_page(start);
-               for (ix = start; ix < end; ix++, page++) {
-                       if (__test_and_clear_bit(PG_arch_1, &page->flags))
-                               continue;       /* skip page table pages */
-                       if (!list_empty(&page->lru))
-                               continue;       /* skip free pages */
-                       set_page_stable_nodat(page, 0);
-               }
-       }
-}
+int __bootdata_preserved(cmma_flag);
 
 void arch_free_page(struct page *page, int order)
 {
        if (!cmma_flag)
                return;
-       set_page_unused(page, order);
+       __set_page_unused(page_to_virt(page), 1UL << order);
 }
 
 void arch_alloc_page(struct page *page, int order)
@@ -220,14 +26,7 @@ void arch_alloc_page(struct page *page, int order)
        if (!cmma_flag)
                return;
        if (cmma_flag < 2)
-               set_page_stable_dat(page, order);
+               __set_page_stable_dat(page_to_virt(page), 1UL << order);
        else
-               set_page_stable_nodat(page, order);
-}
-
-void arch_set_page_dat(struct page *page, int order)
-{
-       if (!cmma_flag)
-               return;
-       set_page_stable_dat(page, order);
+               __set_page_stable_nodat(page_to_virt(page), 1UL << order);
 }
index 5488ae17318ee314698ab16b7ce3a368fd8e8f0d..008e487c94a631aa72615bfde3081717e4ffa876 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <asm/mmu_context.h>
+#include <asm/page-states.h>
 #include <asm/pgalloc.h>
 #include <asm/gmap.h>
 #include <asm/tlb.h>
@@ -43,11 +44,13 @@ __initcall(page_table_register_sysctl);
 unsigned long *crst_table_alloc(struct mm_struct *mm)
 {
        struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
+       unsigned long *table;
 
        if (!ptdesc)
                return NULL;
-       arch_set_page_dat(ptdesc_page(ptdesc), CRST_ALLOC_ORDER);
-       return (unsigned long *) ptdesc_to_virt(ptdesc);
+       table = ptdesc_to_virt(ptdesc);
+       __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
+       return table;
 }
 
 void crst_table_free(struct mm_struct *mm, unsigned long *table)
@@ -130,11 +133,6 @@ err_p4d:
        return -ENOMEM;
 }
 
-static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
-{
-       return atomic_fetch_xor(bits, v) ^ bits;
-}
-
 #ifdef CONFIG_PGSTE
 
 struct page *page_table_alloc_pgste(struct mm_struct *mm)
@@ -145,7 +143,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm)
        ptdesc = pagetable_alloc(GFP_KERNEL, 0);
        if (ptdesc) {
                table = (u64 *)ptdesc_to_virt(ptdesc);
-               arch_set_page_dat(virt_to_page(table), 0);
+               __arch_set_page_dat(table, 1);
                memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
                memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
        }
@@ -159,125 +157,11 @@ void page_table_free_pgste(struct page *page)
 
 #endif /* CONFIG_PGSTE */
 
-/*
- * A 2KB-pgtable is either upper or lower half of a normal page.
- * The second half of the page may be unused or used as another
- * 2KB-pgtable.
- *
- * Whenever possible the parent page for a new 2KB-pgtable is picked
- * from the list of partially allocated pages mm_context_t::pgtable_list.
- * In case the list is empty a new parent page is allocated and added to
- * the list.
- *
- * When a parent page gets fully allocated it contains 2KB-pgtables in both
- * upper and lower halves and is removed from mm_context_t::pgtable_list.
- *
- * When 2KB-pgtable is freed from to fully allocated parent page that
- * page turns partially allocated and added to mm_context_t::pgtable_list.
- *
- * If 2KB-pgtable is freed from the partially allocated parent page that
- * page turns unused and gets removed from mm_context_t::pgtable_list.
- * Furthermore, the unused parent page is released.
- *
- * As follows from the above, no unallocated or fully allocated parent
- * pages are contained in mm_context_t::pgtable_list.
- *
- * The upper byte (bits 24-31) of the parent page _refcount is used
- * for tracking contained 2KB-pgtables and has the following format:
- *
- *   PP  AA
- * 01234567    upper byte (bits 24-31) of struct page::_refcount
- *   ||  ||
- *   ||  |+--- upper 2KB-pgtable is allocated
- *   ||  +---- lower 2KB-pgtable is allocated
- *   |+------- upper 2KB-pgtable is pending for removal
- *   +-------- lower 2KB-pgtable is pending for removal
- *
- * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
- * using _refcount is possible).
- *
- * When 2KB-pgtable is allocated the corresponding AA bit is set to 1.
- * The parent page is either:
- *   - added to mm_context_t::pgtable_list in case the second half of the
- *     parent page is still unallocated;
- *   - removed from mm_context_t::pgtable_list in case both hales of the
- *     parent page are allocated;
- * These operations are protected with mm_context_t::lock.
- *
- * When 2KB-pgtable is deallocated the corresponding AA bit is set to 0
- * and the corresponding PP bit is set to 1 in a single atomic operation.
- * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually
- * exclusive and may never be both set to 1!
- * The parent page is either:
- *   - added to mm_context_t::pgtable_list in case the second half of the
- *     parent page is still allocated;
- *   - removed from mm_context_t::pgtable_list in case the second half of
- *     the parent page is unallocated;
- * These operations are protected with mm_context_t::lock.
- *
- * It is important to understand that mm_context_t::lock only protects
- * mm_context_t::pgtable_list and AA bits, but not the parent page itself
- * and PP bits.
- *
- * Releasing the parent page happens whenever the PP bit turns from 1 to 0,
- * while both AA bits and the second PP bit are already unset. Then the
- * parent page does not contain any 2KB-pgtable fragment anymore, and it has
- * also been removed from mm_context_t::pgtable_list. It is safe to release
- * the page therefore.
- *
- * PGSTE memory spaces use full 4KB-pgtables and do not need most of the
- * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable
- * while the PP bits are never used, nor such a page is added to or removed
- * from mm_context_t::pgtable_list.
- *
- * pte_free_defer() overrides those rules: it takes the page off pgtable_list,
- * and prevents both 2K fragments from being reused. pte_free_defer() has to
- * guarantee that its pgtable cannot be reused before the RCU grace period
- * has elapsed (which page_table_free_rcu() does not actually guarantee).
- * But for simplicity, because page->rcu_head overlays page->lru, and because
- * the RCU callback might not be called before the mm_context_t has been freed,
- * pte_free_defer() in this implementation prevents both fragments from being
- * reused, and delays making the call to RCU until both fragments are freed.
- */
 unsigned long *page_table_alloc(struct mm_struct *mm)
 {
-       unsigned long *table;
        struct ptdesc *ptdesc;
-       unsigned int mask, bit;
-
-       /* Try to get a fragment of a 4K page as a 2K page table */
-       if (!mm_alloc_pgste(mm)) {
-               table = NULL;
-               spin_lock_bh(&mm->context.lock);
-               if (!list_empty(&mm->context.pgtable_list)) {
-                       ptdesc = list_first_entry(&mm->context.pgtable_list,
-                                               struct ptdesc, pt_list);
-                       mask = atomic_read(&ptdesc->_refcount) >> 24;
-                       /*
-                        * The pending removal bits must also be checked.
-                        * Failure to do so might lead to an impossible
-                        * value of (i.e 0x13 or 0x23) written to _refcount.
-                        * Such values violate the assumption that pending and
-                        * allocation bits are mutually exclusive, and the rest
-                        * of the code unrails as result. That could lead to
-                        * a whole bunch of races and corruptions.
-                        */
-                       mask = (mask | (mask >> 4)) & 0x03U;
-                       if (mask != 0x03U) {
-                               table = (unsigned long *) ptdesc_to_virt(ptdesc);
-                               bit = mask & 1;         /* =1 -> second 2K */
-                               if (bit)
-                                       table += PTRS_PER_PTE;
-                               atomic_xor_bits(&ptdesc->_refcount,
-                                                       0x01U << (bit + 24));
-                               list_del_init(&ptdesc->pt_list);
-                       }
-               }
-               spin_unlock_bh(&mm->context.lock);
-               if (table)
-                       return table;
-       }
-       /* Allocate a fresh page */
+       unsigned long *table;
+
        ptdesc = pagetable_alloc(GFP_KERNEL, 0);
        if (!ptdesc)
                return NULL;
@@ -285,177 +169,57 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
                pagetable_free(ptdesc);
                return NULL;
        }
-       arch_set_page_dat(ptdesc_page(ptdesc), 0);
-       /* Initialize page table */
-       table = (unsigned long *) ptdesc_to_virt(ptdesc);
-       if (mm_alloc_pgste(mm)) {
-               /* Return 4K page table with PGSTEs */
-               INIT_LIST_HEAD(&ptdesc->pt_list);
-               atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
-               memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
-               memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
-       } else {
-               /* Return the first 2K fragment of the page */
-               atomic_xor_bits(&ptdesc->_refcount, 0x01U << 24);
-               memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
-               spin_lock_bh(&mm->context.lock);
-               list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
-               spin_unlock_bh(&mm->context.lock);
-       }
+       table = ptdesc_to_virt(ptdesc);
+       __arch_set_page_dat(table, 1);
+       /* pt_list is used by gmap only */
+       INIT_LIST_HEAD(&ptdesc->pt_list);
+       memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
+       memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
        return table;
 }
 
-static void page_table_release_check(struct page *page, void *table,
-                                    unsigned int half, unsigned int mask)
+static void pagetable_pte_dtor_free(struct ptdesc *ptdesc)
 {
-       char msg[128];
-
-       if (!IS_ENABLED(CONFIG_DEBUG_VM))
-               return;
-       if (!mask && list_empty(&page->lru))
-               return;
-       snprintf(msg, sizeof(msg),
-                "Invalid pgtable %p release half 0x%02x mask 0x%02x",
-                table, half, mask);
-       dump_page(page, msg);
-}
-
-static void pte_free_now(struct rcu_head *head)
-{
-       struct ptdesc *ptdesc;
-
-       ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
        pagetable_pte_dtor(ptdesc);
        pagetable_free(ptdesc);
 }
 
 void page_table_free(struct mm_struct *mm, unsigned long *table)
 {
-       unsigned int mask, bit, half;
        struct ptdesc *ptdesc = virt_to_ptdesc(table);
 
-       if (!mm_alloc_pgste(mm)) {
-               /* Free 2K page table fragment of a 4K page */
-               bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
-               spin_lock_bh(&mm->context.lock);
-               /*
-                * Mark the page for delayed release. The actual release
-                * will happen outside of the critical section from this
-                * function or from __tlb_remove_table()
-                */
-               mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
-               mask >>= 24;
-               if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
-                       /*
-                        * Other half is allocated, and neither half has had
-                        * its free deferred: add page to head of list, to make
-                        * this freed half available for immediate reuse.
-                        */
-                       list_add(&ptdesc->pt_list, &mm->context.pgtable_list);
-               } else {
-                       /* If page is on list, now remove it. */
-                       list_del_init(&ptdesc->pt_list);
-               }
-               spin_unlock_bh(&mm->context.lock);
-               mask = atomic_xor_bits(&ptdesc->_refcount, 0x10U << (bit + 24));
-               mask >>= 24;
-               if (mask != 0x00U)
-                       return;
-               half = 0x01U << bit;
-       } else {
-               half = 0x03U;
-               mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
-               mask >>= 24;
-       }
-
-       page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
-       if (folio_test_clear_active(ptdesc_folio(ptdesc)))
-               call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
-       else
-               pte_free_now(&ptdesc->pt_rcu_head);
+       pagetable_pte_dtor_free(ptdesc);
 }
 
-void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
-                        unsigned long vmaddr)
+void __tlb_remove_table(void *table)
 {
-       struct mm_struct *mm;
-       unsigned int bit, mask;
        struct ptdesc *ptdesc = virt_to_ptdesc(table);
+       struct page *page = ptdesc_page(ptdesc);
 
-       mm = tlb->mm;
-       if (mm_alloc_pgste(mm)) {
-               gmap_unlink(mm, table, vmaddr);
-               table = (unsigned long *) ((unsigned long)table | 0x03U);
-               tlb_remove_ptdesc(tlb, table);
+       if (compound_order(page) == CRST_ALLOC_ORDER) {
+               /* pmd, pud, or p4d */
+               pagetable_free(ptdesc);
                return;
        }
-       bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
-       spin_lock_bh(&mm->context.lock);
-       /*
-        * Mark the page for delayed release. The actual release will happen
-        * outside of the critical section from __tlb_remove_table() or from
-        * page_table_free()
-        */
-       mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24));
-       mask >>= 24;
-       if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) {
-               /*
-                * Other half is allocated, and neither half has had
-                * its free deferred: add page to end of list, to make
-                * this freed half available for reuse once its pending
-                * bit has been cleared by __tlb_remove_table().
-                */
-               list_add_tail(&ptdesc->pt_list, &mm->context.pgtable_list);
-       } else {
-               /* If page is on list, now remove it. */
-               list_del_init(&ptdesc->pt_list);
-       }
-       spin_unlock_bh(&mm->context.lock);
-       table = (unsigned long *) ((unsigned long) table | (0x01U << bit));
-       tlb_remove_table(tlb, table);
+       pagetable_pte_dtor_free(ptdesc);
 }
 
-void __tlb_remove_table(void *_table)
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void pte_free_now(struct rcu_head *head)
 {
-       unsigned int mask = (unsigned long) _table & 0x03U, half = mask;
-       void *table = (void *)((unsigned long) _table ^ mask);
-       struct ptdesc *ptdesc = virt_to_ptdesc(table);
+       struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
 
-       switch (half) {
-       case 0x00U:     /* pmd, pud, or p4d */
-               pagetable_free(ptdesc);
-               return;
-       case 0x01U:     /* lower 2K of a 4K page table */
-       case 0x02U:     /* higher 2K of a 4K page table */
-               mask = atomic_xor_bits(&ptdesc->_refcount, mask << (4 + 24));
-               mask >>= 24;
-               if (mask != 0x00U)
-                       return;
-               break;
-       case 0x03U:     /* 4K page table with pgstes */
-               mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24);
-               mask >>= 24;
-               break;
-       }
-
-       page_table_release_check(ptdesc_page(ptdesc), table, half, mask);
-       if (folio_test_clear_active(ptdesc_folio(ptdesc)))
-               call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
-       else
-               pte_free_now(&ptdesc->pt_rcu_head);
+       pagetable_pte_dtor_free(ptdesc);
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
 {
-       struct page *page;
+       struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);
 
-       page = virt_to_page(pgtable);
-       SetPageActive(page);
-       page_table_free(mm, (unsigned long *)pgtable);
+       call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
        /*
-        * page_table_free() does not do the pgste gmap_unlink() which
-        * page_table_free_rcu() does: warn us if pgste ever reaches here.
+        * THPs are not allowed for KVM guests. Warn if pgste ever reaches here.
+        * Turn to the generic pte_free_defer() version once gmap is removed.
         */
        WARN_ON_ONCE(mm_has_pgste(mm));
 }
index 2e8a1064f103ace6d861016aa521e16239dd9738..186a020857cf6a6ebe3b9a513d01bfd8eba233cb 100644 (file)
@@ -50,8 +50,7 @@ void *vmem_crst_alloc(unsigned long val)
        if (!table)
                return NULL;
        crst_table_init(table, val);
-       if (slab_is_available())
-               arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
+       __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
        return table;
 }
 
@@ -67,6 +66,7 @@ pte_t __ref *vmem_pte_alloc(void)
        if (!pte)
                return NULL;
        memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
+       __arch_set_page_dat(pte, 1);
        return pte;
 }
 
index 5ae31ca9dd441d6180b13e624c0adaca4e49fc23..0547a10406e72a1a0745a842228130bc0710f1a0 100644 (file)
@@ -3,7 +3,7 @@
 # Makefile for the s390 PCI subsystem.
 #
 
-obj-$(CONFIG_PCI)      += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
+obj-$(CONFIG_PCI)      += pci.o pci_irq.o pci_clp.o pci_sysfs.o \
                           pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
                           pci_bus.o pci_kvm_hook.o
 obj-$(CONFIG_PCI_IOV)  += pci_iov.o
index 6fab5c085565109d509e59372982ecc79d3168e0..676ac74026a82b578f857e2426a501abdec014c7 100644 (file)
@@ -124,7 +124,11 @@ int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
 
        WARN_ON_ONCE(iota & 0x3fff);
        fib.pba = base;
-       fib.pal = limit;
+       /* Work around off by one in ISM virt device */
+       if (zdev->pft == PCI_FUNC_TYPE_ISM && limit > base)
+               fib.pal = limit + (1 << 12);
+       else
+               fib.pal = limit;
        fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
        fib.gd = zdev->gisa;
        cc = zpci_mod_fc(req, &fib, status);
@@ -153,6 +157,7 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
 int zpci_fmb_enable_device(struct zpci_dev *zdev)
 {
        u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
+       struct zpci_iommu_ctrs *ctrs;
        struct zpci_fib fib = {0};
        u8 cc, status;
 
@@ -165,9 +170,15 @@ int zpci_fmb_enable_device(struct zpci_dev *zdev)
        WARN_ON((u64) zdev->fmb & 0xf);
 
        /* reset software counters */
-       atomic64_set(&zdev->allocated_pages, 0);
-       atomic64_set(&zdev->mapped_pages, 0);
-       atomic64_set(&zdev->unmapped_pages, 0);
+       ctrs = zpci_get_iommu_ctrs(zdev);
+       if (ctrs) {
+               atomic64_set(&ctrs->mapped_pages, 0);
+               atomic64_set(&ctrs->unmapped_pages, 0);
+               atomic64_set(&ctrs->global_rpcits, 0);
+               atomic64_set(&ctrs->sync_map_rpcits, 0);
+               atomic64_set(&ctrs->sync_rpcits, 0);
+       }
+
 
        fib.fmb_addr = virt_to_phys(zdev->fmb);
        fib.gd = zdev->gisa;
@@ -582,7 +593,6 @@ int pcibios_device_add(struct pci_dev *pdev)
                pdev->no_vf_scan = 1;
 
        pdev->dev.groups = zpci_attr_groups;
-       pdev->dev.dma_ops = &s390_pci_dma_ops;
        zpci_map_resources(pdev);
 
        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
@@ -756,8 +766,6 @@ int zpci_hot_reset_device(struct zpci_dev *zdev)
        if (zdev->dma_table)
                rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                        virt_to_phys(zdev->dma_table), &status);
-       else
-               rc = zpci_dma_init_device(zdev);
        if (rc) {
                zpci_disable_device(zdev);
                return rc;
@@ -865,11 +873,6 @@ int zpci_deconfigure_device(struct zpci_dev *zdev)
        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);
 
-       if (zdev->dma_table) {
-               rc = zpci_dma_exit_device(zdev);
-               if (rc)
-                       return rc;
-       }
        if (zdev_enabled(zdev)) {
                rc = zpci_disable_device(zdev);
                if (rc)
@@ -918,8 +921,6 @@ void zpci_release_device(struct kref *kref)
        if (zdev->zbus->bus)
                zpci_bus_remove_device(zdev, false);
 
-       if (zdev->dma_table)
-               zpci_dma_exit_device(zdev);
        if (zdev_enabled(zdev))
                zpci_disable_device(zdev);
 
@@ -1109,10 +1110,6 @@ static int __init pci_base_init(void)
        if (rc)
                goto out_irq;
 
-       rc = zpci_dma_init();
-       if (rc)
-               goto out_dma;
-
        rc = clp_scan_pci_devices();
        if (rc)
                goto out_find;
@@ -1122,8 +1119,6 @@ static int __init pci_base_init(void)
        return 0;
 
 out_find:
-       zpci_dma_exit();
-out_dma:
        zpci_irq_exit();
 out_irq:
        zpci_mem_exit();
index 32245b970a0cf1d72ace477e750f5c340d708943..daa5d7450c7d383b254d225d022e23a14c105e41 100644 (file)
@@ -47,11 +47,6 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev)
                rc = zpci_enable_device(zdev);
                if (rc)
                        return rc;
-               rc = zpci_dma_init_device(zdev);
-               if (rc) {
-                       zpci_disable_device(zdev);
-                       return rc;
-               }
        }
 
        if (!zdev->has_resources) {
index ca6bd98eec136e9a920a250729108730ec652ef5..6dde2263c79d1f57274e016e5867fe3a09ed473c 100644 (file)
@@ -53,9 +53,11 @@ static char *pci_fmt3_names[] = {
 };
 
 static char *pci_sw_names[] = {
-       "Allocated pages",
        "Mapped pages",
        "Unmapped pages",
+       "Global RPCITs",
+       "Sync Map RPCITs",
+       "Sync RPCITs",
 };
 
 static void pci_fmb_show(struct seq_file *m, char *name[], int length,
@@ -69,10 +71,14 @@ static void pci_fmb_show(struct seq_file *m, char *name[], int length,
 
 static void pci_sw_counter_show(struct seq_file *m)
 {
-       struct zpci_dev *zdev = m->private;
-       atomic64_t *counter = &zdev->allocated_pages;
+       struct zpci_iommu_ctrs  *ctrs = zpci_get_iommu_ctrs(m->private);
+       atomic64_t *counter;
        int i;
 
+       if (!ctrs)
+               return;
+
+       counter = &ctrs->mapped_pages;
        for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
                seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
                           atomic64_read(counter));
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
deleted file mode 100644 (file)
index 9920908..0000000
+++ /dev/null
@@ -1,746 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright IBM Corp. 2012
- *
- * Author(s):
- *   Jan Glauber <jang@linux.vnet.ibm.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/iommu-helper.h>
-#include <linux/dma-map-ops.h>
-#include <linux/vmalloc.h>
-#include <linux/pci.h>
-#include <asm/pci_dma.h>
-
-static struct kmem_cache *dma_region_table_cache;
-static struct kmem_cache *dma_page_table_cache;
-static int s390_iommu_strict;
-static u64 s390_iommu_aperture;
-static u32 s390_iommu_aperture_factor = 1;
-
-static int zpci_refresh_global(struct zpci_dev *zdev)
-{
-       return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
-                                 zdev->iommu_pages * PAGE_SIZE);
-}
-
-unsigned long *dma_alloc_cpu_table(gfp_t gfp)
-{
-       unsigned long *table, *entry;
-
-       table = kmem_cache_alloc(dma_region_table_cache, gfp);
-       if (!table)
-               return NULL;
-
-       for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
-               *entry = ZPCI_TABLE_INVALID;
-       return table;
-}
-
-static void dma_free_cpu_table(void *table)
-{
-       kmem_cache_free(dma_region_table_cache, table);
-}
-
-static unsigned long *dma_alloc_page_table(gfp_t gfp)
-{
-       unsigned long *table, *entry;
-
-       table = kmem_cache_alloc(dma_page_table_cache, gfp);
-       if (!table)
-               return NULL;
-
-       for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
-               *entry = ZPCI_PTE_INVALID;
-       return table;
-}
-
-static void dma_free_page_table(void *table)
-{
-       kmem_cache_free(dma_page_table_cache, table);
-}
-
-static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
-{
-       unsigned long old_rte, rte;
-       unsigned long *sto;
-
-       rte = READ_ONCE(*rtep);
-       if (reg_entry_isvalid(rte)) {
-               sto = get_rt_sto(rte);
-       } else {
-               sto = dma_alloc_cpu_table(gfp);
-               if (!sto)
-                       return NULL;
-
-               set_rt_sto(&rte, virt_to_phys(sto));
-               validate_rt_entry(&rte);
-               entry_clr_protected(&rte);
-
-               old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
-               if (old_rte != ZPCI_TABLE_INVALID) {
-                       /* Somone else was faster, use theirs */
-                       dma_free_cpu_table(sto);
-                       sto = get_rt_sto(old_rte);
-               }
-       }
-       return sto;
-}
-
-static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
-{
-       unsigned long old_ste, ste;
-       unsigned long *pto;
-
-       ste = READ_ONCE(*step);
-       if (reg_entry_isvalid(ste)) {
-               pto = get_st_pto(ste);
-       } else {
-               pto = dma_alloc_page_table(gfp);
-               if (!pto)
-                       return NULL;
-               set_st_pto(&ste, virt_to_phys(pto));
-               validate_st_entry(&ste);
-               entry_clr_protected(&ste);
-
-               old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
-               if (old_ste != ZPCI_TABLE_INVALID) {
-                       /* Somone else was faster, use theirs */
-                       dma_free_page_table(pto);
-                       pto = get_st_pto(old_ste);
-               }
-       }
-       return pto;
-}
-
-unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr,
-                                 gfp_t gfp)
-{
-       unsigned long *sto, *pto;
-       unsigned int rtx, sx, px;
-
-       rtx = calc_rtx(dma_addr);
-       sto = dma_get_seg_table_origin(&rto[rtx], gfp);
-       if (!sto)
-               return NULL;
-
-       sx = calc_sx(dma_addr);
-       pto = dma_get_page_table_origin(&sto[sx], gfp);
-       if (!pto)
-               return NULL;
-
-       px = calc_px(dma_addr);
-       return &pto[px];
-}
-
-void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
-{
-       unsigned long pte;
-
-       pte = READ_ONCE(*ptep);
-       if (flags & ZPCI_PTE_INVALID) {
-               invalidate_pt_entry(&pte);
-       } else {
-               set_pt_pfaa(&pte, page_addr);
-               validate_pt_entry(&pte);
-       }
-
-       if (flags & ZPCI_TABLE_PROTECTED)
-               entry_set_protected(&pte);
-       else
-               entry_clr_protected(&pte);
-
-       xchg(ptep, pte);
-}
-
-static int __dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
-                             dma_addr_t dma_addr, size_t size, int flags)
-{
-       unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       phys_addr_t page_addr = (pa & PAGE_MASK);
-       unsigned long *entry;
-       int i, rc = 0;
-
-       if (!nr_pages)
-               return -EINVAL;
-
-       if (!zdev->dma_table)
-               return -EINVAL;
-
-       for (i = 0; i < nr_pages; i++) {
-               entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
-                                          GFP_ATOMIC);
-               if (!entry) {
-                       rc = -ENOMEM;
-                       goto undo_cpu_trans;
-               }
-               dma_update_cpu_trans(entry, page_addr, flags);
-               page_addr += PAGE_SIZE;
-               dma_addr += PAGE_SIZE;
-       }
-
-undo_cpu_trans:
-       if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
-               flags = ZPCI_PTE_INVALID;
-               while (i-- > 0) {
-                       page_addr -= PAGE_SIZE;
-                       dma_addr -= PAGE_SIZE;
-                       entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr,
-                                                  GFP_ATOMIC);
-                       if (!entry)
-                               break;
-                       dma_update_cpu_trans(entry, page_addr, flags);
-               }
-       }
-       return rc;
-}
-
-static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
-                          size_t size, int flags)
-{
-       unsigned long irqflags;
-       int ret;
-
-       /*
-        * With zdev->tlb_refresh == 0, rpcit is not required to establish new
-        * translations when previously invalid translation-table entries are
-        * validated. With lazy unmap, rpcit is skipped for previously valid
-        * entries, but a global rpcit is then required before any address can
-        * be re-used, i.e. after each iommu bitmap wrap-around.
-        */
-       if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
-               if (!zdev->tlb_refresh)
-                       return 0;
-       } else {
-               if (!s390_iommu_strict)
-                       return 0;
-       }
-
-       ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
-                                PAGE_ALIGN(size));
-       if (ret == -ENOMEM && !s390_iommu_strict) {
-               /* enable the hypervisor to free some resources */
-               if (zpci_refresh_global(zdev))
-                       goto out;
-
-               spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
-               bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
-                             zdev->lazy_bitmap, zdev->iommu_pages);
-               bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
-               spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
-               ret = 0;
-       }
-out:
-       return ret;
-}
-
-static int dma_update_trans(struct zpci_dev *zdev, phys_addr_t pa,
-                           dma_addr_t dma_addr, size_t size, int flags)
-{
-       int rc;
-
-       rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
-       if (rc)
-               return rc;
-
-       rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
-       if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
-               __dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);
-
-       return rc;
-}
-
-void dma_free_seg_table(unsigned long entry)
-{
-       unsigned long *sto = get_rt_sto(entry);
-       int sx;
-
-       for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
-               if (reg_entry_isvalid(sto[sx]))
-                       dma_free_page_table(get_st_pto(sto[sx]));
-
-       dma_free_cpu_table(sto);
-}
-
-void dma_cleanup_tables(unsigned long *table)
-{
-       int rtx;
-
-       if (!table)
-               return;
-
-       for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
-               if (reg_entry_isvalid(table[rtx]))
-                       dma_free_seg_table(table[rtx]);
-
-       dma_free_cpu_table(table);
-}
-
-static unsigned long __dma_alloc_iommu(struct device *dev,
-                                      unsigned long start, int size)
-{
-       struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-
-       return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
-                               start, size, zdev->start_dma >> PAGE_SHIFT,
-                               dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
-                               0);
-}
-
-static dma_addr_t dma_alloc_address(struct device *dev, int size)
-{
-       struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-       unsigned long offset, flags;
-
-       spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
-       offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
-       if (offset == -1) {
-               if (!s390_iommu_strict) {
-                       /* global flush before DMA addresses are reused */
-                       if (zpci_refresh_global(zdev))
-                               goto out_error;
-
-                       bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
-                                     zdev->lazy_bitmap, zdev->iommu_pages);
-                       bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
-               }
-               /* wrap-around */
-               offset = __dma_alloc_iommu(dev, 0, size);
-               if (offset == -1)
-                       goto out_error;
-       }
-       zdev->next_bit = offset + size;
-       spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
-
-       return zdev->start_dma + offset * PAGE_SIZE;
-
-out_error:
-       spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
-       return DMA_MAPPING_ERROR;
-}
-
-static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
-{
-       struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-       unsigned long flags, offset;
-
-       offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
-
-       spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
-       if (!zdev->iommu_bitmap)
-               goto out;
-
-       if (s390_iommu_strict)
-               bitmap_clear(zdev->iommu_bitmap, offset, size);
-       else
-               bitmap_set(zdev->lazy_bitmap, offset, size);
-
-out:
-       spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
-}
-
-static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
-{
-       struct {
-               unsigned long rc;
-               unsigned long addr;
-       } __packed data = {rc, addr};
-
-       zpci_err_hex(&data, sizeof(data));
-}
-
-static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
-                                    unsigned long offset, size_t size,
-                                    enum dma_data_direction direction,
-                                    unsigned long attrs)
-{
-       struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-       unsigned long pa = page_to_phys(page) + offset;
-       int flags = ZPCI_PTE_VALID;
-       unsigned long nr_pages;
-       dma_addr_t dma_addr;
-       int ret;
-
-       /* This rounds up number of pages based on size and offset */
-       nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
-       dma_addr = dma_alloc_address(dev, nr_pages);
-       if (dma_addr == DMA_MAPPING_ERROR) {
-               ret = -ENOSPC;
-               goto out_err;
-       }
-
-       /* Use rounded up size */
-       size = nr_pages * PAGE_SIZE;
-
-       if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
-               flags |= ZPCI_TABLE_PROTECTED;
-
-       ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
-       if (ret)
-               goto out_free;
-
-       atomic64_add(nr_pages, &zdev->mapped_pages);
-       return dma_addr + (offset & ~PAGE_MASK);
-
-out_free:
-       dma_free_address(dev, dma_addr, nr_pages);
-out_err:
-       zpci_err("map error:\n");
-       zpci_err_dma(ret, pa);
-       return DMA_MAPPING_ERROR;
-}
-
-static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
-                                size_t size, enum dma_data_direction direction,
-                                unsigned long attrs)
-{
-       struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-       int npages, ret;
-
-       npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
-       dma_addr = dma_addr & PAGE_MASK;
-       ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
-                              ZPCI_PTE_INVALID);
-       if (ret) {
-               zpci_err("unmap error:\n");
-               zpci_err_dma(ret, dma_addr);
-               return;
-       }
-
-       atomic64_add(npages, &zdev->unmapped_pages);
-       dma_free_address(dev, dma_addr, npages);
-}
-
-static void *s390_dma_alloc(struct device *dev, size_t size,
-                           dma_addr_t *dma_handle, gfp_t flag,
-                           unsigned long attrs)
-{
-       struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-       struct page *page;
-       phys_addr_t pa;
-       dma_addr_t map;
-
-       size = PAGE_ALIGN(size);
-       page = alloc_pages(flag | __GFP_ZERO, get_order(size));
-       if (!page)
-               return NULL;
-
-       pa = page_to_phys(page);
-       map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
-       if (dma_mapping_error(dev, map)) {
-               __free_pages(page, get_order(size));
-               return NULL;
-       }
-
-       atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
-       if (dma_handle)
-               *dma_handle = map;
-       return phys_to_virt(pa);
-}
-
-static void s390_dma_free(struct device *dev, size_t size,
-                         void *vaddr, dma_addr_t dma_handle,
-                         unsigned long attrs)
-{
-       struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-
-       size = PAGE_ALIGN(size);
-       atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
-       s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
-       free_pages((unsigned long)vaddr, get_order(size));
-}
-
-/* Map a segment into a contiguous dma address area */
-static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                            size_t size, dma_addr_t *handle,
-                            enum dma_data_direction dir)
-{
-       unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-       dma_addr_t dma_addr_base, dma_addr;
-       int flags = ZPCI_PTE_VALID;
-       struct scatterlist *s;
-       phys_addr_t pa = 0;
-       int ret;
-
-       dma_addr_base = dma_alloc_address(dev, nr_pages);
-       if (dma_addr_base == DMA_MAPPING_ERROR)
-               return -ENOMEM;
-
-       dma_addr = dma_addr_base;
-       if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
-               flags |= ZPCI_TABLE_PROTECTED;
-
-       for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
-               pa = page_to_phys(sg_page(s));
-               ret = __dma_update_trans(zdev, pa, dma_addr,
-                                        s->offset + s->length, flags);
-               if (ret)
-                       goto unmap;
-
-               dma_addr += s->offset + s->length;
-       }
-       ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
-       if (ret)
-               goto unmap;
-
-       *handle = dma_addr_base;
-       atomic64_add(nr_pages, &zdev->mapped_pages);
-
-       return ret;
-
-unmap:
-       dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
-                        ZPCI_PTE_INVALID);
-       dma_free_address(dev, dma_addr_base, nr_pages);
-       zpci_err("map error:\n");
-       zpci_err_dma(ret, pa);
-       return ret;
-}
-
-static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
-                          int nr_elements, enum dma_data_direction dir,
-                          unsigned long attrs)
-{
-       struct scatterlist *s = sg, *start = sg, *dma = sg;
-       unsigned int max = dma_get_max_seg_size(dev);
-       unsigned int size = s->offset + s->length;
-       unsigned int offset = s->offset;
-       int count = 0, i, ret;
-
-       for (i = 1; i < nr_elements; i++) {
-               s = sg_next(s);
-
-               s->dma_length = 0;
-
-               if (s->offset || (size & ~PAGE_MASK) ||
-                   size + s->length > max) {
-                       ret = __s390_dma_map_sg(dev, start, size,
-                                               &dma->dma_address, dir);
-                       if (ret)
-                               goto unmap;
-
-                       dma->dma_address += offset;
-                       dma->dma_length = size - offset;
-
-                       size = offset = s->offset;
-                       start = s;
-                       dma = sg_next(dma);
-                       count++;
-               }
-               size += s->length;
-       }
-       ret = __s390_dma_map_sg(dev, start, size, &dma->dma_address, dir);
-       if (ret)
-               goto unmap;
-
-       dma->dma_address += offset;
-       dma->dma_length = size - offset;
-
-       return count + 1;
-unmap:
-       for_each_sg(sg, s, count, i)
-               s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
-                                    dir, attrs);
-
-       return ret;
-}
-
-static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-                             int nr_elements, enum dma_data_direction dir,
-                             unsigned long attrs)
-{
-       struct scatterlist *s;
-       int i;
-
-       for_each_sg(sg, s, nr_elements, i) {
-               if (s->dma_length)
-                       s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
-                                            dir, attrs);
-               s->dma_address = 0;
-               s->dma_length = 0;
-       }
-}
-
-static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
-{
-       size_t n = BITS_TO_LONGS(bits);
-       size_t bytes;
-
-       if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
-               return NULL;
-
-       return vzalloc(bytes);
-}
-       
-int zpci_dma_init_device(struct zpci_dev *zdev)
-{
-       u8 status;
-       int rc;
-
-       /*
-        * At this point, if the device is part of an IOMMU domain, this would
-        * be a strong hint towards a bug in the IOMMU API (common) code and/or
-        * simultaneous access via IOMMU and DMA API. So let's issue a warning.
-        */
-       WARN_ON(zdev->s390_domain);
-
-       spin_lock_init(&zdev->iommu_bitmap_lock);
-
-       zdev->dma_table = dma_alloc_cpu_table(GFP_KERNEL);
-       if (!zdev->dma_table) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
-       /*
-        * Restrict the iommu bitmap size to the minimum of the following:
-        * - s390_iommu_aperture which defaults to high_memory
-        * - 3-level pagetable address limit minus start_dma offset
-        * - DMA address range allowed by the hardware (clp query pci fn)
-        *
-        * Also set zdev->end_dma to the actual end address of the usable
-        * range, instead of the theoretical maximum as reported by hardware.
-        *
-        * This limits the number of concurrently usable DMA mappings since
-        * for each DMA mapped memory address we need a DMA address including
-        * extra DMA addresses for multiple mappings of the same memory address.
-        */
-       zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
-       zdev->iommu_size = min3(s390_iommu_aperture,
-                               ZPCI_TABLE_SIZE_RT - zdev->start_dma,
-                               zdev->end_dma - zdev->start_dma + 1);
-       zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
-       zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
-       zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
-       if (!zdev->iommu_bitmap) {
-               rc = -ENOMEM;
-               goto free_dma_table;
-       }
-       if (!s390_iommu_strict) {
-               zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
-               if (!zdev->lazy_bitmap) {
-                       rc = -ENOMEM;
-                       goto free_bitmap;
-               }
-
-       }
-       if (zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
-                              virt_to_phys(zdev->dma_table), &status)) {
-               rc = -EIO;
-               goto free_bitmap;
-       }
-
-       return 0;
-free_bitmap:
-       vfree(zdev->iommu_bitmap);
-       zdev->iommu_bitmap = NULL;
-       vfree(zdev->lazy_bitmap);
-       zdev->lazy_bitmap = NULL;
-free_dma_table:
-       dma_free_cpu_table(zdev->dma_table);
-       zdev->dma_table = NULL;
-out:
-       return rc;
-}
-
-int zpci_dma_exit_device(struct zpci_dev *zdev)
-{
-       int cc = 0;
-
-       /*
-        * At this point, if the device is part of an IOMMU domain, this would
-        * be a strong hint towards a bug in the IOMMU API (common) code and/or
-        * simultaneous access via IOMMU and DMA API. So let's issue a warning.
-        */
-       WARN_ON(zdev->s390_domain);
-       if (zdev_enabled(zdev))
-               cc = zpci_unregister_ioat(zdev, 0);
-       /*
-        * cc == 3 indicates the function is gone already. This can happen
-        * if the function was deconfigured/disabled suddenly and we have not
-        * received a new handle yet.
-        */
-       if (cc && cc != 3)
-               return -EIO;
-
-       dma_cleanup_tables(zdev->dma_table);
-       zdev->dma_table = NULL;
-       vfree(zdev->iommu_bitmap);
-       zdev->iommu_bitmap = NULL;
-       vfree(zdev->lazy_bitmap);
-       zdev->lazy_bitmap = NULL;
-       zdev->next_bit = 0;
-       return 0;
-}
-
-static int __init dma_alloc_cpu_table_caches(void)
-{
-       dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
-                                       ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
-                                       0, NULL);
-       if (!dma_region_table_cache)
-               return -ENOMEM;
-
-       dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
-                                       ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
-                                       0, NULL);
-       if (!dma_page_table_cache) {
-               kmem_cache_destroy(dma_region_table_cache);
-               return -ENOMEM;
-       }
-       return 0;
-}
-
-int __init zpci_dma_init(void)
-{
-       s390_iommu_aperture = (u64)virt_to_phys(high_memory);
-       if (!s390_iommu_aperture_factor)
-               s390_iommu_aperture = ULONG_MAX;
-       else
-               s390_iommu_aperture *= s390_iommu_aperture_factor;
-
-       return dma_alloc_cpu_table_caches();
-}
-
-void zpci_dma_exit(void)
-{
-       kmem_cache_destroy(dma_page_table_cache);
-       kmem_cache_destroy(dma_region_table_cache);
-}
-
-const struct dma_map_ops s390_pci_dma_ops = {
-       .alloc          = s390_dma_alloc,
-       .free           = s390_dma_free,
-       .map_sg         = s390_dma_map_sg,
-       .unmap_sg       = s390_dma_unmap_sg,
-       .map_page       = s390_dma_map_pages,
-       .unmap_page     = s390_dma_unmap_pages,
-       .mmap           = dma_common_mmap,
-       .get_sgtable    = dma_common_get_sgtable,
-       .alloc_pages    = dma_common_alloc_pages,
-       .free_pages     = dma_common_free_pages,
-       /* dma_supported is unconditionally true without a callback */
-};
-EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
-
-static int __init s390_iommu_setup(char *str)
-{
-       if (!strcmp(str, "strict"))
-               s390_iommu_strict = 1;
-       return 1;
-}
-
-__setup("s390_iommu=", s390_iommu_setup);
-
-static int __init s390_iommu_aperture_setup(char *str)
-{
-       if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
-               s390_iommu_aperture_factor = 1;
-       return 1;
-}
-
-__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);
index b9324ca2eb94034200efa9e9f18371873bef7c28..4d9773ef9e0a856e8a21b1ca46174e653daa6360 100644 (file)
@@ -59,9 +59,16 @@ static inline bool ers_result_indicates_abort(pci_ers_result_t ers_res)
        }
 }
 
-static bool is_passed_through(struct zpci_dev *zdev)
+static bool is_passed_through(struct pci_dev *pdev)
 {
-       return zdev->s390_domain;
+       struct zpci_dev *zdev = to_zpci(pdev);
+       bool ret;
+
+       mutex_lock(&zdev->kzdev_lock);
+       ret = !!zdev->kzdev;
+       mutex_unlock(&zdev->kzdev_lock);
+
+       return ret;
 }
 
 static bool is_driver_supported(struct pci_driver *driver)
@@ -176,7 +183,7 @@ static pci_ers_result_t zpci_event_attempt_error_recovery(struct pci_dev *pdev)
        }
        pdev->error_state = pci_channel_io_frozen;
 
-       if (is_passed_through(to_zpci(pdev))) {
+       if (is_passed_through(pdev)) {
                pr_info("%s: Cannot be recovered in the host because it is a pass-through device\n",
                        pci_name(pdev));
                goto out_unlock;
@@ -239,7 +246,7 @@ static void zpci_event_io_failure(struct pci_dev *pdev, pci_channel_state_t es)
         * we will inject the error event and let the guest recover the device
         * itself.
         */
-       if (is_passed_through(to_zpci(pdev)))
+       if (is_passed_through(pdev))
                goto out;
        driver = to_pci_driver(pdev->dev.driver);
        if (driver && driver->err_handler && driver->err_handler->error_detected)
@@ -306,8 +313,6 @@ static void zpci_event_hard_deconfigured(struct zpci_dev *zdev, u32 fh)
        /* Even though the device is already gone we still
         * need to free zPCI resources as part of the disable.
         */
-       if (zdev->dma_table)
-               zpci_dma_exit_device(zdev);
        if (zdev_enabled(zdev))
                zpci_disable_device(zdev);
        zdev->state = ZPCI_FN_STATE_STANDBY;
index cae280e5c047d1d5eaa405c86b5e8444350961c1..8a7abac5181645d6635ed95e1f5706942948c642 100644 (file)
@@ -56,6 +56,7 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
        struct pci_dev *pdev = to_pci_dev(dev);
        struct zpci_dev *zdev = to_zpci(pdev);
        int ret = 0;
+       u8 status;
 
        /* Can't use device_remove_self() here as that would lead us to lock
         * the pci_rescan_remove_lock while holding the device' kernfs lock.
@@ -82,12 +83,6 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
        pci_lock_rescan_remove();
        if (pci_dev_is_added(pdev)) {
                pci_stop_and_remove_bus_device(pdev);
-               if (zdev->dma_table) {
-                       ret = zpci_dma_exit_device(zdev);
-                       if (ret)
-                               goto out;
-               }
-
                if (zdev_enabled(zdev)) {
                        ret = zpci_disable_device(zdev);
                        /*
@@ -105,14 +100,16 @@ static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
                ret = zpci_enable_device(zdev);
                if (ret)
                        goto out;
-               ret = zpci_dma_init_device(zdev);
-               if (ret) {
-                       zpci_disable_device(zdev);
-                       goto out;
+
+               if (zdev->dma_table) {
+                       ret = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+                                                virt_to_phys(zdev->dma_table), &status);
+                       if (ret)
+                               zpci_disable_device(zdev);
                }
-               pci_rescan_bus(zdev->zbus->bus);
        }
 out:
+       pci_rescan_bus(zdev->zbus->bus);
        pci_unlock_rescan_remove();
        if (kn)
                sysfs_unbreak_active_protection(kn);
index eeba83e0a7d29408dfeb5b56272f0b10a26a242a..65d4c3316a5bd29ccc93742947f48c4687021510 100644 (file)
@@ -46,8 +46,6 @@ struct kprobe_ctlblk {
 };
 
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
-extern int kprobe_exceptions_notify(struct notifier_block *self,
-                                   unsigned long val, void *data);
 extern int kprobe_handle_illslot(unsigned long pc);
 #else
 
index 06c2bc767ef75112fbae2ca477dc319df44ccef8..aec742cd898f272cc5f960a392780b59e3054b7e 100644 (file)
@@ -47,8 +47,6 @@ struct kprobe_ctlblk {
        struct prev_kprobe prev_kprobe;
 };
 
-int kprobe_exceptions_notify(struct notifier_block *self,
-                            unsigned long val, void *data);
 int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
                                      struct pt_regs *regs);
index d11206ceff3b1668b474f227a7965bcfafc157f9..1b5d17a9f70dde9f711c53ea2db97762d73eadf3 100644 (file)
@@ -106,6 +106,27 @@ int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport)
 }
 EXPORT_SYMBOL_GPL(tdx_mcall_get_report0);
 
+/**
+ * tdx_hcall_get_quote() - Wrapper to request TD Quote using GetQuote
+ *                         hypercall.
+ * @buf: Address of the directly mapped shared kernel buffer which
+ *       contains TDREPORT. The same buffer will be used by VMM to
+ *       store the generated TD Quote output.
+ * @size: size of the tdquote buffer (4KB-aligned).
+ *
+ * Refer to section titled "TDG.VP.VMCALL<GetQuote>" in the TDX GHCI
+ * v1.0 specification for more information on GetQuote hypercall.
+ * It is used in the TDX guest driver module to get the TD Quote.
+ *
+ * Return 0 on success or error code on failure.
+ */
+u64 tdx_hcall_get_quote(u8 *buf, size_t size)
+{
+       /* Since buf is a shared memory, set the shared (decrypted) bits */
+       return _tdx_hypercall(TDVMCALL_GET_QUOTE, cc_mkdec(virt_to_phys(buf)), size, 0, 0);
+}
+EXPORT_SYMBOL_GPL(tdx_hcall_get_quote);
+
 static void __noreturn tdx_panic(const char *msg)
 {
        struct tdx_module_args args = {
index a2e9317aad4955c1110e9f19d5b94ffd837b3865..5939694dfb28dc10733af721f3fc2a71dd3e7057 100644 (file)
@@ -113,8 +113,6 @@ struct kprobe_ctlblk {
 };
 
 extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
-extern int kprobe_exceptions_notify(struct notifier_block *self,
-                                   unsigned long val, void *data);
 extern int kprobe_int3_handler(struct pt_regs *regs);
 
 #else
index f74695dea2176b4cd76340b7f6f03fa430c78829..ccce7ebd8677287359c4de9271fe29ecf6793d31 100644 (file)
@@ -23,6 +23,7 @@
 
 /* TDX hypercall Leaf IDs */
 #define TDVMCALL_MAP_GPA               0x10001
+#define TDVMCALL_GET_QUOTE             0x10002
 #define TDVMCALL_REPORT_FATAL_ERROR    0x10003
 
 #define TDVMCALL_STATUS_RETRY          1
index adcbe3f1de30b78109b50a6be284e83cc836c22c..f3d5305a60fc50b13708d80ffecab0a529239ee0 100644 (file)
@@ -56,6 +56,8 @@ bool tdx_early_handle_ve(struct pt_regs *regs);
 
 int tdx_mcall_get_report0(u8 *reportdata, u8 *tdreport);
 
+u64 tdx_hcall_get_quote(u8 *buf, size_t size);
+
 #else
 
 static inline void tdx_early_init(void) { };
index 9d51e9894ece782e10ebf5a415076db61d9d061c..fdf25b8d6e784f9904ee7892277acdb25430f3d3 100644 (file)
@@ -501,8 +501,8 @@ static inline void bio_check_ro(struct bio *bio)
        if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
                if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
                        return;
-               pr_warn("Trying to write to read-only block-device %pg\n",
-                       bio->bi_bdev);
+               pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
+                                   bio->bi_bdev);
                /* Older lvm-tools actually trigger this */
        }
 }
index bbf51d55724e3fdb1a1dc339860396e5f767e8fc..70661f58ee41c4ca0d07a02dc966c8f4ef608431 100644 (file)
@@ -1297,10 +1297,12 @@ config CRYPTO_JITTERENTROPY
 
          See https://www.chronox.de/jent.html
 
+if CRYPTO_JITTERENTROPY
+if CRYPTO_FIPS && EXPERT
+
 choice
        prompt "CPU Jitter RNG Memory Size"
        default CRYPTO_JITTERENTROPY_MEMSIZE_2
-       depends on CRYPTO_JITTERENTROPY
        help
          The Jitter RNG measures the execution time of memory accesses.
          Multiple consecutive memory accesses are performed. If the memory
@@ -1344,7 +1346,6 @@ config CRYPTO_JITTERENTROPY_OSR
        int "CPU Jitter RNG Oversampling Rate"
        range 1 15
        default 1
-       depends on CRYPTO_JITTERENTROPY
        help
          The Jitter RNG allows the specification of an oversampling rate (OSR).
          The Jitter RNG operation requires a fixed amount of timing
@@ -1359,7 +1360,6 @@ config CRYPTO_JITTERENTROPY_OSR
 
 config CRYPTO_JITTERENTROPY_TESTINTERFACE
        bool "CPU Jitter RNG Test Interface"
-       depends on CRYPTO_JITTERENTROPY
        help
          The test interface allows a privileged process to capture
          the raw unconditioned high resolution time stamp noise that
@@ -1377,6 +1377,28 @@ config CRYPTO_JITTERENTROPY_TESTINTERFACE
 
          If unsure, select N.
 
+endif  # if CRYPTO_FIPS && EXPERT
+
+if !(CRYPTO_FIPS && EXPERT)
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKS
+       int
+       default 64
+
+config CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE
+       int
+       default 32
+
+config CRYPTO_JITTERENTROPY_OSR
+       int
+       default 1
+
+config CRYPTO_JITTERENTROPY_TESTINTERFACE
+       bool
+
+endif  # if !(CRYPTO_FIPS && EXPERT)
+endif  # if CRYPTO_JITTERENTROPY
+
 config CRYPTO_KDF800108_CTR
        tristate
        select CRYPTO_HMAC
index deee55f939dc8c0a12efd205007eb6693da17eb6..80c3e5354711e1fcaf2e7d487ac742bdc0e2cf77 100644 (file)
@@ -651,6 +651,7 @@ struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
                        err = PTR_ERR(shash);
                        goto out_free_nhash;
                }
+               nhash->using_shash = true;
                *nctx = shash;
                return nhash;
        }
index 9826907eb06e1431c02ddf775d84fc9a3be64647..7bdad836fc6207727300e79c2d6f7db485baf80a 100644 (file)
@@ -135,8 +135,6 @@ source "drivers/uio/Kconfig"
 
 source "drivers/vfio/Kconfig"
 
-source "drivers/vlynq/Kconfig"
-
 source "drivers/virt/Kconfig"
 
 source "drivers/virtio/Kconfig"
index 722d15be0eb7fd8282d4807a6870cbffdb34b6e2..d828329c268da35b38ec301d4d3215ece2e83e81 100644 (file)
@@ -151,7 +151,6 @@ obj-$(CONFIG_BCMA)          += bcma/
 obj-$(CONFIG_VHOST_RING)       += vhost/
 obj-$(CONFIG_VHOST_IOTLB)      += vhost/
 obj-$(CONFIG_VHOST)            += vhost/
-obj-$(CONFIG_VLYNQ)            += vlynq/
 obj-$(CONFIG_GREYBUS)          += greybus/
 obj-$(CONFIG_COMEDI)           += comedi/
 obj-$(CONFIG_STAGING)          += staging/
index 554e487cbfab2492a72edf498083a1fbbee5b0cc..f819e760ff195a902aba7e8fd5073c0dc4d67a4c 100644 (file)
@@ -12,6 +12,7 @@ menuconfig ACPI
        select PNP
        select NLS
        select CRC32
+       select FIRMWARE_TABLE
        default y if X86
        help
          Advanced Configuration and Power Interface (ACPI) support for 
index b280b3e9c7d94d8c04c442056cb990ee75f49731..caa2c16e16974a6c76961240c08df8fbe4d598e1 100644 (file)
@@ -8,8 +8,9 @@
 #define pr_fmt(fmt)     "ACPI: RHCT: " fmt
 
 #include <linux/acpi.h>
+#include <linux/bits.h>
 
-static struct acpi_table_header *acpi_get_rhct(void)
+static struct acpi_table_rhct *acpi_get_rhct(void)
 {
        static struct acpi_table_header *rhct;
        acpi_status status;
@@ -26,7 +27,7 @@ static struct acpi_table_header *acpi_get_rhct(void)
                }
        }
 
-       return rhct;
+       return (struct acpi_table_rhct *)rhct;
 }
 
 /*
@@ -48,7 +49,7 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const
        BUG_ON(acpi_disabled);
 
        if (!table) {
-               rhct = (struct acpi_table_rhct *)acpi_get_rhct();
+               rhct = acpi_get_rhct();
                if (!rhct)
                        return -ENOENT;
        } else {
@@ -81,3 +82,89 @@ int acpi_get_riscv_isa(struct acpi_table_header *table, unsigned int cpu, const
 
        return -1;
 }
+
+static void acpi_parse_hart_info_cmo_node(struct acpi_table_rhct *rhct,
+                                         struct acpi_rhct_hart_info *hart_info,
+                                         u32 *cbom_size, u32 *cboz_size, u32 *cbop_size)
+{
+       u32 size_hartinfo = sizeof(struct acpi_rhct_hart_info);
+       u32 size_hdr = sizeof(struct acpi_rhct_node_header);
+       struct acpi_rhct_node_header *ref_node;
+       struct acpi_rhct_cmo_node *cmo_node;
+       u32 *hart_info_node_offset;
+
+       hart_info_node_offset = ACPI_ADD_PTR(u32, hart_info, size_hartinfo);
+       for (int i = 0; i < hart_info->num_offsets; i++) {
+               ref_node = ACPI_ADD_PTR(struct acpi_rhct_node_header,
+                                       rhct, hart_info_node_offset[i]);
+               if (ref_node->type == ACPI_RHCT_NODE_TYPE_CMO) {
+                       cmo_node = ACPI_ADD_PTR(struct acpi_rhct_cmo_node,
+                                               ref_node, size_hdr);
+                       if (cbom_size && cmo_node->cbom_size <= 30) {
+                               if (!*cbom_size)
+                                       *cbom_size = BIT(cmo_node->cbom_size);
+                               else if (*cbom_size != BIT(cmo_node->cbom_size))
+                                       pr_warn("CBOM size is not the same across harts\n");
+                       }
+
+                       if (cboz_size && cmo_node->cboz_size <= 30) {
+                               if (!*cboz_size)
+                                       *cboz_size = BIT(cmo_node->cboz_size);
+                               else if (*cboz_size != BIT(cmo_node->cboz_size))
+                                       pr_warn("CBOZ size is not the same across harts\n");
+                       }
+
+                       if (cbop_size && cmo_node->cbop_size <= 30) {
+                               if (!*cbop_size)
+                                       *cbop_size = BIT(cmo_node->cbop_size);
+                               else if (*cbop_size != BIT(cmo_node->cbop_size))
+                                       pr_warn("CBOP size is not the same across harts\n");
+                       }
+               }
+       }
+}
+
+/*
+ * During early boot, the caller should call acpi_get_table() and pass its pointer to
+ * these functions (and free up later). At run time, since this table can be used
+ * multiple times, pass NULL so that the table remains in memory.
+ */
+void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
+                            u32 *cboz_size, u32 *cbop_size)
+{
+       u32 size_hdr = sizeof(struct acpi_rhct_node_header);
+       struct acpi_rhct_node_header *node, *end;
+       struct acpi_rhct_hart_info *hart_info;
+       struct acpi_table_rhct *rhct;
+
+       if (acpi_disabled)
+               return;
+
+       if (table) {
+               rhct = (struct acpi_table_rhct *)table;
+       } else {
+               rhct = acpi_get_rhct();
+               if (!rhct)
+                       return;
+       }
+
+       if (cbom_size)
+               *cbom_size = 0;
+
+       if (cboz_size)
+               *cboz_size = 0;
+
+       if (cbop_size)
+               *cbop_size = 0;
+
+       end = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->header.length);
+       for (node = ACPI_ADD_PTR(struct acpi_rhct_node_header, rhct, rhct->node_offset);
+            node < end;
+            node = ACPI_ADD_PTR(struct acpi_rhct_node_header, node, node->length)) {
+               if (node->type == ACPI_RHCT_NODE_TYPE_HART_INFO) {
+                       hart_info = ACPI_ADD_PTR(struct acpi_rhct_hart_info, node, size_hdr);
+                       acpi_parse_hart_info_cmo_node(rhct, hart_info, cbom_size,
+                                                     cboz_size, cbop_size);
+               }
+       }
+}
index 8ab0a82b4da41d2aad9f975db596e6d0a9375e20..c1516337f6682840bdee196e50c1de0cc2472a12 100644 (file)
@@ -37,18 +37,6 @@ static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
 
 static int acpi_apic_instance __initdata_or_acpilib;
 
-enum acpi_subtable_type {
-       ACPI_SUBTABLE_COMMON,
-       ACPI_SUBTABLE_HMAT,
-       ACPI_SUBTABLE_PRMT,
-       ACPI_SUBTABLE_CEDT,
-};
-
-struct acpi_subtable_entry {
-       union acpi_subtable_headers *hdr;
-       enum acpi_subtable_type type;
-};
-
 /*
  * Disable table checksum verification for the early stage due to the size
  * limitation of the current x86 early mapping implementation.
@@ -237,167 +225,6 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
        }
 }
 
-static unsigned long __init_or_acpilib
-acpi_get_entry_type(struct acpi_subtable_entry *entry)
-{
-       switch (entry->type) {
-       case ACPI_SUBTABLE_COMMON:
-               return entry->hdr->common.type;
-       case ACPI_SUBTABLE_HMAT:
-               return entry->hdr->hmat.type;
-       case ACPI_SUBTABLE_PRMT:
-               return 0;
-       case ACPI_SUBTABLE_CEDT:
-               return entry->hdr->cedt.type;
-       }
-       return 0;
-}
-
-static unsigned long __init_or_acpilib
-acpi_get_entry_length(struct acpi_subtable_entry *entry)
-{
-       switch (entry->type) {
-       case ACPI_SUBTABLE_COMMON:
-               return entry->hdr->common.length;
-       case ACPI_SUBTABLE_HMAT:
-               return entry->hdr->hmat.length;
-       case ACPI_SUBTABLE_PRMT:
-               return entry->hdr->prmt.length;
-       case ACPI_SUBTABLE_CEDT:
-               return entry->hdr->cedt.length;
-       }
-       return 0;
-}
-
-static unsigned long __init_or_acpilib
-acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
-{
-       switch (entry->type) {
-       case ACPI_SUBTABLE_COMMON:
-               return sizeof(entry->hdr->common);
-       case ACPI_SUBTABLE_HMAT:
-               return sizeof(entry->hdr->hmat);
-       case ACPI_SUBTABLE_PRMT:
-               return sizeof(entry->hdr->prmt);
-       case ACPI_SUBTABLE_CEDT:
-               return sizeof(entry->hdr->cedt);
-       }
-       return 0;
-}
-
-static enum acpi_subtable_type __init_or_acpilib
-acpi_get_subtable_type(char *id)
-{
-       if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
-               return ACPI_SUBTABLE_HMAT;
-       if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
-               return ACPI_SUBTABLE_PRMT;
-       if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
-               return ACPI_SUBTABLE_CEDT;
-       return ACPI_SUBTABLE_COMMON;
-}
-
-static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
-{
-       return proc->handler || proc->handler_arg;
-}
-
-static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
-                                         union acpi_subtable_headers *hdr,
-                                         unsigned long end)
-{
-       if (proc->handler)
-               return proc->handler(hdr, end);
-       if (proc->handler_arg)
-               return proc->handler_arg(hdr, proc->arg, end);
-       return -EINVAL;
-}
-
-/**
- * acpi_parse_entries_array - for each proc_num find a suitable subtable
- *
- * @id: table id (for debugging purposes)
- * @table_size: size of the root table
- * @table_header: where does the table start?
- * @proc: array of acpi_subtable_proc struct containing entry id
- *        and associated handler with it
- * @proc_num: how big proc is?
- * @max_entries: how many entries can we process?
- *
- * For each proc_num find a subtable with proc->id and run proc->handler
- * on it. Assumption is that there's only single handler for particular
- * entry id.
- *
- * The table_size is not the size of the complete ACPI table (the length
- * field in the header struct), but only the size of the root table; i.e.,
- * the offset from the very first byte of the complete ACPI table, to the
- * first byte of the very first subtable.
- *
- * On success returns sum of all matching entries for all proc handlers.
- * Otherwise, -ENODEV or -EINVAL is returned.
- */
-static int __init_or_acpilib acpi_parse_entries_array(
-       char *id, unsigned long table_size,
-       struct acpi_table_header *table_header, struct acpi_subtable_proc *proc,
-       int proc_num, unsigned int max_entries)
-{
-       struct acpi_subtable_entry entry;
-       unsigned long table_end, subtable_len, entry_len;
-       int count = 0;
-       int errs = 0;
-       int i;
-
-       table_end = (unsigned long)table_header + table_header->length;
-
-       /* Parse all entries looking for a match. */
-
-       entry.type = acpi_get_subtable_type(id);
-       entry.hdr = (union acpi_subtable_headers *)
-           ((unsigned long)table_header + table_size);
-       subtable_len = acpi_get_subtable_header_length(&entry);
-
-       while (((unsigned long)entry.hdr) + subtable_len  < table_end) {
-               if (max_entries && count >= max_entries)
-                       break;
-
-               for (i = 0; i < proc_num; i++) {
-                       if (acpi_get_entry_type(&entry) != proc[i].id)
-                               continue;
-                       if (!has_handler(&proc[i]) ||
-                           (!errs &&
-                            call_handler(&proc[i], entry.hdr, table_end))) {
-                               errs++;
-                               continue;
-                       }
-
-                       proc[i].count++;
-                       break;
-               }
-               if (i != proc_num)
-                       count++;
-
-               /*
-                * If entry->length is 0, break from this loop to avoid
-                * infinite loop.
-                */
-               entry_len = acpi_get_entry_length(&entry);
-               if (entry_len == 0) {
-                       pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id);
-                       return -EINVAL;
-               }
-
-               entry.hdr = (union acpi_subtable_headers *)
-                   ((unsigned long)entry.hdr + entry_len);
-       }
-
-       if (max_entries && count > max_entries) {
-               pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n",
-                       id, proc->id, count);
-       }
-
-       return errs ? -EINVAL : count;
-}
-
 int __init_or_acpilib acpi_table_parse_entries_array(
        char *id, unsigned long table_size, struct acpi_subtable_proc *proc,
        int proc_num, unsigned int max_entries)
index 6fb4e8dc8c3cf8752c98fe29ac068fc2008f52ef..09ed67772fae492323361ab7e94f8a8d4345d2e8 100644 (file)
@@ -6180,24 +6180,10 @@ EXPORT_SYMBOL_GPL(ata_pci_remove_one);
 void ata_pci_shutdown_one(struct pci_dev *pdev)
 {
        struct ata_host *host = pci_get_drvdata(pdev);
-       struct ata_port *ap;
-       unsigned long flags;
        int i;
 
-       /* Tell EH to disable all devices */
-       for (i = 0; i < host->n_ports; i++) {
-               ap = host->ports[i];
-               spin_lock_irqsave(ap->lock, flags);
-               ap->pflags |= ATA_PFLAG_UNLOADING;
-               ata_port_schedule_eh(ap);
-               spin_unlock_irqrestore(ap->lock, flags);
-       }
-
        for (i = 0; i < host->n_ports; i++) {
-               ap = host->ports[i];
-
-               /* Wait for EH to complete before freezing the port */
-               ata_port_wait_eh(ap);
+               struct ata_port *ap = host->ports[i];
 
                ap->pflags |= ATA_PFLAG_FROZEN;
 
index 0c2ae430f5aae009c111a4bc63b78bd0e73595ee..18ceefd176df0fe1bb426080b5c16644afec2fd1 100644 (file)
@@ -121,7 +121,7 @@ static struct ata_port_operations pata_falcon_ops = {
        .set_mode       = pata_falcon_set_mode,
 };
 
-static int __init pata_falcon_init_one(struct platform_device *pdev)
+static int pata_falcon_init_one(struct platform_device *pdev)
 {
        struct resource *base_mem_res, *ctl_mem_res;
        struct resource *base_res, *ctl_res, *irq_res;
@@ -216,23 +216,22 @@ static int __init pata_falcon_init_one(struct platform_device *pdev)
                                 IRQF_SHARED, &pata_falcon_sht);
 }
 
-static int __exit pata_falcon_remove_one(struct platform_device *pdev)
+static void pata_falcon_remove_one(struct platform_device *pdev)
 {
        struct ata_host *host = platform_get_drvdata(pdev);
 
        ata_host_detach(host);
-
-       return 0;
 }
 
 static struct platform_driver pata_falcon_driver = {
-       .remove = __exit_p(pata_falcon_remove_one),
+       .probe = pata_falcon_init_one,
+       .remove_new = pata_falcon_remove_one,
        .driver   = {
                .name   = "atari-falcon-ide",
        },
 };
 
-module_platform_driver_probe(pata_falcon_driver, pata_falcon_init_one);
+module_platform_driver(pata_falcon_driver);
 
 MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
 MODULE_DESCRIPTION("low-level driver for Atari Falcon PATA");
index 3bdbe2b65a2b49f14777fd5fdd21b04f111ba056..94df60ac230782b6ea038f65548c1e29cac37edb 100644 (file)
@@ -124,7 +124,7 @@ static struct ata_port_operations pata_gayle_a4000_ops = {
        .set_mode       = pata_gayle_set_mode,
 };
 
-static int __init pata_gayle_init_one(struct platform_device *pdev)
+static int pata_gayle_init_one(struct platform_device *pdev)
 {
        struct resource *res;
        struct gayle_ide_platform_data *pdata;
@@ -193,23 +193,22 @@ static int __init pata_gayle_init_one(struct platform_device *pdev)
        return 0;
 }
 
-static int __exit pata_gayle_remove_one(struct platform_device *pdev)
+static void pata_gayle_remove_one(struct platform_device *pdev)
 {
        struct ata_host *host = platform_get_drvdata(pdev);
 
        ata_host_detach(host);
-
-       return 0;
 }
 
 static struct platform_driver pata_gayle_driver = {
-       .remove = __exit_p(pata_gayle_remove_one),
+       .probe = pata_gayle_init_one,
+       .remove_new = pata_gayle_remove_one,
        .driver   = {
                .name   = "amiga-gayle-ide",
        },
 };
 
-module_platform_driver_probe(pata_gayle_driver, pata_gayle_init_one);
+module_platform_driver(pata_gayle_driver);
 
 MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
 MODULE_DESCRIPTION("low-level driver for Amiga Gayle PATA");
index 234a84ecde8b1b9c586102338daeb70101e58418..ea6157747199411452b064aab91ed8b4ca472f80 100644 (file)
@@ -1620,17 +1620,19 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
        }
 
        if (!map->cache_bypass && map->format.parse_val) {
-               unsigned int ival;
+               unsigned int ival, offset;
                int val_bytes = map->format.val_bytes;
-               for (i = 0; i < val_len / val_bytes; i++) {
-                       ival = map->format.parse_val(val + (i * val_bytes));
-                       ret = regcache_write(map,
-                                            reg + regmap_get_offset(map, i),
-                                            ival);
+
+               /* Cache the last written value for noinc writes */
+               i = noinc ? val_len - val_bytes : 0;
+               for (; i < val_len; i += val_bytes) {
+                       ival = map->format.parse_val(val + i);
+                       offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
+                       ret = regcache_write(map, reg + offset, ival);
                        if (ret) {
                                dev_err(map->dev,
                                        "Error in caching of register: %x ret: %d\n",
-                                       reg + regmap_get_offset(map, i), ret);
+                                       reg + offset, ret);
                                return ret;
                        }
                }
index 800f131222fc8f9e8ecf9c8355f2a787a80cc5fc..855fdf5c3b4eaae1b8b462fc4ebfb8a4cbb2e7c3 100644 (file)
@@ -250,7 +250,6 @@ static void nbd_dev_remove(struct nbd_device *nbd)
        struct gendisk *disk = nbd->disk;
 
        del_gendisk(disk);
-       put_disk(disk);
        blk_mq_free_tag_set(&nbd->tag_set);
 
        /*
@@ -261,7 +260,7 @@ static void nbd_dev_remove(struct nbd_device *nbd)
        idr_remove(&nbd_index_idr, nbd->index);
        mutex_unlock(&nbd_index_mutex);
        destroy_workqueue(nbd->recv_workq);
-       kfree(nbd);
+       put_disk(disk);
 }
 
 static void nbd_dev_remove_work(struct work_struct *work)
@@ -1608,6 +1607,13 @@ static void nbd_release(struct gendisk *disk)
        nbd_put(nbd);
 }
 
+static void nbd_free_disk(struct gendisk *disk)
+{
+       struct nbd_device *nbd = disk->private_data;
+
+       kfree(nbd);
+}
+
 static const struct block_device_operations nbd_fops =
 {
        .owner =        THIS_MODULE,
@@ -1615,6 +1621,7 @@ static const struct block_device_operations nbd_fops =
        .release =      nbd_release,
        .ioctl =        nbd_ioctl,
        .compat_ioctl = nbd_ioctl,
+       .free_disk =    nbd_free_disk,
 };
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
index 4689ac2e0c0e1a8529ab0bf3f4e7dbb5440fc6a6..d53d6aa8ee69a4e38dc2a627bc6095b30028c82b 100644 (file)
@@ -1311,6 +1311,7 @@ static int virtblk_probe(struct virtio_device *vdev)
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;
        unsigned int queue_depth;
+       size_t max_dma_size;
 
        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
@@ -1409,7 +1410,8 @@ static int virtblk_probe(struct virtio_device *vdev)
        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, UINT_MAX);
 
-       max_size = virtio_max_dma_size(vdev);
+       max_dma_size = virtio_max_dma_size(vdev);
+       max_size = max_dma_size > U32_MAX ? U32_MAX : max_dma_size;
 
        /* Host can optionally specify maximum segment size and number of
         * segments. */
index 50198657230efdff93202597c1dcc2b45924b0e5..57857c0dfba97e0bfdcd5190e8aee31e11028667 100644 (file)
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/interrupt.h>
 #include <linux/of_irq.h>
+#include <linux/limits.h>
 #include <clocksource/timer-riscv.h>
 #include <asm/smp.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/sbi.h>
 #include <asm/timex.h>
 
 static DEFINE_STATIC_KEY_FALSE(riscv_sstc_available);
 static bool riscv_timer_cannot_wake_cpu;
 
+static void riscv_clock_event_stop(void)
+{
+       if (static_branch_likely(&riscv_sstc_available)) {
+               csr_write(CSR_STIMECMP, ULONG_MAX);
+               if (IS_ENABLED(CONFIG_32BIT))
+                       csr_write(CSR_STIMECMPH, ULONG_MAX);
+       } else {
+               sbi_set_timer(U64_MAX);
+       }
+}
+
 static int riscv_clock_next_event(unsigned long delta,
                struct clock_event_device *ce)
 {
        u64 next_tval = get_cycles64() + delta;
 
-       csr_set(CSR_IE, IE_TIE);
        if (static_branch_likely(&riscv_sstc_available)) {
 #if defined(CONFIG_32BIT)
                csr_write(CSR_STIMECMP, next_tval & 0xFFFFFFFF);
@@ -94,6 +105,8 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
        ce->irq = riscv_clock_event_irq;
        if (riscv_timer_cannot_wake_cpu)
                ce->features |= CLOCK_EVT_FEAT_C3STOP;
+       if (static_branch_likely(&riscv_sstc_available))
+               ce->rating = 450;
        clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
 
        enable_percpu_irq(riscv_clock_event_irq,
@@ -119,7 +132,7 @@ static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
 {
        struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);
 
-       csr_clear(CSR_IE, IE_TIE);
+       riscv_clock_event_stop();
        evdev->event_handler(evdev);
 
        return IRQ_HANDLED;
index 11b3e34b76961eee1eb72b2bb8e81dfb7dae82b9..bd1e1357cef8e2b9f232959e4488135cb2709136 100644 (file)
@@ -180,8 +180,11 @@ static const struct of_device_id blocklist[] __initconst = {
        { .compatible = "ti,am62a7", },
        { .compatible = "ti,am62p5", },
 
+       { .compatible = "qcom,ipq5332", },
        { .compatible = "qcom,ipq6018", },
        { .compatible = "qcom,ipq8064", },
+       { .compatible = "qcom,ipq8074", },
+       { .compatible = "qcom,ipq9574", },
        { .compatible = "qcom,apq8064", },
        { .compatible = "qcom,msm8974", },
        { .compatible = "qcom,msm8960", },
index 15367ac08b2b8c55d5bb7d8cf92c633136681409..6355a39418c5b2654c739aff73c041d6dff5d467 100644 (file)
@@ -38,6 +38,11 @@ enum ipq806x_versions {
 
 #define IPQ6000_VERSION        BIT(2)
 
+enum ipq8074_versions {
+       IPQ8074_HAWKEYE_VERSION = 0,
+       IPQ8074_ACORN_VERSION,
+};
+
 struct qcom_cpufreq_drv;
 
 struct qcom_cpufreq_match_data {
@@ -178,6 +183,16 @@ static int qcom_cpufreq_kryo_name_version(struct device *cpu_dev,
        switch (msm_id) {
        case QCOM_ID_MSM8996:
        case QCOM_ID_APQ8096:
+       case QCOM_ID_IPQ5332:
+       case QCOM_ID_IPQ5322:
+       case QCOM_ID_IPQ5312:
+       case QCOM_ID_IPQ5302:
+       case QCOM_ID_IPQ5300:
+       case QCOM_ID_IPQ9514:
+       case QCOM_ID_IPQ9550:
+       case QCOM_ID_IPQ9554:
+       case QCOM_ID_IPQ9570:
+       case QCOM_ID_IPQ9574:
                drv->versions = 1 << (unsigned int)(*speedbin);
                break;
        case QCOM_ID_MSM8996SG:
@@ -338,6 +353,44 @@ static int qcom_cpufreq_ipq6018_name_version(struct device *cpu_dev,
        return 0;
 }
 
+static int qcom_cpufreq_ipq8074_name_version(struct device *cpu_dev,
+                                            struct nvmem_cell *speedbin_nvmem,
+                                            char **pvs_name,
+                                            struct qcom_cpufreq_drv *drv)
+{
+       u32 msm_id;
+       int ret;
+       *pvs_name = NULL;
+
+       ret = qcom_smem_get_soc_id(&msm_id);
+       if (ret)
+               return ret;
+
+       switch (msm_id) {
+       case QCOM_ID_IPQ8070A:
+       case QCOM_ID_IPQ8071A:
+       case QCOM_ID_IPQ8172:
+       case QCOM_ID_IPQ8173:
+       case QCOM_ID_IPQ8174:
+               drv->versions = BIT(IPQ8074_ACORN_VERSION);
+               break;
+       case QCOM_ID_IPQ8072A:
+       case QCOM_ID_IPQ8074A:
+       case QCOM_ID_IPQ8076A:
+       case QCOM_ID_IPQ8078A:
+               drv->versions = BIT(IPQ8074_HAWKEYE_VERSION);
+               break;
+       default:
+               dev_err(cpu_dev,
+                       "SoC ID %u is not part of IPQ8074 family, limiting to 1.4GHz!\n",
+                       msm_id);
+               drv->versions = BIT(IPQ8074_ACORN_VERSION);
+               break;
+       }
+
+       return 0;
+}
+
 static const char *generic_genpd_names[] = { "perf", NULL };
 
 static const struct qcom_cpufreq_match_data match_data_kryo = {
@@ -367,6 +420,10 @@ static const struct qcom_cpufreq_match_data match_data_ipq8064 = {
        .get_version = qcom_cpufreq_ipq8064_name_version,
 };
 
+static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
+       .get_version = qcom_cpufreq_ipq8074_name_version,
+};
+
 static int qcom_cpufreq_probe(struct platform_device *pdev)
 {
        struct qcom_cpufreq_drv *drv;
@@ -494,9 +551,12 @@ static const struct of_device_id qcom_cpufreq_match_list[] __initconst = {
        { .compatible = "qcom,msm8909", .data = &match_data_msm8909 },
        { .compatible = "qcom,msm8996", .data = &match_data_kryo },
        { .compatible = "qcom,qcs404", .data = &match_data_qcs404 },
+       { .compatible = "qcom,ipq5332", .data = &match_data_kryo },
        { .compatible = "qcom,ipq6018", .data = &match_data_ipq6018 },
        { .compatible = "qcom,ipq8064", .data = &match_data_ipq8064 },
+       { .compatible = "qcom,ipq8074", .data = &match_data_ipq8074 },
        { .compatible = "qcom,apq8064", .data = &match_data_krait },
+       { .compatible = "qcom,ipq9574", .data = &match_data_kryo },
        { .compatible = "qcom,msm8974", .data = &match_data_krait },
        { .compatible = "qcom,msm8960", .data = &match_data_krait },
        {},
index 40d055560e52fd2298fa3400384b686e37413b32..2034eb4ce83fb7531be4148e4db0679a39b4587b 100644 (file)
@@ -289,6 +289,9 @@ static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
                        }
                }
        }
+
+       cxlrd->qos_class = cfmws->qtg_id;
+
        rc = cxl_decoder_add(cxld, target_map);
 err_xormap:
        if (rc)
index 45e7e044cf4a0452b873680200d0379a245b3f49..86d7ba23235e3bdefb567e3dc3cb656b4f9593c0 100644 (file)
@@ -73,8 +73,10 @@ struct cxl_rcrb_info;
 resource_size_t __rcrb_to_component(struct device *dev,
                                    struct cxl_rcrb_info *ri,
                                    enum cxl_rcrb which);
+u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
 
 extern struct rw_semaphore cxl_dpa_rwsem;
+extern struct rw_semaphore cxl_region_rwsem;
 
 int cxl_memdev_init(void);
 void cxl_memdev_exit(void);
index 4449b34a80cc9b8cf19893ef32984e1b01fdde20..1cc9be85ba4cd1679838714b91953938b3aeddfc 100644 (file)
@@ -81,26 +81,6 @@ static void parse_hdm_decoder_caps(struct cxl_hdm *cxlhdm)
                cxlhdm->interleave_mask |= GENMASK(14, 12);
 }
 
-static int map_hdm_decoder_regs(struct cxl_port *port, void __iomem *crb,
-                               struct cxl_component_regs *regs)
-{
-       struct cxl_register_map map = {
-               .dev = &port->dev,
-               .resource = port->component_reg_phys,
-               .base = crb,
-               .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
-       };
-
-       cxl_probe_component_regs(&port->dev, crb, &map.component_map);
-       if (!map.component_map.hdm_decoder.valid) {
-               dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
-               /* unique error code to indicate no HDM decoder capability */
-               return -ENODEV;
-       }
-
-       return cxl_map_component_regs(&map, regs, BIT(CXL_CM_CAP_CAP_ID_HDM));
-}
-
 static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
 {
        struct cxl_hdm *cxlhdm;
@@ -153,9 +133,9 @@ static bool should_emulate_decoders(struct cxl_endpoint_dvsec_info *info)
 struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
                                   struct cxl_endpoint_dvsec_info *info)
 {
+       struct cxl_register_map *reg_map = &port->reg_map;
        struct device *dev = &port->dev;
        struct cxl_hdm *cxlhdm;
-       void __iomem *crb;
        int rc;
 
        cxlhdm = devm_kzalloc(dev, sizeof(*cxlhdm), GFP_KERNEL);
@@ -164,19 +144,29 @@ struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port,
        cxlhdm->port = port;
        dev_set_drvdata(dev, cxlhdm);
 
-       crb = ioremap(port->component_reg_phys, CXL_COMPONENT_REG_BLOCK_SIZE);
-       if (!crb && info && info->mem_enabled) {
+       /* Memory devices can configure device HDM using DVSEC range regs. */
+       if (reg_map->resource == CXL_RESOURCE_NONE) {
+               if (!info || !info->mem_enabled) {
+                       dev_err(dev, "No component registers mapped\n");
+                       return ERR_PTR(-ENXIO);
+               }
+
                cxlhdm->decoder_count = info->ranges;
                return cxlhdm;
-       } else if (!crb) {
-               dev_err(dev, "No component registers mapped\n");
-               return ERR_PTR(-ENXIO);
        }
 
-       rc = map_hdm_decoder_regs(port, crb, &cxlhdm->regs);
-       iounmap(crb);
-       if (rc)
+       if (!reg_map->component_map.hdm_decoder.valid) {
+               dev_dbg(&port->dev, "HDM decoder registers not implemented\n");
+               /* unique error code to indicate no HDM decoder capability */
+               return ERR_PTR(-ENODEV);
+       }
+
+       rc = cxl_map_component_regs(reg_map, &cxlhdm->regs,
+                                   BIT(CXL_CM_CAP_CAP_ID_HDM));
+       if (rc) {
+               dev_err(dev, "Failed to map HDM capability.\n");
                return ERR_PTR(rc);
+       }
 
        parse_hdm_decoder_caps(cxlhdm);
        if (cxlhdm->decoder_count == 0) {
@@ -575,17 +565,11 @@ static void cxld_set_type(struct cxl_decoder *cxld, u32 *ctrl)
                          CXL_HDM_DECODER0_CTRL_HOSTONLY);
 }
 
-static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
+static void cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
 {
        struct cxl_dport **t = &cxlsd->target[0];
        int ways = cxlsd->cxld.interleave_ways;
 
-       if (dev_WARN_ONCE(&cxlsd->cxld.dev,
-                         ways > 8 || ways > cxlsd->nr_targets,
-                         "ways: %d overflows targets: %d\n", ways,
-                         cxlsd->nr_targets))
-               return -ENXIO;
-
        *tgt = FIELD_PREP(GENMASK(7, 0), t[0]->port_id);
        if (ways > 1)
                *tgt |= FIELD_PREP(GENMASK(15, 8), t[1]->port_id);
@@ -601,8 +585,6 @@ static int cxlsd_set_targets(struct cxl_switch_decoder *cxlsd, u64 *tgt)
                *tgt |= FIELD_PREP(GENMASK_ULL(55, 48), t[6]->port_id);
        if (ways > 7)
                *tgt |= FIELD_PREP(GENMASK_ULL(63, 56), t[7]->port_id);
-
-       return 0;
 }
 
 /*
@@ -643,13 +625,33 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
        if (cxld->flags & CXL_DECODER_F_ENABLE)
                return 0;
 
-       if (port->commit_end + 1 != id) {
+       if (cxl_num_decoders_committed(port) != id) {
                dev_dbg(&port->dev,
                        "%s: out of order commit, expected decoder%d.%d\n",
-                       dev_name(&cxld->dev), port->id, port->commit_end + 1);
+                       dev_name(&cxld->dev), port->id,
+                       cxl_num_decoders_committed(port));
                return -EBUSY;
        }
 
+       /*
+        * For endpoint decoders hosted on CXL memory devices that
+        * support the sanitize operation, make sure sanitize is not in-flight.
+        */
+       if (is_endpoint_decoder(&cxld->dev)) {
+               struct cxl_endpoint_decoder *cxled =
+                       to_cxl_endpoint_decoder(&cxld->dev);
+               struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+               struct cxl_memdev_state *mds =
+                       to_cxl_memdev_state(cxlmd->cxlds);
+
+               if (mds && mds->security.sanitize_active) {
+                       dev_dbg(&cxlmd->dev,
+                               "attempted to commit %s during sanitize\n",
+                               dev_name(&cxld->dev));
+                       return -EBUSY;
+               }
+       }
+
        down_read(&cxl_dpa_rwsem);
        /* common decoder settings */
        ctrl = readl(hdm + CXL_HDM_DECODER0_CTRL_OFFSET(cxld->id));
@@ -670,13 +672,7 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
                void __iomem *tl_lo = hdm + CXL_HDM_DECODER0_TL_LOW(id);
                u64 targets;
 
-               rc = cxlsd_set_targets(cxlsd, &targets);
-               if (rc) {
-                       dev_dbg(&port->dev, "%s: target configuration error\n",
-                               dev_name(&cxld->dev));
-                       goto err;
-               }
-
+               cxlsd_set_targets(cxlsd, &targets);
                writel(upper_32_bits(targets), tl_hi);
                writel(lower_32_bits(targets), tl_lo);
        } else {
@@ -694,7 +690,6 @@ static int cxl_decoder_commit(struct cxl_decoder *cxld)
 
        port->commit_end++;
        rc = cxld_await_commit(hdm, cxld->id);
-err:
        if (rc) {
                dev_dbg(&port->dev, "%s: error %d committing decoder\n",
                        dev_name(&cxld->dev), rc);
@@ -844,7 +839,7 @@ static int init_hdm_decoder(struct cxl_port *port, struct cxl_decoder *cxld,
                        cxld->target_type = CXL_DECODER_HOSTONLYMEM;
                else
                        cxld->target_type = CXL_DECODER_DEVMEM;
-               if (cxld->id != port->commit_end + 1) {
+               if (cxld->id != cxl_num_decoders_committed(port)) {
                        dev_warn(&port->dev,
                                 "decoder%d.%d: Committed out of order\n",
                                 port->id, cxld->id);
index 4df4f614f490ef96d26f412a9cff42e52d246486..36270dcfb42ef2f6917ea9036c8903e44584e4a2 100644 (file)
@@ -1125,20 +1125,7 @@ int cxl_dev_state_identify(struct cxl_memdev_state *mds)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
 
-/**
- * cxl_mem_sanitize() - Send a sanitization command to the device.
- * @mds: The device data for the operation
- * @cmd: The specific sanitization command opcode
- *
- * Return: 0 if the command was executed successfully, regardless of
- * whether or not the actual security operation is done in the background,
- * such as for the Sanitize case.
- * Error return values can be the result of the mailbox command, -EINVAL
- * when security requirements are not met or invalid contexts.
- *
- * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
- */
-int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
+static int __cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
 {
        int rc;
        u32 sec_out = 0;
@@ -1183,7 +1170,45 @@ int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd)
 
        return 0;
 }
-EXPORT_SYMBOL_NS_GPL(cxl_mem_sanitize, CXL);
+
+
+/**
+ * cxl_mem_sanitize() - Send a sanitization command to the device.
+ * @cxlmd: The device for the operation
+ * @cmd: The specific sanitization command opcode
+ *
+ * Return: 0 if the command was executed successfully, regardless of
+ * whether or not the actual security operation is done in the background,
+ * such as for the Sanitize case.
+ * Error return values can be the result of the mailbox command, -EINVAL
+ * when security requirements are not met or invalid contexts, or -EBUSY
+ * if the sanitize operation is already in flight.
+ *
+ * See CXL 3.0 @8.2.9.8.5.1 Sanitize and @8.2.9.8.5.2 Secure Erase.
+ */
+int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd)
+{
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
+       struct cxl_port  *endpoint;
+       int rc;
+
+       /* synchronize with cxl_mem_probe() and decoder write operations */
+       device_lock(&cxlmd->dev);
+       endpoint = cxlmd->endpoint;
+       down_read(&cxl_region_rwsem);
+       /*
+        * Require an endpoint to be safe otherwise the driver can not
+        * be sure that the device is unmapped.
+        */
+       if (endpoint && cxl_num_decoders_committed(endpoint) == 0)
+               rc = __cxl_mem_sanitize(mds, cmd);
+       else
+               rc = -EBUSY;
+       up_read(&cxl_region_rwsem);
+       device_unlock(&cxlmd->dev);
+
+       return rc;
+}
 
 static int add_dpa_res(struct device *dev, struct resource *parent,
                       struct resource *res, resource_size_t start,
@@ -1224,8 +1249,7 @@ int cxl_mem_create_range_info(struct cxl_memdev_state *mds)
                return 0;
        }
 
-       cxlds->dpa_res =
-               (struct resource)DEFINE_RES_MEM(0, mds->total_bytes);
+       cxlds->dpa_res = DEFINE_RES_MEM(0, mds->total_bytes);
 
        if (mds->partition_align_bytes == 0) {
                rc = add_dpa_res(dev, &cxlds->dpa_res, &cxlds->ram_res, 0,
@@ -1377,6 +1401,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
        mutex_init(&mds->mbox_mutex);
        mutex_init(&mds->event.log_lock);
        mds->cxlds.dev = dev;
+       mds->cxlds.reg_map.host = dev;
+       mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
        mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
 
        return mds;
index 14b547c07f547602fd243a4ca1bd0097c751948e..fc5c2b414793bb351ae0077857b78e72e153e902 100644 (file)
@@ -125,13 +125,16 @@ static ssize_t security_state_show(struct device *dev,
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
        struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-       u64 reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
-       u32 pct = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_PCT_MASK, reg);
-       u16 cmd = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
        unsigned long state = mds->security.state;
+       int rc = 0;
 
-       if (cmd == CXL_MBOX_OP_SANITIZE && pct != 100)
-               return sysfs_emit(buf, "sanitize\n");
+       /* sync with latest submission state */
+       mutex_lock(&mds->mbox_mutex);
+       if (mds->security.sanitize_active)
+               rc = sysfs_emit(buf, "sanitize\n");
+       mutex_unlock(&mds->mbox_mutex);
+       if (rc)
+               return rc;
 
        if (!(state & CXL_PMEM_SEC_STATE_USER_PASS_SET))
                return sysfs_emit(buf, "disabled\n");
@@ -152,24 +155,17 @@ static ssize_t security_sanitize_store(struct device *dev,
                                       const char *buf, size_t len)
 {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
-       struct cxl_port *port = cxlmd->endpoint;
        bool sanitize;
        ssize_t rc;
 
        if (kstrtobool(buf, &sanitize) || !sanitize)
                return -EINVAL;
 
-       if (!port || !is_cxl_endpoint(port))
-               return -EINVAL;
-
-       /* ensure no regions are mapped to this memdev */
-       if (port->commit_end != -1)
-               return -EBUSY;
-
-       rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SANITIZE);
+       rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SANITIZE);
+       if (rc)
+               return rc;
 
-       return rc ? rc : len;
+       return len;
 }
 static struct device_attribute dev_attr_security_sanitize =
        __ATTR(sanitize, 0200, NULL, security_sanitize_store);
@@ -179,24 +175,17 @@ static ssize_t security_erase_store(struct device *dev,
                                    const char *buf, size_t len)
 {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
-       struct cxl_port *port = cxlmd->endpoint;
        ssize_t rc;
        bool erase;
 
        if (kstrtobool(buf, &erase) || !erase)
                return -EINVAL;
 
-       if (!port || !is_cxl_endpoint(port))
-               return -EINVAL;
-
-       /* ensure no regions are mapped to this memdev */
-       if (port->commit_end != -1)
-               return -EBUSY;
-
-       rc = cxl_mem_sanitize(mds, CXL_MBOX_OP_SECURE_ERASE);
+       rc = cxl_mem_sanitize(cxlmd, CXL_MBOX_OP_SECURE_ERASE);
+       if (rc)
+               return rc;
 
-       return rc ? rc : len;
+       return len;
 }
 static struct device_attribute dev_attr_security_erase =
        __ATTR(erase, 0200, NULL, security_erase_store);
@@ -242,7 +231,7 @@ int cxl_trigger_poison_list(struct cxl_memdev *cxlmd)
        if (rc)
                return rc;
 
-       if (port->commit_end == -1) {
+       if (cxl_num_decoders_committed(port) == 0) {
                /* No regions mapped to this memdev */
                rc = cxl_get_poison_by_memdev(cxlmd);
        } else {
@@ -293,7 +282,7 @@ static struct cxl_region *cxl_dpa_to_region(struct cxl_memdev *cxlmd, u64 dpa)
                .dpa = dpa,
        };
        port = cxlmd->endpoint;
-       if (port && is_cxl_endpoint(port) && port->commit_end != -1)
+       if (port && is_cxl_endpoint(port) && cxl_num_decoders_committed(port))
                device_for_each_child(&port->dev, &ctx, __cxl_dpa_to_region);
 
        return ctx.cxlr;
@@ -556,21 +545,11 @@ void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
 }
 EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
 
-static void cxl_memdev_security_shutdown(struct device *dev)
-{
-       struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
-       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
-
-       if (mds->security.poll)
-               cancel_delayed_work_sync(&mds->security.poll_dwork);
-}
-
 static void cxl_memdev_shutdown(struct device *dev)
 {
        struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
 
        down_write(&cxl_memdev_rwsem);
-       cxl_memdev_security_shutdown(dev);
        cxlmd->cxlds = NULL;
        up_write(&cxl_memdev_rwsem);
 }
@@ -580,8 +559,8 @@ static void cxl_memdev_unregister(void *_cxlmd)
        struct cxl_memdev *cxlmd = _cxlmd;
        struct device *dev = &cxlmd->dev;
 
-       cxl_memdev_shutdown(dev);
        cdev_device_del(&cxlmd->cdev, dev);
+       cxl_memdev_shutdown(dev);
        put_device(dev);
 }
 
@@ -961,17 +940,16 @@ static const struct fw_upload_ops cxl_memdev_fw_ops = {
         .cleanup = cxl_fw_cleanup,
 };
 
-static void devm_cxl_remove_fw_upload(void *fwl)
+static void cxl_remove_fw_upload(void *fwl)
 {
        firmware_upload_unregister(fwl);
 }
 
-int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
+int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds)
 {
        struct cxl_dev_state *cxlds = &mds->cxlds;
        struct device *dev = &cxlds->cxlmd->dev;
        struct fw_upload *fwl;
-       int rc;
 
        if (!test_bit(CXL_MEM_COMMAND_ID_GET_FW_INFO, mds->enabled_cmds))
                return 0;
@@ -979,19 +957,10 @@ int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds)
        fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
                                       &cxl_memdev_fw_ops, mds);
        if (IS_ERR(fwl))
-               return dev_err_probe(dev, PTR_ERR(fwl),
-                                    "Failed to register firmware loader\n");
-
-       rc = devm_add_action_or_reset(cxlds->dev, devm_cxl_remove_fw_upload,
-                                     fwl);
-       if (rc)
-               dev_err(dev,
-                       "Failed to add firmware loader remove action: %d\n",
-                       rc);
-
-       return rc;
+               return PTR_ERR(fwl);
+       return devm_add_action_or_reset(host, cxl_remove_fw_upload, fwl);
 }
-EXPORT_SYMBOL_NS_GPL(cxl_memdev_setup_fw_upload, CXL);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_setup_fw_upload, CXL);
 
 static const struct file_operations cxl_memdev_fops = {
        .owner = THIS_MODULE,
@@ -1002,36 +971,8 @@ static const struct file_operations cxl_memdev_fops = {
        .llseek = noop_llseek,
 };
 
-static void put_sanitize(void *data)
-{
-       struct cxl_memdev_state *mds = data;
-
-       sysfs_put(mds->security.sanitize_node);
-}
-
-static int cxl_memdev_security_init(struct cxl_memdev *cxlmd)
-{
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
-       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
-       struct device *dev = &cxlmd->dev;
-       struct kernfs_node *sec;
-
-       sec = sysfs_get_dirent(dev->kobj.sd, "security");
-       if (!sec) {
-               dev_err(dev, "sysfs_get_dirent 'security' failed\n");
-               return -ENODEV;
-       }
-       mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
-       sysfs_put(sec);
-       if (!mds->security.sanitize_node) {
-               dev_err(dev, "sysfs_get_dirent 'state' failed\n");
-               return -ENODEV;
-       }
-
-       return devm_add_action_or_reset(cxlds->dev, put_sanitize, mds);
- }
-
-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
+struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
+                                      struct cxl_dev_state *cxlds)
 {
        struct cxl_memdev *cxlmd;
        struct device *dev;
@@ -1059,11 +1000,7 @@ struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
        if (rc)
                goto err;
 
-       rc = cxl_memdev_security_init(cxlmd);
-       if (rc)
-               goto err;
-
-       rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
+       rc = devm_add_action_or_reset(host, cxl_memdev_unregister, cxlmd);
        if (rc)
                return ERR_PTR(rc);
        return cxlmd;
@@ -1079,6 +1016,50 @@ err:
 }
 EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
 
+static void sanitize_teardown_notifier(void *data)
+{
+       struct cxl_memdev_state *mds = data;
+       struct kernfs_node *state;
+
+       /*
+        * Prevent new irq triggered invocations of the workqueue and
+        * flush inflight invocations.
+        */
+       mutex_lock(&mds->mbox_mutex);
+       state = mds->security.sanitize_node;
+       mds->security.sanitize_node = NULL;
+       mutex_unlock(&mds->mbox_mutex);
+
+       cancel_delayed_work_sync(&mds->security.poll_dwork);
+       sysfs_put(state);
+}
+
+int devm_cxl_sanitize_setup_notifier(struct device *host,
+                                    struct cxl_memdev *cxlmd)
+{
+       struct cxl_dev_state *cxlds = cxlmd->cxlds;
+       struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
+       struct kernfs_node *sec;
+
+       if (!test_bit(CXL_SEC_ENABLED_SANITIZE, mds->security.enabled_cmds))
+               return 0;
+
+       /*
+        * Note, the expectation is that @cxlmd would have failed to be
+        * created if these sysfs_get_dirent calls fail.
+        */
+       sec = sysfs_get_dirent(cxlmd->dev.kobj.sd, "security");
+       if (!sec)
+               return -ENOENT;
+       mds->security.sanitize_node = sysfs_get_dirent(sec, "state");
+       sysfs_put(sec);
+       if (!mds->security.sanitize_node)
+               return -ENOENT;
+
+       return devm_add_action_or_reset(host, sanitize_teardown_notifier, mds);
+}
+EXPORT_SYMBOL_NS_GPL(devm_cxl_sanitize_setup_notifier, CXL);
+
 __init int cxl_memdev_init(void)
 {
        dev_t devt;
index c7a7887ebdcff859bac675e6f41322aae1b50794..eff20e83d0a64e8ba791a214f8fb4564135baded 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/delay.h>
 #include <linux/pci.h>
 #include <linux/pci-doe.h>
+#include <linux/aer.h>
 #include <cxlpci.h>
 #include <cxlmem.h>
 #include <cxl.h>
@@ -595,6 +596,16 @@ static int cxl_cdat_read_table(struct device *dev,
        return 0;
 }
 
+static unsigned char cdat_checksum(void *buf, size_t size)
+{
+       unsigned char sum, *data = buf;
+       size_t i;
+
+       for (sum = 0, i = 0; i < size; i++)
+               sum += data[i];
+       return sum;
+}
+
 /**
  * read_cdat_data - Read the CDAT data on this port
  * @port: Port to read data from
@@ -603,18 +614,30 @@ static int cxl_cdat_read_table(struct device *dev,
  */
 void read_cdat_data(struct cxl_port *port)
 {
-       struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
-       struct device *host = cxlmd->dev.parent;
+       struct device *uport = port->uport_dev;
        struct device *dev = &port->dev;
        struct pci_doe_mb *cdat_doe;
+       struct pci_dev *pdev = NULL;
+       struct cxl_memdev *cxlmd;
        size_t cdat_length;
        void *cdat_table;
        int rc;
 
-       if (!dev_is_pci(host))
+       if (is_cxl_memdev(uport)) {
+               struct device *host;
+
+               cxlmd = to_cxl_memdev(uport);
+               host = cxlmd->dev.parent;
+               if (dev_is_pci(host))
+                       pdev = to_pci_dev(host);
+       } else if (dev_is_pci(uport)) {
+               pdev = to_pci_dev(uport);
+       }
+
+       if (!pdev)
                return;
-       cdat_doe = pci_find_doe_mailbox(to_pci_dev(host),
-                                       PCI_DVSEC_VENDOR_ID_CXL,
+
+       cdat_doe = pci_find_doe_mailbox(pdev, PCI_DVSEC_VENDOR_ID_CXL,
                                        CXL_DOE_PROTOCOL_TABLE_ACCESS);
        if (!cdat_doe) {
                dev_dbg(dev, "No CDAT mailbox\n");
@@ -634,44 +657,54 @@ void read_cdat_data(struct cxl_port *port)
                return;
 
        rc = cxl_cdat_read_table(dev, cdat_doe, cdat_table, &cdat_length);
-       if (rc) {
-               /* Don't leave table data allocated on error */
-               devm_kfree(dev, cdat_table);
-               dev_err(dev, "CDAT data read error\n");
-               return;
-       }
+       if (rc)
+               goto err;
+
+       cdat_table = cdat_table + sizeof(__le32);
+       if (cdat_checksum(cdat_table, cdat_length))
+               goto err;
 
-       port->cdat.table = cdat_table + sizeof(__le32);
+       port->cdat.table = cdat_table;
        port->cdat.length = cdat_length;
+       return;
+
+err:
+       /* Don't leave table data allocated on error */
+       devm_kfree(dev, cdat_table);
+       dev_err(dev, "Failed to read/validate CDAT.\n");
 }
 EXPORT_SYMBOL_NS_GPL(read_cdat_data, CXL);
 
-void cxl_cor_error_detected(struct pci_dev *pdev)
+static void __cxl_handle_cor_ras(struct cxl_dev_state *cxlds,
+                                void __iomem *ras_base)
 {
-       struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
        void __iomem *addr;
        u32 status;
 
-       if (!cxlds->regs.ras)
+       if (!ras_base)
                return;
 
-       addr = cxlds->regs.ras + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
+       addr = ras_base + CXL_RAS_CORRECTABLE_STATUS_OFFSET;
        status = readl(addr);
        if (status & CXL_RAS_CORRECTABLE_STATUS_MASK) {
                writel(status & CXL_RAS_CORRECTABLE_STATUS_MASK, addr);
                trace_cxl_aer_correctable_error(cxlds->cxlmd, status);
        }
 }
-EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
+static void cxl_handle_endpoint_cor_ras(struct cxl_dev_state *cxlds)
+{
+       return __cxl_handle_cor_ras(cxlds, cxlds->regs.ras);
+}
 
 /* CXL spec rev3.0 8.2.4.16.1 */
-static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
+static void header_log_copy(void __iomem *ras_base, u32 *log)
 {
        void __iomem *addr;
        u32 *log_addr;
        int i, log_u32_size = CXL_HEADERLOG_SIZE / sizeof(u32);
 
-       addr = cxlds->regs.ras + CXL_RAS_HEADER_LOG_OFFSET;
+       addr = ras_base + CXL_RAS_HEADER_LOG_OFFSET;
        log_addr = log;
 
        for (i = 0; i < log_u32_size; i++) {
@@ -685,17 +718,18 @@ static void header_log_copy(struct cxl_dev_state *cxlds, u32 *log)
  * Log the state of the RAS status registers and prepare them to log the
  * next error status. Return 1 if reset needed.
  */
-static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
+static bool __cxl_handle_ras(struct cxl_dev_state *cxlds,
+                                 void __iomem *ras_base)
 {
        u32 hl[CXL_HEADERLOG_SIZE_U32];
        void __iomem *addr;
        u32 status;
        u32 fe;
 
-       if (!cxlds->regs.ras)
+       if (!ras_base)
                return false;
 
-       addr = cxlds->regs.ras + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
+       addr = ras_base + CXL_RAS_UNCORRECTABLE_STATUS_OFFSET;
        status = readl(addr);
        if (!(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK))
                return false;
@@ -703,7 +737,7 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
        /* If multiple errors, log header points to first error from ctrl reg */
        if (hweight32(status) > 1) {
                void __iomem *rcc_addr =
-                       cxlds->regs.ras + CXL_RAS_CAP_CONTROL_OFFSET;
+                       ras_base + CXL_RAS_CAP_CONTROL_OFFSET;
 
                fe = BIT(FIELD_GET(CXL_RAS_CAP_CONTROL_FE_MASK,
                                   readl(rcc_addr)));
@@ -711,13 +745,201 @@ static bool cxl_report_and_clear(struct cxl_dev_state *cxlds)
                fe = status;
        }
 
-       header_log_copy(cxlds, hl);
+       header_log_copy(ras_base, hl);
        trace_cxl_aer_uncorrectable_error(cxlds->cxlmd, status, fe, hl);
        writel(status & CXL_RAS_UNCORRECTABLE_STATUS_MASK, addr);
 
        return true;
 }
 
+static bool cxl_handle_endpoint_ras(struct cxl_dev_state *cxlds)
+{
+       return __cxl_handle_ras(cxlds, cxlds->regs.ras);
+}
+
+#ifdef CONFIG_PCIEAER_CXL
+
+static void cxl_dport_map_rch_aer(struct cxl_dport *dport)
+{
+       struct cxl_rcrb_info *ri = &dport->rcrb;
+       void __iomem *dport_aer = NULL;
+       resource_size_t aer_phys;
+       struct device *host;
+
+       if (dport->rch && ri->aer_cap) {
+               host = dport->reg_map.host;
+               aer_phys = ri->aer_cap + ri->base;
+               dport_aer = devm_cxl_iomap_block(host, aer_phys,
+                               sizeof(struct aer_capability_regs));
+       }
+
+       dport->regs.dport_aer = dport_aer;
+}
+
+static void cxl_dport_map_regs(struct cxl_dport *dport)
+{
+       struct cxl_register_map *map = &dport->reg_map;
+       struct device *dev = dport->dport_dev;
+
+       if (!map->component_map.ras.valid)
+               dev_dbg(dev, "RAS registers not found\n");
+       else if (cxl_map_component_regs(map, &dport->regs.component,
+                                       BIT(CXL_CM_CAP_CAP_ID_RAS)))
+               dev_dbg(dev, "Failed to map RAS capability.\n");
+
+       if (dport->rch)
+               cxl_dport_map_rch_aer(dport);
+}
+
+static void cxl_disable_rch_root_ints(struct cxl_dport *dport)
+{
+       void __iomem *aer_base = dport->regs.dport_aer;
+       struct pci_host_bridge *bridge;
+       u32 aer_cmd_mask, aer_cmd;
+
+       if (!aer_base)
+               return;
+
+       bridge = to_pci_host_bridge(dport->dport_dev);
+
+       /*
+        * Disable RCH root port command interrupts.
+        * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
+        *
+        * This sequence may not be necessary. CXL spec states disabling
+        * the root cmd register's interrupts is required. But, PCI spec
+        * shows these are disabled by default on reset.
+        */
+       if (bridge->native_aer) {
+               aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
+                               PCI_ERR_ROOT_CMD_NONFATAL_EN |
+                               PCI_ERR_ROOT_CMD_FATAL_EN);
+               aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
+               aer_cmd &= ~aer_cmd_mask;
+               writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
+       }
+}
+
+void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport)
+{
+       struct device *dport_dev = dport->dport_dev;
+       struct pci_host_bridge *host_bridge;
+
+       host_bridge = to_pci_host_bridge(dport_dev);
+       if (host_bridge->native_aer)
+               dport->rcrb.aer_cap = cxl_rcrb_to_aer(dport_dev, dport->rcrb.base);
+
+       dport->reg_map.host = host;
+       cxl_dport_map_regs(dport);
+
+       if (dport->rch)
+               cxl_disable_rch_root_ints(dport);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_setup_parent_dport, CXL);
+
+static void cxl_handle_rdport_cor_ras(struct cxl_dev_state *cxlds,
+                                         struct cxl_dport *dport)
+{
+       return __cxl_handle_cor_ras(cxlds, dport->regs.ras);
+}
+
+static bool cxl_handle_rdport_ras(struct cxl_dev_state *cxlds,
+                                      struct cxl_dport *dport)
+{
+       return __cxl_handle_ras(cxlds, dport->regs.ras);
+}
+
+/*
+ * Copy the AER capability registers using 32 bit read accesses.
+ * This is necessary because RCRB AER capability is MMIO mapped. Clear the
+ * status after copying.
+ *
+ * @aer_base: base address of AER capability block in RCRB
+ * @aer_regs: destination for copying AER capability
+ */
+static bool cxl_rch_get_aer_info(void __iomem *aer_base,
+                                struct aer_capability_regs *aer_regs)
+{
+       int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
+       u32 *aer_regs_buf = (u32 *)aer_regs;
+       int n;
+
+       if (!aer_base)
+               return false;
+
+       /* Use readl() to guarantee 32-bit accesses */
+       for (n = 0; n < read_cnt; n++)
+               aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));
+
+       writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
+       writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);
+
+       return true;
+}
+
+/* Get AER severity. Return false if there is no error. */
+static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
+                                    int *severity)
+{
+       if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
+               if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
+                       *severity = AER_FATAL;
+               else
+                       *severity = AER_NONFATAL;
+               return true;
+       }
+
+       if (aer_regs->cor_status & ~aer_regs->cor_mask) {
+               *severity = AER_CORRECTABLE;
+               return true;
+       }
+
+       return false;
+}
+
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
+{
+       struct pci_dev *pdev = to_pci_dev(cxlds->dev);
+       struct aer_capability_regs aer_regs;
+       struct cxl_dport *dport;
+       struct cxl_port *port;
+       int severity;
+
+       port = cxl_pci_find_port(pdev, &dport);
+       if (!port)
+               return;
+
+       put_device(&port->dev);
+
+       if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
+               return;
+
+       if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
+               return;
+
+       pci_print_aer(pdev, severity, &aer_regs);
+
+       if (severity == AER_CORRECTABLE)
+               cxl_handle_rdport_cor_ras(cxlds, dport);
+       else
+               cxl_handle_rdport_ras(cxlds, dport);
+}
+
+#else
+static void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds) { }
+#endif
+
+void cxl_cor_error_detected(struct pci_dev *pdev)
+{
+       struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
+
+       if (cxlds->rcd)
+               cxl_handle_rdport_errors(cxlds);
+
+       cxl_handle_endpoint_cor_ras(cxlds);
+}
+EXPORT_SYMBOL_NS_GPL(cxl_cor_error_detected, CXL);
+
 pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
                                    pci_channel_state_t state)
 {
@@ -726,13 +948,16 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
        struct device *dev = &cxlmd->dev;
        bool ue;
 
+       if (cxlds->rcd)
+               cxl_handle_rdport_errors(cxlds);
+
        /*
         * A frozen channel indicates an impending reset which is fatal to
         * CXL.mem operation, and will likely crash the system. On the off
         * chance the situation is recoverable dump the status of the RAS
         * capability registers and bounce the active state of the memdev.
         */
-       ue = cxl_report_and_clear(cxlds);
+       ue = cxl_handle_endpoint_ras(cxlds);
 
        switch (state) {
        case pci_channel_io_normal:
index 7ca01a834e188c8f09f2676b8c79689279acbabe..38441634e4c68371fa7fc03aee8979e581303fde 100644 (file)
  * instantiated by the core.
  */
 
+/*
+ * All changes to the interleave configuration occur with this lock held
+ * for write.
+ */
+DECLARE_RWSEM(cxl_region_rwsem);
+
 static DEFINE_IDA(cxl_port_ida);
 static DEFINE_XARRAY(cxl_root_buses);
 
+int cxl_num_decoders_committed(struct cxl_port *port)
+{
+       lockdep_assert_held(&cxl_region_rwsem);
+
+       return port->commit_end + 1;
+}
+
 static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
 {
@@ -278,6 +291,15 @@ static ssize_t interleave_ways_show(struct device *dev,
 
 static DEVICE_ATTR_RO(interleave_ways);
 
+static ssize_t qos_class_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
+
+       return sysfs_emit(buf, "%d\n", cxlrd->qos_class);
+}
+static DEVICE_ATTR_RO(qos_class);
+
 static struct attribute *cxl_decoder_base_attrs[] = {
        &dev_attr_start.attr,
        &dev_attr_size.attr,
@@ -297,6 +319,7 @@ static struct attribute *cxl_decoder_root_attrs[] = {
        &dev_attr_cap_type2.attr,
        &dev_attr_cap_type3.attr,
        &dev_attr_target_list.attr,
+       &dev_attr_qos_class.attr,
        SET_CXL_REGION_ATTR(create_pmem_region)
        SET_CXL_REGION_ATTR(create_ram_region)
        SET_CXL_REGION_ATTR(delete_region)
@@ -521,8 +544,33 @@ static void cxl_port_release(struct device *dev)
        kfree(port);
 }
 
+static ssize_t decoders_committed_show(struct device *dev,
+                                      struct device_attribute *attr, char *buf)
+{
+       struct cxl_port *port = to_cxl_port(dev);
+       int rc;
+
+       down_read(&cxl_region_rwsem);
+       rc = sysfs_emit(buf, "%d\n", cxl_num_decoders_committed(port));
+       up_read(&cxl_region_rwsem);
+
+       return rc;
+}
+
+static DEVICE_ATTR_RO(decoders_committed);
+
+static struct attribute *cxl_port_attrs[] = {
+       &dev_attr_decoders_committed.attr,
+       NULL,
+};
+
+static struct attribute_group cxl_port_attribute_group = {
+       .attrs = cxl_port_attrs,
+};
+
 static const struct attribute_group *cxl_port_attribute_groups[] = {
        &cxl_base_attribute_group,
+       &cxl_port_attribute_group,
        NULL,
 };
 
@@ -619,7 +667,6 @@ static int devm_cxl_link_parent_dport(struct device *host,
 static struct lock_class_key cxl_port_key;
 
 static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
-                                      resource_size_t component_reg_phys,
                                       struct cxl_dport *parent_dport)
 {
        struct cxl_port *port;
@@ -670,7 +717,6 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
        } else
                dev->parent = uport_dev;
 
-       port->component_reg_phys = component_reg_phys;
        ida_init(&port->decoder_ida);
        port->hdm_end = -1;
        port->commit_end = -1;
@@ -691,19 +737,21 @@ err:
        return ERR_PTR(rc);
 }
 
-static int cxl_setup_comp_regs(struct device *dev, struct cxl_register_map *map,
+static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
                               resource_size_t component_reg_phys)
 {
-       if (component_reg_phys == CXL_RESOURCE_NONE)
-               return 0;
-
        *map = (struct cxl_register_map) {
-               .dev = dev,
-               .reg_type = CXL_REGLOC_RBI_COMPONENT,
+               .host = host,
+               .reg_type = CXL_REGLOC_RBI_EMPTY,
                .resource = component_reg_phys,
-               .max_size = CXL_COMPONENT_REG_BLOCK_SIZE,
        };
 
+       if (component_reg_phys == CXL_RESOURCE_NONE)
+               return 0;
+
+       map->reg_type = CXL_REGLOC_RBI_COMPONENT;
+       map->max_size = CXL_COMPONENT_REG_BLOCK_SIZE;
+
        return cxl_setup_regs(map);
 }
 
@@ -712,17 +760,27 @@ static int cxl_port_setup_regs(struct cxl_port *port,
 {
        if (dev_is_platform(port->uport_dev))
                return 0;
-       return cxl_setup_comp_regs(&port->dev, &port->comp_map,
+       return cxl_setup_comp_regs(&port->dev, &port->reg_map,
                                   component_reg_phys);
 }
 
-static int cxl_dport_setup_regs(struct cxl_dport *dport,
+static int cxl_dport_setup_regs(struct device *host, struct cxl_dport *dport,
                                resource_size_t component_reg_phys)
 {
+       int rc;
+
        if (dev_is_platform(dport->dport_dev))
                return 0;
-       return cxl_setup_comp_regs(dport->dport_dev, &dport->comp_map,
-                                  component_reg_phys);
+
+       /*
+        * use @dport->dport_dev for the context for error messages during
+        * register probing, and fixup @host after the fact, since @host may be
+        * NULL.
+        */
+       rc = cxl_setup_comp_regs(dport->dport_dev, &dport->reg_map,
+                                component_reg_phys);
+       dport->reg_map.host = host;
+       return rc;
 }
 
 static struct cxl_port *__devm_cxl_add_port(struct device *host,
@@ -734,21 +792,36 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
        struct device *dev;
        int rc;
 
-       port = cxl_port_alloc(uport_dev, component_reg_phys, parent_dport);
+       port = cxl_port_alloc(uport_dev, parent_dport);
        if (IS_ERR(port))
                return port;
 
        dev = &port->dev;
-       if (is_cxl_memdev(uport_dev))
+       if (is_cxl_memdev(uport_dev)) {
+               struct cxl_memdev *cxlmd = to_cxl_memdev(uport_dev);
+               struct cxl_dev_state *cxlds = cxlmd->cxlds;
+
                rc = dev_set_name(dev, "endpoint%d", port->id);
-       else if (parent_dport)
+               if (rc)
+                       goto err;
+
+               /*
+                * The endpoint driver already enumerated the component and RAS
+                * registers. Reuse that enumeration while prepping them to be
+                * mapped by the cxl_port driver.
+                */
+               port->reg_map = cxlds->reg_map;
+               port->reg_map.host = &port->dev;
+       } else if (parent_dport) {
                rc = dev_set_name(dev, "port%d", port->id);
-       else
-               rc = dev_set_name(dev, "root%d", port->id);
-       if (rc)
-               goto err;
+               if (rc)
+                       goto err;
 
-       rc = cxl_port_setup_regs(port, component_reg_phys);
+               rc = cxl_port_setup_regs(port, component_reg_phys);
+               if (rc)
+                       goto err;
+       } else
+               rc = dev_set_name(dev, "root%d", port->id);
        if (rc)
                goto err;
 
@@ -983,7 +1056,16 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
        if (!dport)
                return ERR_PTR(-ENOMEM);
 
-       if (rcrb != CXL_RESOURCE_NONE) {
+       dport->dport_dev = dport_dev;
+       dport->port_id = port_id;
+       dport->port = port;
+
+       if (rcrb == CXL_RESOURCE_NONE) {
+               rc = cxl_dport_setup_regs(&port->dev, dport,
+                                         component_reg_phys);
+               if (rc)
+                       return ERR_PTR(rc);
+       } else {
                dport->rcrb.base = rcrb;
                component_reg_phys = __rcrb_to_component(dport_dev, &dport->rcrb,
                                                         CXL_RCRB_DOWNSTREAM);
@@ -992,6 +1074,14 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
                        return ERR_PTR(-ENXIO);
                }
 
+               /*
+                * RCH @dport is not ready to map until associated with its
+                * memdev
+                */
+               rc = cxl_dport_setup_regs(NULL, dport, component_reg_phys);
+               if (rc)
+                       return ERR_PTR(rc);
+
                dport->rch = true;
        }
 
@@ -999,14 +1089,6 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
                dev_dbg(dport_dev, "Component Registers found for dport: %pa\n",
                        &component_reg_phys);
 
-       dport->dport_dev = dport_dev;
-       dport->port_id = port_id;
-       dport->port = port;
-
-       rc = cxl_dport_setup_regs(dport, component_reg_phys);
-       if (rc)
-               return ERR_PTR(rc);
-
        cond_cxl_root_lock(port);
        rc = add_dport(port, dport);
        cond_cxl_root_unlock(port);
@@ -1217,35 +1299,39 @@ static struct device *grandparent(struct device *dev)
        return NULL;
 }
 
+static struct device *endpoint_host(struct cxl_port *endpoint)
+{
+       struct cxl_port *port = to_cxl_port(endpoint->dev.parent);
+
+       if (is_cxl_root(port))
+               return port->uport_dev;
+       return &port->dev;
+}
+
 static void delete_endpoint(void *data)
 {
        struct cxl_memdev *cxlmd = data;
        struct cxl_port *endpoint = cxlmd->endpoint;
-       struct cxl_port *parent_port;
-       struct device *parent;
+       struct device *host = endpoint_host(endpoint);
 
-       parent_port = cxl_mem_find_port(cxlmd, NULL);
-       if (!parent_port)
-               goto out;
-       parent = &parent_port->dev;
-
-       device_lock(parent);
-       if (parent->driver && !endpoint->dead) {
-               devm_release_action(parent, cxl_unlink_parent_dport, endpoint);
-               devm_release_action(parent, cxl_unlink_uport, endpoint);
-               devm_release_action(parent, unregister_port, endpoint);
+       device_lock(host);
+       if (host->driver && !endpoint->dead) {
+               devm_release_action(host, cxl_unlink_parent_dport, endpoint);
+               devm_release_action(host, cxl_unlink_uport, endpoint);
+               devm_release_action(host, unregister_port, endpoint);
        }
        cxlmd->endpoint = NULL;
-       device_unlock(parent);
-       put_device(parent);
-out:
+       device_unlock(host);
        put_device(&endpoint->dev);
+       put_device(host);
 }
 
 int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint)
 {
+       struct device *host = endpoint_host(endpoint);
        struct device *dev = &cxlmd->dev;
 
+       get_device(host);
        get_device(&endpoint->dev);
        cxlmd->endpoint = endpoint;
        cxlmd->depth = endpoint->depth;
@@ -1468,7 +1554,11 @@ retry:
                struct cxl_dport *dport;
                struct cxl_port *port;
 
-               if (!dport_dev)
+               /*
+                * The terminal "grandparent" in PCI is NULL and @platform_bus
+                * for platform devices
+                */
+               if (!dport_dev || dport_dev == &platform_bus)
                        return 0;
 
                uport_dev = dport_dev->parent;
@@ -1691,6 +1781,7 @@ struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
        }
 
        atomic_set(&cxlrd->region_id, rc);
+       cxlrd->qos_class = CXL_QOS_CLASS_INVALID;
        return cxlrd;
 }
 EXPORT_SYMBOL_NS_GPL(cxl_root_decoder_alloc, CXL);
@@ -2062,3 +2153,4 @@ static void cxl_core_exit(void)
 subsys_initcall(cxl_core_init);
 module_exit(cxl_core_exit);
 MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(CXL);
index 6d63b8798c29921470e37c9bf0e9d116c834508e..56e575c79bb49187f909aa87d4f6c5d9894c3b75 100644 (file)
  * 3. Decoder targets
  */
 
-/*
- * All changes to the interleave configuration occur with this lock held
- * for write.
- */
-static DECLARE_RWSEM(cxl_region_rwsem);
-
 static struct cxl_region *to_cxl_region(struct device *dev);
 
 static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
@@ -129,7 +123,7 @@ static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
 {
        if (!cpu_cache_has_invalidate_memregion()) {
                if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
-                       dev_warn_once(
+                       dev_info_once(
                                &cxlr->dev,
                                "Bypassing cpu_cache_invalidate_memregion() for testing!\n");
                        return 0;
@@ -294,7 +288,7 @@ static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
         */
        rc = cxl_region_invalidate_memregion(cxlr);
        if (rc)
-               return rc;
+               goto out;
 
        if (commit) {
                rc = cxl_region_decode_commit(cxlr);
@@ -1133,7 +1127,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
        }
 
        if (is_cxl_root(parent_port)) {
-               parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
+               /*
+                * Root decoder IG is always set to value in CFMWS which
+                * may be different than this region's IG.  We can use the
+                * region's IG here since interleave_granularity_store()
+                * does not allow interleaved host-bridges with
+                * root IG != region IG.
+                */
+               parent_ig = p->interleave_granularity;
                parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
                /*
                 * For purposes of address bit routing, use power-of-2 math for
@@ -1195,6 +1196,14 @@ static int cxl_port_setup_targets(struct cxl_port *port,
                return rc;
        }
 
+       if (iw > 8 || iw > cxlsd->nr_targets) {
+               dev_dbg(&cxlr->dev,
+                       "%s:%s:%s: ways: %d overflows targets: %d\n",
+                       dev_name(port->uport_dev), dev_name(&port->dev),
+                       dev_name(&cxld->dev), iw, cxlsd->nr_targets);
+               return -ENXIO;
+       }
+
        if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
                if (cxld->interleave_ways != iw ||
                    cxld->interleave_granularity != ig ||
@@ -1480,6 +1489,14 @@ static int cxl_region_attach_auto(struct cxl_region *cxlr,
        return 0;
 }
 
+static int cmp_interleave_pos(const void *a, const void *b)
+{
+       struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
+       struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
+
+       return cxled_a->pos - cxled_b->pos;
+}
+
 static struct cxl_port *next_port(struct cxl_port *port)
 {
        if (!port->parent_dport)
@@ -1487,119 +1504,127 @@ static struct cxl_port *next_port(struct cxl_port *port)
        return port->parent_dport->port;
 }
 
-static int decoder_match_range(struct device *dev, void *data)
+static int match_switch_decoder_by_range(struct device *dev, void *data)
 {
-       struct cxl_endpoint_decoder *cxled = data;
        struct cxl_switch_decoder *cxlsd;
+       struct range *r1, *r2 = data;
 
        if (!is_switch_decoder(dev))
                return 0;
 
        cxlsd = to_cxl_switch_decoder(dev);
-       return range_contains(&cxlsd->cxld.hpa_range, &cxled->cxld.hpa_range);
-}
-
-static void find_positions(const struct cxl_switch_decoder *cxlsd,
-                          const struct cxl_port *iter_a,
-                          const struct cxl_port *iter_b, int *a_pos,
-                          int *b_pos)
-{
-       int i;
+       r1 = &cxlsd->cxld.hpa_range;
 
-       for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) {
-               if (cxlsd->target[i] == iter_a->parent_dport)
-                       *a_pos = i;
-               else if (cxlsd->target[i] == iter_b->parent_dport)
-                       *b_pos = i;
-               if (*a_pos >= 0 && *b_pos >= 0)
-                       break;
-       }
+       if (is_root_decoder(dev))
+               return range_contains(r1, r2);
+       return (r1->start == r2->start && r1->end == r2->end);
 }
 
-static int cmp_decode_pos(const void *a, const void *b)
+static int find_pos_and_ways(struct cxl_port *port, struct range *range,
+                            int *pos, int *ways)
 {
-       struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
-       struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
-       struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a);
-       struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b);
-       struct cxl_port *port_a = cxled_to_port(cxled_a);
-       struct cxl_port *port_b = cxled_to_port(cxled_b);
-       struct cxl_port *iter_a, *iter_b, *port = NULL;
        struct cxl_switch_decoder *cxlsd;
+       struct cxl_port *parent;
        struct device *dev;
-       int a_pos, b_pos;
-       unsigned int seq;
-
-       /* Exit early if any prior sorting failed */
-       if (cxled_a->pos < 0 || cxled_b->pos < 0)
-               return 0;
+       int rc = -ENXIO;
 
-       /*
-        * Walk up the hierarchy to find a shared port, find the decoder that
-        * maps the range, compare the relative position of those dport
-        * mappings.
-        */
-       for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) {
-               struct cxl_port *next_a, *next_b;
+       parent = next_port(port);
+       if (!parent)
+               return rc;
 
-               next_a = next_port(iter_a);
-               if (!next_a)
-                       break;
+       dev = device_find_child(&parent->dev, range,
+                               match_switch_decoder_by_range);
+       if (!dev) {
+               dev_err(port->uport_dev,
+                       "failed to find decoder mapping %#llx-%#llx\n",
+                       range->start, range->end);
+               return rc;
+       }
+       cxlsd = to_cxl_switch_decoder(dev);
+       *ways = cxlsd->cxld.interleave_ways;
 
-               for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) {
-                       next_b = next_port(iter_b);
-                       if (next_a != next_b)
-                               continue;
-                       port = next_a;
+       for (int i = 0; i < *ways; i++) {
+               if (cxlsd->target[i] == port->parent_dport) {
+                       *pos = i;
+                       rc = 0;
                        break;
                }
-
-               if (port)
-                       break;
        }
+       put_device(dev);
 
-       if (!port) {
-               dev_err(cxlmd_a->dev.parent,
-                       "failed to find shared port with %s\n",
-                       dev_name(cxlmd_b->dev.parent));
-               goto err;
-       }
+       return rc;
+}
 
-       dev = device_find_child(&port->dev, cxled_a, decoder_match_range);
-       if (!dev) {
-               struct range *range = &cxled_a->cxld.hpa_range;
+/**
+ * cxl_calc_interleave_pos() - calculate an endpoint position in a region
+ * @cxled: endpoint decoder member of given region
+ *
+ * The endpoint position is calculated by traversing the topology from
+ * the endpoint to the root decoder and iteratively applying this
+ * calculation:
+ *
+ *    position = position * parent_ways + parent_pos;
+ *
+ * ...where @position is inferred from switch and root decoder target lists.
+ *
+ * Return: position >= 0 on success
+ *        -ENXIO on failure
+ */
+static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
+{
+       struct cxl_port *iter, *port = cxled_to_port(cxled);
+       struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
+       struct range *range = &cxled->cxld.hpa_range;
+       int parent_ways = 0, parent_pos = 0, pos = 0;
+       int rc;
 
-               dev_err(port->uport_dev,
-                       "failed to find decoder that maps %#llx-%#llx\n",
-                       range->start, range->end);
-               goto err;
-       }
+       /*
+        * Example: the expected interleave order of the 4-way region shown
+        * below is: mem0, mem2, mem1, mem3
+        *
+        *                root_port
+        *                 /      \
+        *      host_bridge_0    host_bridge_1
+        *        |    |           |    |
+        *       mem0 mem1        mem2 mem3
+        *
+        * In the example the calculator will iterate twice. The first iteration
+        * uses the mem position in the host-bridge and the ways of the host-
+        * bridge to generate the first, or local, position. The second
+        * iteration uses the host-bridge position in the root_port and the ways
+        * of the root_port to refine the position.
+        *
+        * A trace of the calculation per endpoint looks like this:
+        * mem0: pos = 0 * 2 + 0    mem2: pos = 0 * 2 + 0
+        *       pos = 0 * 2 + 0          pos = 0 * 2 + 1
+        *       pos: 0                   pos: 1
+        *
+        * mem1: pos = 0 * 2 + 1    mem3: pos = 0 * 2 + 1
+        *       pos = 1 * 2 + 0          pos = 1 * 2 + 1
+        *       pos: 2                   pos = 3
+        *
+        * Note that while this example is simple, the method applies to more
+        * complex topologies, including those with switches.
+        */
 
-       cxlsd = to_cxl_switch_decoder(dev);
-       do {
-               seq = read_seqbegin(&cxlsd->target_lock);
-               find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos);
-       } while (read_seqretry(&cxlsd->target_lock, seq));
+       /* Iterate from endpoint to root_port refining the position */
+       for (iter = port; iter; iter = next_port(iter)) {
+               if (is_cxl_root(iter))
+                       break;
 
-       put_device(dev);
+               rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
+               if (rc)
+                       return rc;
 
-       if (a_pos < 0 || b_pos < 0) {
-               dev_err(port->uport_dev,
-                       "failed to find shared decoder for %s and %s\n",
-                       dev_name(cxlmd_a->dev.parent),
-                       dev_name(cxlmd_b->dev.parent));
-               goto err;
+               pos = pos * parent_ways + parent_pos;
        }
 
-       dev_dbg(port->uport_dev, "%s comes %s %s\n",
-               dev_name(cxlmd_a->dev.parent),
-               a_pos - b_pos < 0 ? "before" : "after",
-               dev_name(cxlmd_b->dev.parent));
+       dev_dbg(&cxlmd->dev,
+               "decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
+               dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
+               dev_name(&port->dev), range->start, range->end, pos);
 
-       return a_pos - b_pos;
-err:
-       cxled_a->pos = -1;
-       return 0;
+       return pos;
 }
 
 static int cxl_region_sort_targets(struct cxl_region *cxlr)
@@ -1607,22 +1632,21 @@ static int cxl_region_sort_targets(struct cxl_region *cxlr)
        struct cxl_region_params *p = &cxlr->params;
        int i, rc = 0;
 
-       sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos,
-            NULL);
-
        for (i = 0; i < p->nr_targets; i++) {
                struct cxl_endpoint_decoder *cxled = p->targets[i];
 
+               cxled->pos = cxl_calc_interleave_pos(cxled);
                /*
-                * Record that sorting failed, but still continue to restore
-                * cxled->pos with its ->targets[] position so that follow-on
-                * code paths can reliably do p->targets[cxled->pos] to
-                * self-reference their entry.
+                * Record that sorting failed, but still continue to calc
+                * cxled->pos so that follow-on code paths can reliably
+                * do p->targets[cxled->pos] to self-reference their entry.
                 */
                if (cxled->pos < 0)
                        rc = -ENXIO;
-               cxled->pos = i;
        }
+       /* Keep the cxlr target list in interleave position order */
+       sort(p->targets, p->nr_targets, sizeof(p->targets[0]),
+            cmp_interleave_pos, NULL);
 
        dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
        return rc;
@@ -1658,6 +1682,12 @@ static int cxl_region_attach(struct cxl_region *cxlr,
                return -ENXIO;
        }
 
+       if (p->nr_targets >= p->interleave_ways) {
+               dev_dbg(&cxlr->dev, "region already has %d endpoints\n",
+                       p->nr_targets);
+               return -EINVAL;
+       }
+
        ep_port = cxled_to_port(cxled);
        root_port = cxlrd_to_port(cxlrd);
        dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
@@ -1750,7 +1780,7 @@ static int cxl_region_attach(struct cxl_region *cxlr,
        if (p->nr_targets == p->interleave_ways) {
                rc = cxl_region_setup_targets(cxlr);
                if (rc)
-                       goto err_decrement;
+                       return rc;
                p->state = CXL_CONFIG_ACTIVE;
        }
 
@@ -1761,13 +1791,27 @@ static int cxl_region_attach(struct cxl_region *cxlr,
                .end = p->res->end,
        };
 
-       return 0;
+       if (p->nr_targets != p->interleave_ways)
+               return 0;
 
-err_decrement:
-       p->nr_targets--;
-       cxled->pos = -1;
-       p->targets[pos] = NULL;
-       return rc;
+       /*
+        * Test the auto-discovery position calculator function
+        * against this successfully created user-defined region.
+        * A fail message here means that this interleave config
+        * will fail when presented as CXL_REGION_F_AUTO.
+        */
+       for (int i = 0; i < p->nr_targets; i++) {
+               struct cxl_endpoint_decoder *cxled = p->targets[i];
+               int test_pos;
+
+               test_pos = cxl_calc_interleave_pos(cxled);
+               dev_dbg(&cxled->cxld.dev,
+                       "Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
+                       (test_pos == cxled->pos) ? "success" : "fail",
+                       test_pos, cxled->pos);
+       }
+
+       return 0;
 }
 
 static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
@@ -2696,7 +2740,7 @@ err:
        return rc;
 }
 
-static int match_decoder_by_range(struct device *dev, void *data)
+static int match_root_decoder_by_range(struct device *dev, void *data)
 {
        struct range *r1, *r2 = data;
        struct cxl_root_decoder *cxlrd;
@@ -2827,7 +2871,7 @@ int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
        int rc;
 
        cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
-                                     match_decoder_by_range);
+                                     match_root_decoder_by_range);
        if (!cxlrd_dev) {
                dev_err(cxlmd->dev.parent,
                        "%s:%s no CXL window for range %#llx:%#llx\n",
index 6281127b3e9d97223d9a4c733c98b9a1fb2271c7..372786f809555f66509186c3e3476af2fad0d7f8 100644 (file)
@@ -204,7 +204,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
                           struct cxl_component_regs *regs,
                           unsigned long map_mask)
 {
-       struct device *dev = map->dev;
+       struct device *host = map->host;
        struct mapinfo {
                const struct cxl_reg_map *rmap;
                void __iomem **addr;
@@ -216,16 +216,16 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
 
        for (i = 0; i < ARRAY_SIZE(mapinfo); i++) {
                struct mapinfo *mi = &mapinfo[i];
-               resource_size_t phys_addr;
+               resource_size_t addr;
                resource_size_t length;
 
                if (!mi->rmap->valid)
                        continue;
                if (!test_bit(mi->rmap->id, &map_mask))
                        continue;
-               phys_addr = map->resource + mi->rmap->offset;
+               addr = map->resource + mi->rmap->offset;
                length = mi->rmap->size;
-               *(mi->addr) = devm_cxl_iomap_block(dev, phys_addr, length);
+               *(mi->addr) = devm_cxl_iomap_block(host, addr, length);
                if (!*(mi->addr))
                        return -ENOMEM;
        }
@@ -237,7 +237,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
 int cxl_map_device_regs(const struct cxl_register_map *map,
                        struct cxl_device_regs *regs)
 {
-       struct device *dev = map->dev;
+       struct device *host = map->host;
        resource_size_t phys_addr = map->resource;
        struct mapinfo {
                const struct cxl_reg_map *rmap;
@@ -259,7 +259,7 @@ int cxl_map_device_regs(const struct cxl_register_map *map,
 
                addr = phys_addr + mi->rmap->offset;
                length = mi->rmap->size;
-               *(mi->addr) = devm_cxl_iomap_block(dev, addr, length);
+               *(mi->addr) = devm_cxl_iomap_block(host, addr, length);
                if (!*(mi->addr))
                        return -ENOMEM;
        }
@@ -309,7 +309,7 @@ int cxl_find_regblock_instance(struct pci_dev *pdev, enum cxl_regloc_type type,
        int regloc, i;
 
        *map = (struct cxl_register_map) {
-               .dev = &pdev->dev,
+               .host = &pdev->dev,
                .resource = CXL_RESOURCE_NONE,
        };
 
@@ -386,10 +386,9 @@ int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_count_regblock, CXL);
 
-int cxl_map_pmu_regs(struct pci_dev *pdev, struct cxl_pmu_regs *regs,
-                    struct cxl_register_map *map)
+int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs)
 {
-       struct device *dev = &pdev->dev;
+       struct device *dev = map->host;
        resource_size_t phys_addr;
 
        phys_addr = map->resource;
@@ -403,15 +402,15 @@ EXPORT_SYMBOL_NS_GPL(cxl_map_pmu_regs, CXL);
 
 static int cxl_map_regblock(struct cxl_register_map *map)
 {
-       struct device *dev = map->dev;
+       struct device *host = map->host;
 
        map->base = ioremap(map->resource, map->max_size);
        if (!map->base) {
-               dev_err(dev, "failed to map registers\n");
+               dev_err(host, "failed to map registers\n");
                return -ENOMEM;
        }
 
-       dev_dbg(dev, "Mapped CXL Memory Device resource %pa\n", &map->resource);
+       dev_dbg(host, "Mapped CXL Memory Device resource %pa\n", &map->resource);
        return 0;
 }
 
@@ -425,28 +424,28 @@ static int cxl_probe_regs(struct cxl_register_map *map)
 {
        struct cxl_component_reg_map *comp_map;
        struct cxl_device_reg_map *dev_map;
-       struct device *dev = map->dev;
+       struct device *host = map->host;
        void __iomem *base = map->base;
 
        switch (map->reg_type) {
        case CXL_REGLOC_RBI_COMPONENT:
                comp_map = &map->component_map;
-               cxl_probe_component_regs(dev, base, comp_map);
-               dev_dbg(dev, "Set up component registers\n");
+               cxl_probe_component_regs(host, base, comp_map);
+               dev_dbg(host, "Set up component registers\n");
                break;
        case CXL_REGLOC_RBI_MEMDEV:
                dev_map = &map->device_map;
-               cxl_probe_device_regs(dev, base, dev_map);
+               cxl_probe_device_regs(host, base, dev_map);
                if (!dev_map->status.valid || !dev_map->mbox.valid ||
                    !dev_map->memdev.valid) {
-                       dev_err(dev, "registers not found: %s%s%s\n",
+                       dev_err(host, "registers not found: %s%s%s\n",
                                !dev_map->status.valid ? "status " : "",
                                !dev_map->mbox.valid ? "mbox " : "",
                                !dev_map->memdev.valid ? "memdev " : "");
                        return -ENXIO;
                }
 
-               dev_dbg(dev, "Probing device registers...\n");
+               dev_dbg(host, "Probing device registers...\n");
                break;
        default:
                break;
@@ -470,6 +469,42 @@ int cxl_setup_regs(struct cxl_register_map *map)
 }
 EXPORT_SYMBOL_NS_GPL(cxl_setup_regs, CXL);
 
+u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb)
+{
+       void __iomem *addr;
+       u16 offset = 0;
+       u32 cap_hdr;
+
+       if (WARN_ON_ONCE(rcrb == CXL_RESOURCE_NONE))
+               return 0;
+
+       if (!request_mem_region(rcrb, SZ_4K, dev_name(dev)))
+               return 0;
+
+       addr = ioremap(rcrb, SZ_4K);
+       if (!addr)
+               goto out;
+
+       cap_hdr = readl(addr + offset);
+       while (PCI_EXT_CAP_ID(cap_hdr) != PCI_EXT_CAP_ID_ERR) {
+               offset = PCI_EXT_CAP_NEXT(cap_hdr);
+
+               /* Offset 0 terminates capability list. */
+               if (!offset)
+                       break;
+               cap_hdr = readl(addr + offset);
+       }
+
+       if (offset)
+               dev_dbg(dev, "found AER extended capability (0x%x)\n", offset);
+
+       iounmap(addr);
+out:
+       release_mem_region(rcrb, SZ_4K);
+
+       return offset;
+}
+
 resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri,
                                    enum cxl_rcrb which)
 {
index 76d92561af29499d2fe14629bdd11247039d5dab..687043ece1018c41c256c02cd697749d7916a42f 100644 (file)
@@ -221,6 +221,14 @@ struct cxl_regs {
        struct_group_tagged(cxl_pmu_regs, pmu_regs,
                void __iomem *pmu;
        );
+
+       /*
+        * RCH downstream port specific RAS register
+        * @aer: CXL 3.0 8.2.1.1 RCH Downstream Port RCRB
+        */
+       struct_group_tagged(cxl_rch_regs, rch_regs,
+               void __iomem *dport_aer;
+       );
 };
 
 struct cxl_reg_map {
@@ -247,7 +255,7 @@ struct cxl_pmu_reg_map {
 
 /**
  * struct cxl_register_map - DVSEC harvested register block mapping parameters
- * @dev: device for devm operations and logging
+ * @host: device for devm operations and logging
  * @base: virtual base of the register-block-BAR + @block_offset
  * @resource: physical resource base of the register block
  * @max_size: maximum mapping size to perform register search
@@ -257,7 +265,7 @@ struct cxl_pmu_reg_map {
  * @pmu_map: cxl_reg_maps for CXL Performance Monitoring Units
  */
 struct cxl_register_map {
-       struct device *dev;
+       struct device *host;
        void __iomem *base;
        resource_size_t resource;
        resource_size_t max_size;
@@ -278,8 +286,7 @@ int cxl_map_component_regs(const struct cxl_register_map *map,
                           unsigned long map_mask);
 int cxl_map_device_regs(const struct cxl_register_map *map,
                        struct cxl_device_regs *regs);
-int cxl_map_pmu_regs(struct pci_dev *pdev, struct cxl_pmu_regs *regs,
-                    struct cxl_register_map *map);
+int cxl_map_pmu_regs(struct cxl_register_map *map, struct cxl_pmu_regs *regs);
 
 enum cxl_regloc_type;
 int cxl_count_regblock(struct pci_dev *pdev, enum cxl_regloc_type type);
@@ -321,6 +328,7 @@ enum cxl_decoder_type {
  */
 #define CXL_DECODER_MAX_INTERLEAVE 16
 
+#define CXL_QOS_CLASS_INVALID -1
 
 /**
  * struct cxl_decoder - Common CXL HDM Decoder Attributes
@@ -432,6 +440,7 @@ typedef struct cxl_dport *(*cxl_calc_hb_fn)(struct cxl_root_decoder *cxlrd,
  * @calc_hb: which host bridge covers the n'th position by granularity
  * @platform_data: platform specific configuration data
  * @range_lock: sync region autodiscovery by address range
+ * @qos_class: QoS performance class cookie
  * @cxlsd: base cxl switch decoder
  */
 struct cxl_root_decoder {
@@ -440,6 +449,7 @@ struct cxl_root_decoder {
        cxl_calc_hb_fn calc_hb;
        void *platform_data;
        struct mutex range_lock;
+       int qos_class;
        struct cxl_switch_decoder cxlsd;
 };
 
@@ -572,11 +582,10 @@ struct cxl_dax_region {
  * @regions: cxl_region_ref instances, regions mapped by this port
  * @parent_dport: dport that points to this port in the parent
  * @decoder_ida: allocator for decoder ids
- * @comp_map: component register capability mappings
+ * @reg_map: component and ras register mapping parameters
  * @nr_dports: number of entries in @dports
  * @hdm_end: track last allocated HDM decoder instance for allocation ordering
  * @commit_end: cursor to track highest committed decoder for commit ordering
- * @component_reg_phys: component register capability base address (optional)
  * @dead: last ep has been removed, force port re-creation
  * @depth: How deep this port is relative to the root. depth 0 is the root.
  * @cdat: Cached CDAT data
@@ -592,11 +601,10 @@ struct cxl_port {
        struct xarray regions;
        struct cxl_dport *parent_dport;
        struct ida decoder_ida;
-       struct cxl_register_map comp_map;
+       struct cxl_register_map reg_map;
        int nr_dports;
        int hdm_end;
        int commit_end;
-       resource_size_t component_reg_phys;
        bool dead;
        unsigned int depth;
        struct cxl_cdat {
@@ -620,19 +628,21 @@ struct cxl_rcrb_info {
 /**
  * struct cxl_dport - CXL downstream port
  * @dport_dev: PCI bridge or firmware device representing the downstream link
- * @comp_map: component register capability mappings
+ * @reg_map: component and ras register mapping parameters
  * @port_id: unique hardware identifier for dport in decoder target list
  * @rcrb: Data about the Root Complex Register Block layout
  * @rch: Indicate whether this dport was enumerated in RCH or VH mode
  * @port: reference to cxl_port that contains this downstream port
+ * @regs: Dport parsed register blocks
  */
 struct cxl_dport {
        struct device *dport_dev;
-       struct cxl_register_map comp_map;
+       struct cxl_register_map reg_map;
        int port_id;
        struct cxl_rcrb_info rcrb;
        bool rch;
        struct cxl_port *port;
+       struct cxl_regs regs;
 };
 
 /**
@@ -679,6 +689,7 @@ static inline bool is_cxl_root(struct cxl_port *port)
        return port->uport_dev == port->dev.parent;
 }
 
+int cxl_num_decoders_committed(struct cxl_port *port);
 bool is_cxl_port(const struct device *dev);
 struct cxl_port *to_cxl_port(const struct device *dev);
 struct pci_bus;
@@ -706,6 +717,13 @@ struct cxl_dport *devm_cxl_add_rch_dport(struct cxl_port *port,
                                         struct device *dport_dev, int port_id,
                                         resource_size_t rcrb);
 
+#ifdef CONFIG_PCIEAER_CXL
+void cxl_setup_parent_dport(struct device *host, struct cxl_dport *dport);
+#else
+static inline void cxl_setup_parent_dport(struct device *host,
+                                         struct cxl_dport *dport) { }
+#endif
+
 struct cxl_decoder *to_cxl_decoder(struct device *dev);
 struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
 struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
index 706f8a6d1ef43c78dfdebd62cd5b55347186fdbb..a2fcbca253f3983a6c4bfb1b2f964314e1b250d6 100644 (file)
@@ -84,9 +84,12 @@ static inline bool is_cxl_endpoint(struct cxl_port *port)
        return is_cxl_memdev(port->uport_dev);
 }
 
-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
+struct cxl_memdev *devm_cxl_add_memdev(struct device *host,
+                                      struct cxl_dev_state *cxlds);
+int devm_cxl_sanitize_setup_notifier(struct device *host,
+                                    struct cxl_memdev *cxlmd);
 struct cxl_memdev_state;
-int cxl_memdev_setup_fw_upload(struct cxl_memdev_state *mds);
+int devm_cxl_setup_fw_upload(struct device *host, struct cxl_memdev_state *mds);
 int devm_cxl_dpa_reserve(struct cxl_endpoint_decoder *cxled,
                         resource_size_t base, resource_size_t len,
                         resource_size_t skipped);
@@ -360,16 +363,16 @@ struct cxl_fw_state {
  *
  * @state: state of last security operation
  * @enabled_cmds: All security commands enabled in the CEL
- * @poll: polling for sanitization is enabled, device has no mbox irq support
  * @poll_tmo_secs: polling timeout
+ * @sanitize_active: sanitize completion pending
  * @poll_dwork: polling work item
  * @sanitize_node: sanitation sysfs file to notify
  */
 struct cxl_security_state {
        unsigned long state;
        DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
-       bool poll;
        int poll_tmo_secs;
+       bool sanitize_active;
        struct delayed_work poll_dwork;
        struct kernfs_node *sanitize_node;
 };
@@ -397,6 +400,7 @@ enum cxl_devtype {
  *
  * @dev: The device associated with this CXL state
  * @cxlmd: The device representing the CXL.mem capabilities of @dev
+ * @reg_map: component and ras register mapping parameters
  * @regs: Parsed register blocks
  * @cxl_dvsec: Offset to the PCIe device DVSEC
  * @rcd: operating in RCD mode (CXL 3.0 9.11.8 CXL Devices Attached to an RCH)
@@ -404,13 +408,13 @@ enum cxl_devtype {
  * @dpa_res: Overall DPA resource tree for the device
  * @pmem_res: Active Persistent memory capacity configuration
  * @ram_res: Active Volatile memory capacity configuration
- * @component_reg_phys: register base of component registers
  * @serial: PCIe Device Serial Number
  * @type: Generic Memory Class device or Vendor Specific Memory device
  */
 struct cxl_dev_state {
        struct device *dev;
        struct cxl_memdev *cxlmd;
+       struct cxl_register_map reg_map;
        struct cxl_regs regs;
        int cxl_dvsec;
        bool rcd;
@@ -418,7 +422,6 @@ struct cxl_dev_state {
        struct resource dpa_res;
        struct resource pmem_res;
        struct resource ram_res;
-       resource_size_t component_reg_phys;
        u64 serial;
        enum cxl_devtype type;
 };
@@ -883,7 +886,7 @@ static inline void cxl_mem_active_dec(void)
 }
 #endif
 
-int cxl_mem_sanitize(struct cxl_memdev_state *mds, u16 cmd);
+int cxl_mem_sanitize(struct cxl_memdev *cxlmd, u16 cmd);
 
 struct cxl_hdm {
        struct cxl_component_regs regs;
index 317c7548e4e9b4596686a8dfb04c96e3140bf783..e087febf9af047c81dfb11b06d91d92d442d586c 100644 (file)
@@ -49,7 +49,6 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
                                 struct cxl_dport *parent_dport)
 {
        struct cxl_port *parent_port = parent_dport->port;
-       struct cxl_dev_state *cxlds = cxlmd->cxlds;
        struct cxl_port *endpoint, *iter, *down;
        int rc;
 
@@ -65,8 +64,8 @@ static int devm_cxl_add_endpoint(struct device *host, struct cxl_memdev *cxlmd,
                ep->next = down;
        }
 
-       endpoint = devm_cxl_add_port(host, &cxlmd->dev,
-                                    cxlds->component_reg_phys,
+       /* Note: endpoint port component registers are derived from @cxlds */
+       endpoint = devm_cxl_add_port(host, &cxlmd->dev, CXL_RESOURCE_NONE,
                                     parent_dport);
        if (IS_ERR(endpoint))
                return PTR_ERR(endpoint);
@@ -158,6 +157,8 @@ static int cxl_mem_probe(struct device *dev)
        else
                endpoint_parent = &parent_port->dev;
 
+       cxl_setup_parent_dport(dev, dport);
+
        device_lock(endpoint_parent);
        if (!endpoint_parent->driver) {
                dev_err(dev, "CXL port topology %s not enabled\n",
index 44a21ab7add51b70d17c645434934af347a93e58..0155fb66b580d7f939e3f2d92a34b5c3c8a89586 100644 (file)
@@ -85,25 +85,28 @@ static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
                            status & CXLMDEV_DEV_FATAL ? " fatal" : "",        \
                            status & CXLMDEV_FW_HALT ? " firmware-halt" : "")
 
+/*
+ * Threaded irq dev_id's must be globally unique.  cxl_dev_id provides a unique
+ * wrapper object for each irq within the same cxlds.
+ */
 struct cxl_dev_id {
        struct cxl_dev_state *cxlds;
 };
 
 static int cxl_request_irq(struct cxl_dev_state *cxlds, int irq,
-                          irq_handler_t handler, irq_handler_t thread_fn)
+                          irq_handler_t thread_fn)
 {
        struct device *dev = cxlds->dev;
        struct cxl_dev_id *dev_id;
 
-       /* dev_id must be globally unique and must contain the cxlds */
        dev_id = devm_kzalloc(dev, sizeof(*dev_id), GFP_KERNEL);
        if (!dev_id)
                return -ENOMEM;
        dev_id->cxlds = cxlds;
 
-       return devm_request_threaded_irq(dev, irq, handler, thread_fn,
-                                        IRQF_SHARED | IRQF_ONESHOT,
-                                        NULL, dev_id);
+       return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
+                                        IRQF_SHARED | IRQF_ONESHOT, NULL,
+                                        dev_id);
 }
 
 static bool cxl_mbox_background_complete(struct cxl_dev_state *cxlds)
@@ -128,10 +131,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
        reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
        opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
        if (opcode == CXL_MBOX_OP_SANITIZE) {
+               mutex_lock(&mds->mbox_mutex);
                if (mds->security.sanitize_node)
-                       sysfs_notify_dirent(mds->security.sanitize_node);
-
-               dev_dbg(cxlds->dev, "Sanitization operation ended\n");
+                       mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
+               mutex_unlock(&mds->mbox_mutex);
        } else {
                /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
                rcuwait_wake_up(&mds->mbox_wait);
@@ -152,18 +155,16 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
        mutex_lock(&mds->mbox_mutex);
        if (cxl_mbox_background_complete(cxlds)) {
                mds->security.poll_tmo_secs = 0;
-               put_device(cxlds->dev);
-
                if (mds->security.sanitize_node)
                        sysfs_notify_dirent(mds->security.sanitize_node);
+               mds->security.sanitize_active = false;
 
                dev_dbg(cxlds->dev, "Sanitization operation ended\n");
        } else {
                int timeout = mds->security.poll_tmo_secs + 10;
 
                mds->security.poll_tmo_secs = min(15 * 60, timeout);
-               queue_delayed_work(system_wq, &mds->security.poll_dwork,
-                                  timeout * HZ);
+               schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
        }
        mutex_unlock(&mds->mbox_mutex);
 }
@@ -295,18 +296,15 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
                 * and allow userspace to poll(2) for completion.
                 */
                if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
-                       if (mds->security.poll) {
-                               /* hold the device throughout */
-                               get_device(cxlds->dev);
-
-                               /* give first timeout a second */
-                               timeout = 1;
-                               mds->security.poll_tmo_secs = timeout;
-                               queue_delayed_work(system_wq,
-                                                  &mds->security.poll_dwork,
-                                                  timeout * HZ);
-                       }
-
+                       if (mds->security.sanitize_active)
+                               return -EBUSY;
+
+                       /* give first timeout a second */
+                       timeout = 1;
+                       mds->security.poll_tmo_secs = timeout;
+                       mds->security.sanitize_active = true;
+                       schedule_delayed_work(&mds->security.poll_dwork,
+                                             timeout * HZ);
                        dev_dbg(dev, "Sanitization operation started\n");
                        goto success;
                }
@@ -389,7 +387,9 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
        const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
        struct device *dev = cxlds->dev;
        unsigned long timeout;
+       int irq, msgnum;
        u64 md_status;
+       u32 ctrl;
 
        timeout = jiffies + mbox_ready_timeout * HZ;
        do {
@@ -437,33 +437,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
        dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
 
        rcuwait_init(&mds->mbox_wait);
+       INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
 
-       if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) {
-               u32 ctrl;
-               int irq, msgnum;
-               struct pci_dev *pdev = to_pci_dev(cxlds->dev);
-
-               msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
-               irq = pci_irq_vector(pdev, msgnum);
-               if (irq < 0)
-                       goto mbox_poll;
-
-               if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL))
-                       goto mbox_poll;
+       /* background command interrupts are optional */
+       if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
+               return 0;
 
-               /* enable background command mbox irq support */
-               ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
-               ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
-               writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+       msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
+       irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
+       if (irq < 0)
+               return 0;
 
+       if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq))
                return 0;
-       }
 
-mbox_poll:
-       mds->security.poll = true;
-       INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
+       dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
+       /* enable background command mbox irq support */
+       ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+       ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
+       writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
 
-       dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
        return 0;
 }
 
@@ -484,7 +477,7 @@ static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
        resource_size_t component_reg_phys;
 
        *map = (struct cxl_register_map) {
-               .dev = &pdev->dev,
+               .host = &pdev->dev,
                .resource = CXL_RESOURCE_NONE,
        };
 
@@ -653,7 +646,7 @@ static int cxl_event_req_irq(struct cxl_dev_state *cxlds, u8 setting)
        if (irq < 0)
                return irq;
 
-       return cxl_request_irq(cxlds, irq, NULL, cxl_event_thread);
+       return cxl_request_irq(cxlds, irq, cxl_event_thread);
 }
 
 static int cxl_event_get_int_policy(struct cxl_memdev_state *mds,
@@ -834,16 +827,14 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         * If the component registers can't be found, the cxl_pci driver may
         * still be useful for management functions so don't return an error.
         */
-       cxlds->component_reg_phys = CXL_RESOURCE_NONE;
-       rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT, &map);
+       rc = cxl_pci_setup_regs(pdev, CXL_REGLOC_RBI_COMPONENT,
+                               &cxlds->reg_map);
        if (rc)
                dev_warn(&pdev->dev, "No component registers (%d)\n", rc);
-       else if (!map.component_map.ras.valid)
+       else if (!cxlds->reg_map.component_map.ras.valid)
                dev_dbg(&pdev->dev, "RAS registers not found\n");
 
-       cxlds->component_reg_phys = map.resource;
-
-       rc = cxl_map_component_regs(&map, &cxlds->regs.component,
+       rc = cxl_map_component_regs(&cxlds->reg_map, &cxlds->regs.component,
                                    BIT(CXL_CM_CAP_CAP_ID_RAS));
        if (rc)
                dev_dbg(&pdev->dev, "Failed to map RAS capability.\n");
@@ -882,11 +873,15 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (rc)
                return rc;
 
-       cxlmd = devm_cxl_add_memdev(cxlds);
+       cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
        if (IS_ERR(cxlmd))
                return PTR_ERR(cxlmd);
 
-       rc = cxl_memdev_setup_fw_upload(mds);
+       rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
+       if (rc)
+               return rc;
+
+       rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
        if (rc)
                return rc;
 
@@ -900,7 +895,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        break;
                }
 
-               rc = cxl_map_pmu_regs(pdev, &pmu_regs, &map);
+               rc = cxl_map_pmu_regs(&map, &pmu_regs);
                if (rc) {
                        dev_dbg(&pdev->dev, "Could not map PMU regs\n");
                        break;
index 6240e05b95424b9c9627fcde4a0e50171bde1761..47bc8e0b859077776c06fc1daee901ab49cbdd2d 100644 (file)
@@ -62,6 +62,9 @@ static int cxl_switch_port_probe(struct cxl_port *port)
        struct cxl_hdm *cxlhdm;
        int rc;
 
+       /* Cache the data early to ensure is_visible() works */
+       read_cdat_data(port);
+
        rc = devm_cxl_port_enumerate_dports(port);
        if (rc < 0)
                return rc;
index 2a05f411328fd45c81bd2a9e55c10370d504d40e..95c10f3d2282f59c814b27c3440de2b69b34484d 100644 (file)
@@ -191,7 +191,7 @@ struct fw_node {
        /* Upper layer specific data. */
        void *data;
 
-       struct fw_node *ports[];
+       struct fw_node *ports[] __counted_by(port_count);
 };
 
 static inline struct fw_node *fw_node_get(struct fw_node *node)
index ef4c12f0877ba428f55db3510771da69b0c7cad9..06964a3c130f6addeed20eca1ed26153a2260854 100644 (file)
@@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM)          += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \
                                   -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \
                                   -DEFI_HAVE_STRCMP -fno-builtin -fpic \
                                   $(call cc-option,-mno-single-pic-base)
-cflags-$(CONFIG_RISCV)         += -fpic
+cflags-$(CONFIG_RISCV)         += -fpic -DNO_ALTERNATIVE
 cflags-$(CONFIG_LOONGARCH)     += -fpie
 
 cflags-$(CONFIG_EFI_PARAMS_FROM_FDT)   += -I$(srctree)/scripts/dtc/libfdt
index 58f107194fdafdfae6d86318fdaf2f206065757a..04c03402db6ddf3c3971c309a2bcb68b31ed7824 100644 (file)
@@ -750,12 +750,12 @@ static int aspeed_gpio_request(struct gpio_chip *chip, unsigned int offset)
        if (!have_gpio(gpiochip_get_data(chip), offset))
                return -ENODEV;
 
-       return pinctrl_gpio_request(chip->base + offset);
+       return pinctrl_gpio_request(chip, offset);
 }
 
 static void aspeed_gpio_free(struct gpio_chip *chip, unsigned int offset)
 {
-       pinctrl_gpio_free(chip->base + offset);
+       pinctrl_gpio_free(chip, offset);
 }
 
 static int usecs_to_cycles(struct aspeed_gpio *gpio, unsigned long usecs,
@@ -973,7 +973,7 @@ static int aspeed_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
        else if (param == PIN_CONFIG_BIAS_DISABLE ||
                        param == PIN_CONFIG_BIAS_PULL_DOWN ||
                        param == PIN_CONFIG_DRIVE_STRENGTH)
-               return pinctrl_gpio_set_config(chip->base + offset, config);
+               return pinctrl_gpio_set_config(chip, offset, config);
        else if (param == PIN_CONFIG_DRIVE_OPEN_DRAIN ||
                        param == PIN_CONFIG_DRIVE_OPEN_SOURCE)
                /* Return -ENOTSUPP to trigger emulation, as per datasheet */
index 858e6ebbb584c27505a54842391898e53b467e8b..6c862c57232277adf48865da61fd802f8c7cb627 100644 (file)
@@ -227,14 +227,9 @@ static int em_gio_to_irq(struct gpio_chip *chip, unsigned offset)
        return irq_create_mapping(gpio_to_priv(chip)->irq_domain, offset);
 }
 
-static int em_gio_request(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_gpio_request(chip->base + offset);
-}
-
 static void em_gio_free(struct gpio_chip *chip, unsigned offset)
 {
-       pinctrl_gpio_free(chip->base + offset);
+       pinctrl_gpio_free(chip, offset);
 
        /* Set the GPIO as an input to ensure that the next GPIO request won't
        * drive the GPIO pin as an output.
@@ -311,7 +306,7 @@ static int em_gio_probe(struct platform_device *pdev)
        gpio_chip->direction_output = em_gio_direction_output;
        gpio_chip->set = em_gio_set;
        gpio_chip->to_irq = em_gio_to_irq;
-       gpio_chip->request = em_gio_request;
+       gpio_chip->request = pinctrl_gpio_request;
        gpio_chip->free = em_gio_free;
        gpio_chip->label = name;
        gpio_chip->parent = dev;
index 8f80ca8ec1eda55a74fe1232b04c5876d9e1263d..a13f3c18ccd4aea55e033bf03b5f38ee5bb9d772 100644 (file)
@@ -346,7 +346,7 @@ static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned int pin)
         * Check with the pinctrl driver whether this pin is usable as
         * an input GPIO
         */
-       ret = pinctrl_gpio_direction_input(chip->base + pin);
+       ret = pinctrl_gpio_direction_input(chip, pin);
        if (ret)
                return ret;
 
@@ -366,7 +366,7 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned int pin,
         * Check with the pinctrl driver whether this pin is usable as
         * an output GPIO
         */
-       ret = pinctrl_gpio_direction_output(chip->base + pin);
+       ret = pinctrl_gpio_direction_output(chip, pin);
        if (ret)
                return ret;
 
@@ -757,7 +757,6 @@ static const struct pwm_ops mvebu_pwm_ops = {
        .free = mvebu_pwm_free,
        .get_state = mvebu_pwm_get_state,
        .apply = mvebu_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static void __maybe_unused mvebu_pwm_suspend(struct mvebu_gpio_chip *mvchip)
index cae9661862fe1ddade8676596244d4a474fbf8ce..91cea97255fa6dd851bb2036fcf76f81956b8fe7 100644 (file)
@@ -260,7 +260,7 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        int ret;
 
        if (pxa_gpio_has_pinctrl()) {
-               ret = pinctrl_gpio_direction_input(chip->base + offset);
+               ret = pinctrl_gpio_direction_input(chip, offset);
                if (ret)
                        return ret;
        }
@@ -289,7 +289,7 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
        writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
 
        if (pxa_gpio_has_pinctrl()) {
-               ret = pinctrl_gpio_direction_output(chip->base + offset);
+               ret = pinctrl_gpio_direction_output(chip, offset);
                if (ret)
                        return ret;
        }
index d8b1baae63575917eb1dec614338496f73e06943..6159fda38d5da1bb19b72d83f31c33e503c433bb 100644 (file)
@@ -275,7 +275,7 @@ static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
                return error;
        }
 
-       error = pinctrl_gpio_request(chip->base + offset);
+       error = pinctrl_gpio_request(chip, offset);
        if (error)
                pm_runtime_put(p->dev);
 
@@ -286,7 +286,7 @@ static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
 {
        struct gpio_rcar_priv *p = gpiochip_get_data(chip);
 
-       pinctrl_gpio_free(chip->base + offset);
+       pinctrl_gpio_free(chip, offset);
 
        /*
         * Set the GPIO as an input to ensure that the next GPIO request won't
index 23040a8cea34e36fa58b2000d7426fe8e4f7986b..0bd339813110e0ca0120d619c62cbde20f3b733c 100644 (file)
@@ -159,9 +159,9 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip,
 
 
        if (input)
-               pinctrl_gpio_direction_input(bank->pin_base + offset);
+               pinctrl_gpio_direction_input(chip, offset);
        else
-               pinctrl_gpio_direction_output(bank->pin_base + offset);
+               pinctrl_gpio_direction_output(chip, offset);
 
        raw_spin_lock_irqsave(&bank->slock, flags);
        rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr);
index ea715582bcf34ab0213473aa48caf1c47adc0811..ea5f9cc14bc48651b27a890faf2b8f4465b5fba6 100644 (file)
@@ -137,16 +137,11 @@ static void tegra_gpio_disable(struct tegra_gpio_info *tgi, unsigned int gpio)
        tegra_gpio_mask_write(tgi, GPIO_MSK_CNF(tgi, gpio), gpio, 0);
 }
 
-static int tegra_gpio_request(struct gpio_chip *chip, unsigned int offset)
-{
-       return pinctrl_gpio_request(chip->base + offset);
-}
-
 static void tegra_gpio_free(struct gpio_chip *chip, unsigned int offset)
 {
        struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
 
-       pinctrl_gpio_free(chip->base + offset);
+       pinctrl_gpio_free(chip, offset);
        tegra_gpio_disable(tgi, offset);
 }
 
@@ -179,7 +174,7 @@ static int tegra_gpio_direction_input(struct gpio_chip *chip,
        tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, offset), offset, 0);
        tegra_gpio_enable(tgi, offset);
 
-       ret = pinctrl_gpio_direction_input(chip->base + offset);
+       ret = pinctrl_gpio_direction_input(chip, offset);
        if (ret < 0)
                dev_err(tgi->dev,
                        "Failed to set pinctrl input direction of GPIO %d: %d",
@@ -199,7 +194,7 @@ static int tegra_gpio_direction_output(struct gpio_chip *chip,
        tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, offset), offset, 1);
        tegra_gpio_enable(tgi, offset);
 
-       ret = pinctrl_gpio_direction_output(chip->base + offset);
+       ret = pinctrl_gpio_direction_output(chip, offset);
        if (ret < 0)
                dev_err(tgi->dev,
                        "Failed to set pinctrl output direction of GPIO %d: %d",
@@ -717,7 +712,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
        }
 
        tgi->gc.label                   = "tegra-gpio";
-       tgi->gc.request                 = tegra_gpio_request;
+       tgi->gc.request                 = pinctrl_gpio_request;
        tgi->gc.free                    = tegra_gpio_free;
        tgi->gc.direction_input         = tegra_gpio_direction_input;
        tgi->gc.get                     = tegra_gpio_get;
index 444501c56a3bc10e71e6e46e2078c1c7e8d28c93..07e5e6323e86ad32960904834ccec06d23e3648e 100644 (file)
@@ -130,7 +130,7 @@ static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
                vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
        }
 
-       return pinctrl_gpio_direction_input(chip->base + gpio);
+       return pinctrl_gpio_direction_input(chip, gpio);
 }
 
 static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
@@ -148,7 +148,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
                vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
        }
 
-       return pinctrl_gpio_direction_output(chip->base + gpio);
+       return pinctrl_gpio_direction_output(chip, gpio);
 }
 
 static void vf610_gpio_irq_handler(struct irq_desc *desc)
index 31fc71a612c2d3b8a4cc047367310e0fe5a47409..02ffda6c1e51244497c384f3964eaea6607a6f08 100644 (file)
@@ -2287,8 +2287,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
         * FIXME: find a non-racy way to retrieve this information. Maybe a
         * lock common to both frameworks?
         */
-       ok_for_pinctrl =
-               pinctrl_gpio_can_use_line(gc->base + info->offset);
+       ok_for_pinctrl = pinctrl_gpio_can_use_line(gc, info->offset);
 
        spin_lock_irqsave(&gpio_lock, flags);
 
index cbafcd95243e8fcb2c29912a9073afb350d8203a..95d2a7b2ea3e21239ea04c1875c2870ccedb213a 100644 (file)
@@ -1092,28 +1092,6 @@ void gpiochip_remove(struct gpio_chip *gc)
 }
 EXPORT_SYMBOL_GPL(gpiochip_remove);
 
-/*
- * FIXME: This will be removed soon.
- *
- * This function is depracated, don't use.
- */
-struct gpio_chip *gpiochip_find(void *data,
-                               int (*match)(struct gpio_chip *gc,
-                                            void *data))
-{
-       struct gpio_device *gdev;
-       struct gpio_chip *gc = NULL;
-
-       gdev = gpio_device_find(data, match);
-       if (gdev) {
-               gc = gdev->chip;
-               gpio_device_put(gdev);
-       }
-
-       return gc;
-}
-EXPORT_SYMBOL_GPL(gpiochip_find);
-
 /**
  * gpio_device_find() - find a specific GPIO device
  * @data: data to pass to match function
@@ -2036,7 +2014,7 @@ int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset)
                return 0;
 #endif
 
-       return pinctrl_gpio_request(gc->gpiodev->base + offset);
+       return pinctrl_gpio_request(gc, offset);
 }
 EXPORT_SYMBOL_GPL(gpiochip_generic_request);
 
@@ -2052,7 +2030,7 @@ void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset)
                return;
 #endif
 
-       pinctrl_gpio_free(gc->gpiodev->base + offset);
+       pinctrl_gpio_free(gc, offset);
 }
 EXPORT_SYMBOL_GPL(gpiochip_generic_free);
 
@@ -2065,7 +2043,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free);
 int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
                            unsigned long config)
 {
-       return pinctrl_gpio_set_config(gc->gpiodev->base + offset, config);
+       return pinctrl_gpio_set_config(gc, offset, config);
 }
 EXPORT_SYMBOL_GPL(gpiochip_generic_config);
 
index 91820838b63b3ec0d7d90b892edaa895600dd0f8..afec09930efa953c88f34f94dc6818c3c6f2c448 100644 (file)
@@ -363,9 +363,6 @@ struct amdgpu_ip_block_version {
        const struct amd_ip_funcs *funcs;
 };
 
-#define HW_REV(_Major, _Minor, _Rev) \
-       ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev)))
-
 struct amdgpu_ip_block {
        struct amdgpu_ip_block_status status;
        const struct amdgpu_ip_block_version *version;
@@ -1162,11 +1159,18 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
                            uint32_t reg, uint32_t acc_flags);
 u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev,
                                    u64 reg_addr);
+uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
+                               uint32_t reg, uint32_t acc_flags,
+                               uint32_t xcc_id);
 void amdgpu_device_wreg(struct amdgpu_device *adev,
                        uint32_t reg, uint32_t v,
                        uint32_t acc_flags);
 void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev,
                                     u64 reg_addr, u32 reg_data);
+void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
+                           uint32_t reg, uint32_t v,
+                           uint32_t acc_flags,
+                           uint32_t xcc_id);
 void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
                             uint32_t reg, uint32_t v, uint32_t xcc_id);
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
@@ -1207,8 +1211,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 #define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
 
-#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
-#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
+#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0)
+#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0)
 
 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
@@ -1218,6 +1222,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
+#define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst)
+#define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
 #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
 #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
index 4da82fc64fef458ab78e1642b5e1fa2f73405ff7..2deebece810e78a7ce039772a839684f570bceca 100644 (file)
@@ -1494,6 +1494,9 @@ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
        if (adev->asic_type < CHIP_RAVEN)
                return false;
 
+       if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
+               return false;
+
        /*
         * If ACPI_FADT_LOW_POWER_S0 is not set in the FADT, it is generally
         * risky to do any special firmware-related preparations for entering
index 490c8f5ddb602a71299deca0c95d0f53903c0e6e..f6598b9e4faa35b7fc51b9def3637e7e273189b1 100644 (file)
@@ -300,14 +300,13 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd,
        hqd_end = SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_AQL_DISPATCH_ID_HI);
 
        for (reg = hqd_base; reg <= hqd_end; reg++)
-               WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);
+               WREG32_XCC(reg, mqd_hqd[reg - hqd_base], inst);
 
 
        /* Activate doorbell logic before triggering WPTR poll. */
        data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
                             CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
-       WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_DOORBELL_CONTROL),
-                               data);
+       WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_DOORBELL_CONTROL, data);
 
        if (wptr) {
                /* Don't read wptr with get_user because the user
@@ -336,27 +335,24 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd,
                guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
                guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
 
-               WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_LO),
-                      lower_32_bits(guessed_wptr));
-               WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI),
-                      upper_32_bits(guessed_wptr));
-               WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR),
-                      lower_32_bits((uintptr_t)wptr));
-               WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
-                       regCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+               WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_LO,
+                       lower_32_bits(guessed_wptr));
+               WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_HI,
+                       upper_32_bits(guessed_wptr));
+               WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR,
+                       lower_32_bits((uintptr_t)wptr));
+               WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_PQ_WPTR_POLL_ADDR_HI,
                        upper_32_bits((uintptr_t)wptr));
-               WREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_PQ_WPTR_POLL_CNTL1),
-                      (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id,
-                              queue_id));
+               WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_PQ_WPTR_POLL_CNTL1,
+                       (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
        }
 
        /* Start the EOP fetcher */
-       WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_EOP_RPTR),
-              REG_SET_FIELD(m->cp_hqd_eop_rptr,
-                            CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
+       WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_EOP_RPTR,
+              REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
 
        data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
-       WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), regCP_HQD_ACTIVE), data);
+       WREG32_SOC15_RLC(GC, GET_INST(GC, inst), regCP_HQD_ACTIVE, data);
 
        kgd_gfx_v9_release_queue(adev, inst);
 
@@ -494,15 +490,15 @@ static uint32_t kgd_gfx_v9_4_3_set_address_watch(
                        VALID,
                        1);
 
-       WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+       WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
                        regTCP_WATCH0_ADDR_H) +
                        (watch_id * TCP_WATCH_STRIDE)),
-                       watch_address_high);
+                       watch_address_high, inst);
 
-       WREG32_RLC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
+       WREG32_XCC((SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
                        regTCP_WATCH0_ADDR_L) +
                        (watch_id * TCP_WATCH_STRIDE)),
-                       watch_address_low);
+                       watch_address_low, inst);
 
        return watch_address_cntl;
 }
index 51011e8ee90dcfd820921fb742ec19b5d48b6a9e..00fbc0f44c929bee0e34af62d96b5ff8c36d6ec6 100644 (file)
@@ -91,8 +91,8 @@ void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi
 {
        kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst);
 
-       WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG), sh_mem_config);
-       WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmSH_MEM_BASES), sh_mem_bases);
+       WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG, sh_mem_config);
+       WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_BASES, sh_mem_bases);
        /* APE1 no longer exists on GFX9 */
 
        kgd_gfx_v9_unlock_srbm(adev, inst);
@@ -239,14 +239,13 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
 
        for (reg = hqd_base;
             reg <= SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI); reg++)
-               WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);
+               WREG32_XCC(reg, mqd_hqd[reg - hqd_base], inst);
 
 
        /* Activate doorbell logic before triggering WPTR poll. */
        data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
                             CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
-       WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL),
-                                       data);
+       WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL, data);
 
        if (wptr) {
                /* Don't read wptr with get_user because the user
@@ -275,25 +274,24 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd,
                guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
                guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
 
-               WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO),
-                      lower_32_bits(guessed_wptr));
-               WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI),
-                      upper_32_bits(guessed_wptr));
-               WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR),
-                      lower_32_bits((uintptr_t)wptr));
-               WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
-                      upper_32_bits((uintptr_t)wptr));
-               WREG32_SOC15(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1,
-                      (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
+               WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_LO,
+                       lower_32_bits(guessed_wptr));
+               WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_HI,
+                       upper_32_bits(guessed_wptr));
+               WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR,
+                       lower_32_bits((uintptr_t)wptr));
+               WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
+                       upper_32_bits((uintptr_t)wptr));
+               WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_PQ_WPTR_POLL_CNTL1,
+                       (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id));
        }
 
        /* Start the EOP fetcher */
-       WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR),
-              REG_SET_FIELD(m->cp_hqd_eop_rptr,
-                            CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
+       WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_EOP_RPTR,
+              REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
 
        data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
-       WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE), data);
+       WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_ACTIVE, data);
 
        kgd_gfx_v9_release_queue(adev, inst);
 
@@ -556,7 +554,7 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd,
                break;
        }
 
-       WREG32_RLC(SOC15_REG_OFFSET(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST), type);
+       WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_DEQUEUE_REQUEST, type);
 
        end_jiffies = (utimeout * HZ / 1000) + jiffies;
        while (true) {
@@ -908,8 +906,8 @@ void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev,
                                        uint32_t inst)
 
 {
-       *wait_times = RREG32(SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
-                       mmCP_IQ_WAIT_TIME2));
+       *wait_times = RREG32_SOC15_RLC(GC, GET_INST(GC, inst),
+                       mmCP_IQ_WAIT_TIME2);
 }
 
 void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev,
index 1eccad4ce2434b17d60daf7a284a92a74356a876..41fbc4fd0fac303176d7ecb386f81d8af8cac2c6 100644 (file)
@@ -425,6 +425,32 @@ validate_fail:
        return ret;
 }
 
+static int amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo,
+                                              uint32_t domain,
+                                              struct dma_fence *fence)
+{
+       int ret = amdgpu_bo_reserve(bo, false);
+
+       if (ret)
+               return ret;
+
+       ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
+       if (ret)
+               goto unreserve_out;
+
+       ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
+       if (ret)
+               goto unreserve_out;
+
+       dma_resv_add_fence(bo->tbo.base.resv, fence,
+                          DMA_RESV_USAGE_BOOKKEEP);
+
+unreserve_out:
+       amdgpu_bo_unreserve(bo);
+
+       return ret;
+}
+
 static int amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo)
 {
        return amdgpu_amdkfd_bo_validate(bo, bo->allowed_domains, false);
@@ -1784,6 +1810,15 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                }
                bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
                bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
+       } else {
+               mutex_lock(&avm->process_info->lock);
+               if (avm->process_info->eviction_fence &&
+                   !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
+                       ret = amdgpu_amdkfd_bo_validate_and_fence(bo, domain,
+                               &avm->process_info->eviction_fence->base);
+               mutex_unlock(&avm->process_info->lock);
+               if (ret)
+                       goto err_validate_bo;
        }
 
        if (offset)
@@ -1793,6 +1828,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 
 allocate_init_user_pages_failed:
 err_pin_bo:
+err_validate_bo:
        remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
        drm_vma_node_revoke(&gobj->vma_node, drm_priv);
 err_node_allow:
@@ -1866,10 +1902,6 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
        if (unlikely(ret))
                return ret;
 
-       /* The eviction fence should be removed by the last unmap.
-        * TODO: Log an error condition if the bo still has the eviction fence
-        * attached
-        */
        amdgpu_amdkfd_remove_eviction_fence(mem->bo,
                                        process_info->eviction_fence);
        pr_debug("Release VA 0x%llx - 0x%llx\n", mem->va,
@@ -1998,19 +2030,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
        if (unlikely(ret))
                goto out_unreserve;
 
-       if (mem->mapped_to_gpu_memory == 0 &&
-           !amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
-               /* Validate BO only once. The eviction fence gets added to BO
-                * the first time it is mapped. Validate will wait for all
-                * background evictions to complete.
-                */
-               ret = amdgpu_amdkfd_bo_validate(bo, domain, true);
-               if (ret) {
-                       pr_debug("Validate failed\n");
-                       goto out_unreserve;
-               }
-       }
-
        list_for_each_entry(entry, &mem->attachments, list) {
                if (entry->bo_va->base.vm != avm || entry->is_mapped)
                        continue;
@@ -2037,10 +2056,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                         mem->mapped_to_gpu_memory);
        }
 
-       if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
-               dma_resv_add_fence(bo->tbo.base.resv,
-                                  &avm->process_info->eviction_fence->base,
-                                  DMA_RESV_USAGE_BOOKKEEP);
        ret = unreserve_bo_and_vms(&ctx, false, false);
 
        goto out;
@@ -2074,7 +2089,6 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv)
 {
        struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
-       struct amdkfd_process_info *process_info = avm->process_info;
        unsigned long bo_size = mem->bo->tbo.base.size;
        struct kfd_mem_attachment *entry;
        struct bo_vm_reservation_context ctx;
@@ -2115,15 +2129,6 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                         mem->mapped_to_gpu_memory);
        }
 
-       /* If BO is unmapped from all VMs, unfence it. It can be evicted if
-        * required.
-        */
-       if (mem->mapped_to_gpu_memory == 0 &&
-           !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
-           !mem->bo->tbo.pin_count)
-               amdgpu_amdkfd_remove_eviction_fence(mem->bo,
-                                               process_info->eviction_fence);
-
 unreserve_out:
        unreserve_bo_and_vms(&ctx, false, false);
 out:
@@ -2351,8 +2356,20 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
        amdgpu_sync_create(&(*mem)->sync);
        (*mem)->is_imported = true;
 
+       mutex_lock(&avm->process_info->lock);
+       if (avm->process_info->eviction_fence &&
+           !dma_fence_is_signaled(&avm->process_info->eviction_fence->base))
+               ret = amdgpu_amdkfd_bo_validate_and_fence(bo, (*mem)->domain,
+                               &avm->process_info->eviction_fence->base);
+       mutex_unlock(&avm->process_info->lock);
+       if (ret)
+               goto err_remove_mem;
+
        return 0;
 
+err_remove_mem:
+       remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+       drm_vma_node_revoke(&obj->vma_node, drm_priv);
 err_free_mem:
        kfree(*mem);
 err_put_obj:
index 5bbb23e102ba0fec448ad73c382965e788ed8cbd..618e469e36222be2af443951e95555d6e2dfb92f 100644 (file)
@@ -29,6 +29,7 @@
 #include "amdgpu.h"
 #include "atom.h"
 
+#include <linux/device.h>
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/acpi.h>
@@ -287,6 +288,10 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
        if (adev->flags & AMD_IS_APU)
                return false;
 
+       /* ATRM is for on-platform devices only */
+       if (dev_is_removable(&adev->pdev->dev))
+               return false;
+
        while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
                if ((pdev->class != PCI_CLASS_DISPLAY_VGA << 8) &&
                    (pdev->class != PCI_CLASS_DISPLAY_OTHER << 8))
index 781e5c5ce04d220daee8f4f1f34ce4a45f5c4f85..702f6610d02435faa84a5853b6fed0491ea437c5 100644 (file)
@@ -172,6 +172,7 @@ int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
        }
 
        rcu_read_unlock();
+       *result = NULL;
        return -ENOENT;
 }
 
index e210fe5c22a04814ed3c09e0c7c13ce43772db61..df3ecfa9e13f5d87d3e67397e22d0d339c62a809 100644 (file)
@@ -1117,6 +1117,11 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                        return r;
        }
 
+       /* FIXME: In theory this loop shouldn't be needed any more when
+        * amdgpu_vm_handle_moved handles all moved BOs that are reserved
+        * with p->ticket. But removing it caused test regressions, so I'm
+        * leaving it here for now.
+        */
        amdgpu_bo_list_for_each_entry(e, p->bo_list) {
                bo_va = e->bo_va;
                if (bo_va == NULL)
@@ -1131,7 +1136,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                        return r;
        }
 
-       r = amdgpu_vm_handle_moved(adev, vm);
+       r = amdgpu_vm_handle_moved(adev, vm, &p->exec.ticket);
        if (r)
                return r;
 
@@ -1410,7 +1415,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                if (r == -ENOMEM)
                        DRM_ERROR("Not enough memory for command submission!\n");
                else if (r != -ERESTARTSYS && r != -EAGAIN)
-                       DRM_ERROR("Failed to process the buffer list %d!\n", r);
+                       DRM_DEBUG("Failed to process the buffer list %d!\n", r);
                goto error_fini;
        }
 
index d5f78179b2b6efa79818a666553ba3b6f5923371..7eeaf0aa7f8121fc59dcd30e48a00dc9750d5e5f 100644 (file)
@@ -41,6 +41,7 @@
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
+#include <linux/device.h>
 #include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
@@ -72,6 +73,7 @@
 #include "amdgpu_pmu.h"
 #include "amdgpu_fru_eeprom.h"
 #include "amdgpu_reset.h"
+#include "amdgpu_virt.h"
 
 #include <linux/suspend.h>
 #include <drm/task_barrier.h>
@@ -471,7 +473,7 @@ uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
-                       ret = amdgpu_kiq_rreg(adev, reg);
+                       ret = amdgpu_kiq_rreg(adev, reg, 0);
                        up_read(&adev->reset_domain->sem);
                } else {
                        ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
@@ -508,6 +510,49 @@ uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
        BUG();
 }
 
+
+/**
+ * amdgpu_device_xcc_rreg - read a memory mapped IO or indirect register with specific XCC
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @acc_flags: access flags which require special behavior
+ * @xcc_id: xcc accelerated compute core id
+ *
+ * Returns the 32 bit value from the offset specified.
+ */
+uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev,
+                               uint32_t reg, uint32_t acc_flags,
+                               uint32_t xcc_id)
+{
+       uint32_t ret, rlcg_flag;
+
+       if (amdgpu_device_skip_hw_access(adev))
+               return 0;
+
+       if ((reg * 4) < adev->rmmio_size) {
+               if (amdgpu_sriov_vf(adev) &&
+                   !amdgpu_sriov_runtime(adev) &&
+                   adev->gfx.rlc.rlcg_reg_access_supported &&
+                   amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
+                                                        GC_HWIP, false,
+                                                        &rlcg_flag)) {
+                       ret = amdgpu_virt_rlcg_reg_rw(adev, reg, 0, rlcg_flag, xcc_id);
+               } else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+                   amdgpu_sriov_runtime(adev) &&
+                   down_read_trylock(&adev->reset_domain->sem)) {
+                       ret = amdgpu_kiq_rreg(adev, reg, xcc_id);
+                       up_read(&adev->reset_domain->sem);
+               } else {
+                       ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
+               }
+       } else {
+               ret = adev->pcie_rreg(adev, reg * 4);
+       }
+
+       return ret;
+}
+
 /*
  * MMIO register write with bytes helper functions
  * @offset:bytes offset from MMIO start
@@ -555,7 +600,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
                if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
                    amdgpu_sriov_runtime(adev) &&
                    down_read_trylock(&adev->reset_domain->sem)) {
-                       amdgpu_kiq_wreg(adev, reg, v);
+                       amdgpu_kiq_wreg(adev, reg, v, 0);
                        up_read(&adev->reset_domain->sem);
                } else {
                        writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
@@ -596,6 +641,47 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
        }
 }
 
+/**
+ * amdgpu_device_xcc_wreg - write to a memory mapped IO or indirect register with specific XCC
+ *
+ * @adev: amdgpu_device pointer
+ * @reg: dword aligned register offset
+ * @v: 32 bit value to write to the register
+ * @acc_flags: access flags which require special behavior
+ * @xcc_id: xcc accelerated compute core id
+ *
+ * Writes the value specified to the offset specified.
+ */
+void amdgpu_device_xcc_wreg(struct amdgpu_device *adev,
+                       uint32_t reg, uint32_t v,
+                       uint32_t acc_flags, uint32_t xcc_id)
+{
+       uint32_t rlcg_flag;
+
+       if (amdgpu_device_skip_hw_access(adev))
+               return;
+
+       if ((reg * 4) < adev->rmmio_size) {
+               if (amdgpu_sriov_vf(adev) &&
+                   !amdgpu_sriov_runtime(adev) &&
+                   adev->gfx.rlc.rlcg_reg_access_supported &&
+                   amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags,
+                                                        GC_HWIP, true,
+                                                        &rlcg_flag)) {
+                       amdgpu_virt_rlcg_reg_rw(adev, reg, v, rlcg_flag, xcc_id);
+               } else if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+                   amdgpu_sriov_runtime(adev) &&
+                   down_read_trylock(&adev->reset_domain->sem)) {
+                       amdgpu_kiq_wreg(adev, reg, v, xcc_id);
+                       up_read(&adev->reset_domain->sem);
+               } else {
+                       writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+               }
+       } else {
+               adev->pcie_wreg(adev, reg * 4, v);
+       }
+}
+
 /**
  * amdgpu_device_indirect_rreg - read an indirect register
  *
@@ -1073,6 +1159,8 @@ static int amdgpu_device_asic_init(struct amdgpu_device *adev)
            amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(11, 0, 0)) {
                amdgpu_psp_wait_for_bootloader(adev);
                ret = amdgpu_atomfirmware_asic_init(adev, true);
+               /* TODO: check the return val and stop device initialization if boot fails */
+               amdgpu_psp_query_boot_status(adev);
                return ret;
        } else {
                return amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -2223,7 +2311,6 @@ out:
  */
 static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 {
-       struct drm_device *dev = adev_to_drm(adev);
        struct pci_dev *parent;
        int i, r;
        bool total;
@@ -2294,7 +2381,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
            (amdgpu_is_atpx_hybrid() ||
             amdgpu_has_atpx_dgpu_power_cntl()) &&
            ((adev->flags & AMD_IS_APU) == 0) &&
-           !pci_is_thunderbolt_attached(to_pci_dev(dev->dev)))
+           !dev_is_removable(&adev->pdev->dev))
                adev->flags |= AMD_IS_PX;
 
        if (!(adev->flags & AMD_IS_APU)) {
@@ -2497,6 +2584,18 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
                                  ring->name);
                        return r;
                }
+               r = amdgpu_uvd_entity_init(adev, ring);
+               if (r) {
+                       DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
+                                 ring->name);
+                       return r;
+               }
+               r = amdgpu_vce_entity_init(adev, ring);
+               if (r) {
+                       DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
+                                 ring->name);
+                       return r;
+               }
        }
 
        amdgpu_xcp_update_partition_sched_list(adev);
@@ -3962,13 +4061,23 @@ int amdgpu_device_init(struct amdgpu_device *adev,
                                }
                        }
                } else {
-                       tmp = amdgpu_reset_method;
-                       /* It should do a default reset when loading or reloading the driver,
-                        * regardless of the module parameter reset_method.
-                        */
-                       amdgpu_reset_method = AMD_RESET_METHOD_NONE;
-                       r = amdgpu_asic_reset(adev);
-                       amdgpu_reset_method = tmp;
+                       switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+                       case IP_VERSION(13, 0, 0):
+                       case IP_VERSION(13, 0, 7):
+                       case IP_VERSION(13, 0, 10):
+                               r = psp_gpu_reset(adev);
+                               break;
+                       default:
+                               tmp = amdgpu_reset_method;
+                               /* It should do a default reset when loading or reloading the driver,
+                                * regardless of the module parameter reset_method.
+                                */
+                               amdgpu_reset_method = AMD_RESET_METHOD_NONE;
+                               r = amdgpu_asic_reset(adev);
+                               amdgpu_reset_method = tmp;
+                               break;
+                       }
+
                        if (r) {
                                dev_err(adev->dev, "asic reset on init failed\n");
                                goto failed;
@@ -4132,7 +4241,7 @@ fence_driver_init:
 
        px = amdgpu_device_supports_px(ddev);
 
-       if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+       if (px || (!dev_is_removable(&adev->pdev->dev) &&
                                apple_gmux_detect(NULL, NULL)))
                vga_switcheroo_register_client(adev->pdev,
                                               &amdgpu_switcheroo_ops, px);
@@ -4282,7 +4391,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 
        px = amdgpu_device_supports_px(adev_to_drm(adev));
 
-       if (px || (!pci_is_thunderbolt_attached(adev->pdev) &&
+       if (px || (!dev_is_removable(&adev->pdev->dev) &&
                                apple_gmux_detect(NULL, NULL)))
                vga_switcheroo_unregister_client(adev->pdev);
 
@@ -4474,19 +4583,18 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
        }
        amdgpu_fence_driver_hw_init(adev);
 
-       r = amdgpu_device_ip_late_init(adev);
-       if (r)
-               goto exit;
-
-       queue_delayed_work(system_wq, &adev->delayed_init_work,
-                          msecs_to_jiffies(AMDGPU_RESUME_MS));
-
        if (!adev->in_s0ix) {
                r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
                if (r)
                        goto exit;
        }
 
+       r = amdgpu_device_ip_late_init(adev);
+       if (r)
+               goto exit;
+
+       queue_delayed_work(system_wq, &adev->delayed_init_work,
+                          msecs_to_jiffies(AMDGPU_RESUME_MS));
 exit:
        if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_init_data_exchange(adev);
@@ -5566,10 +5674,6 @@ skip_hw_reset:
                        drm_sched_start(&ring->sched, true);
                }
 
-               if (adev->enable_mes &&
-                   amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(11, 0, 3))
-                       amdgpu_mes_self_test(tmp_adev);
-
                if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled)
                        drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
 
index b6a53e8429b220f33a30a03534552e516dadb90a..0431eafa86b5324f4d63cc6060cea30baa03088b 100644 (file)
@@ -99,6 +99,7 @@
 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
 
 #define mmRCC_CONFIG_MEMSIZE   0xde3
+#define mmMP0_SMN_C2PMSG_33    0x16061
 #define mmMM_INDEX             0x0
 #define mmMM_INDEX_HI          0x6
 #define mmMM_DATA              0x1
@@ -239,8 +240,26 @@ static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev,
 static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
                                                 uint8_t *binary)
 {
-       uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
-       int ret = 0;
+       uint64_t vram_size;
+       u32 msg;
+       int i, ret = 0;
+
+       /* It can take up to a second for IFWI init to complete on some dGPUs,
+        * but generally it should be in the 60-100ms range.  Normally this starts
+        * as soon as the device gets power so by the time the OS loads this has long
+        * completed.  However, when a card is hotplugged via e.g., USB4, we need to
+        * wait for this to complete.  Once the C2PMSG is updated, we can
+        * continue.
+        */
+       if (dev_is_removable(&adev->pdev->dev)) {
+               for (i = 0; i < 1000; i++) {
+                       msg = RREG32(mmMP0_SMN_C2PMSG_33);
+                       if (msg & 0x80000000)
+                               break;
+                       msleep(1);
+               }
+       }
+       vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
 
        if (vram_size) {
                uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
@@ -2449,6 +2468,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
        if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(4, 8, 0))
                adev->gmc.xgmi.supported = true;
 
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
+               adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 4, 0);
+
        /* set NBIO version */
        switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
        case IP_VERSION(6, 1, 0):
index b5e28fa3f4144fccfe7db3a7e21181aaf0394e31..e7e87a3b2601eb130d9eaf13d8fab0c7b5e5c4cd 100644 (file)
@@ -409,7 +409,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
                if (!r)
                        r = amdgpu_vm_clear_freed(adev, vm, NULL);
                if (!r)
-                       r = amdgpu_vm_handle_moved(adev, vm);
+                       r = amdgpu_vm_handle_moved(adev, vm, ticket);
 
                if (r && r != -EBUSY)
                        DRM_ERROR("Failed to invalidate VM page tables (%d))\n",
index 6cc6e3991410ed476e5dbf33c039f18705cb7430..3095a3a864af713c57ebcee2b192dbc99866e7fe 100644 (file)
@@ -2041,6 +2041,14 @@ static const struct pci_device_id pciidlist[] = {
 
 MODULE_DEVICE_TABLE(pci, pciidlist);
 
+static const struct amdgpu_asic_type_quirk asic_type_quirks[] = {
+       /* differentiate between P10 and P11 asics with the same DID */
+       {0x67FF, 0xE3, CHIP_POLARIS10},
+       {0x67FF, 0xE7, CHIP_POLARIS10},
+       {0x67FF, 0xF3, CHIP_POLARIS10},
+       {0x67FF, 0xF7, CHIP_POLARIS10},
+};
+
 static const struct drm_driver amdgpu_kms_driver;
 
 static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
@@ -2083,6 +2091,22 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
        }
 }
 
+static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(asic_type_quirks); i++) {
+               if (pdev->device == asic_type_quirks[i].device &&
+                       pdev->revision == asic_type_quirks[i].revision) {
+                               flags &= ~AMD_ASIC_MASK;
+                               flags |= asic_type_quirks[i].type;
+                               break;
+                       }
+       }
+
+       return flags;
+}
+
 static int amdgpu_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
 {
@@ -2110,15 +2134,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
                         "See modparam exp_hw_support\n");
                return -ENODEV;
        }
-       /* differentiate between P10 and P11 asics with the same DID */
-       if (pdev->device == 0x67FF &&
-           (pdev->revision == 0xE3 ||
-            pdev->revision == 0xE7 ||
-            pdev->revision == 0xF3 ||
-            pdev->revision == 0xF7)) {
-               flags &= ~AMD_ASIC_MASK;
-               flags |= CHIP_POLARIS10;
-       }
+
+       flags = amdgpu_fix_asic_type(pdev, flags);
 
        /* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
         * however, SME requires an indirect IOMMU mapping because the encryption
index c92e0aba69e157a4c903d40aa4e35374e4283b5c..b9674c57c4365fb5ebdf9644fc4ac0a31b955da8 100644 (file)
@@ -385,9 +385,11 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
        struct amdgpu_ring *ring = &kiq->ring;
        u32 domain = AMDGPU_GEM_DOMAIN_GTT;
 
+#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
        /* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
        if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
                domain |= AMDGPU_GEM_DOMAIN_VRAM;
+#endif
 
        /* create MQD for KIQ */
        if (!adev->enable_mes_kiq && !ring->mqd_obj) {
@@ -929,12 +931,12 @@ void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev,
                func(adev, ras_error_status, i);
 }
 
-uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id)
 {
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq, reg_val_offs = 0, value = 0;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
        struct amdgpu_ring *ring = &kiq->ring;
 
        if (amdgpu_device_skip_hw_access(adev))
@@ -997,12 +999,12 @@ failed_kiq_read:
        return ~0;
 }
 
-void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id)
 {
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
        struct amdgpu_ring *ring = &kiq->ring;
 
        BUG_ON(!ring->funcs->emit_wreg);
index 7088c5015675be2bfa62fa43b37c5ec4e542cb00..f23bafec71c5ff5e00c7a0072037179497241324 100644 (file)
@@ -521,8 +521,8 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry);
-uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
-void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id);
+void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id);
 int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev);
 void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id);
 
index 2dce338b0f1e7441bf0ab8fa6c8f71b06993e3c8..5f71414190e9ab5744d040cc906757c546a580bb 100644 (file)
@@ -826,7 +826,10 @@ void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
                                gc_ver == IP_VERSION(9, 4, 3) ||
                                gc_ver >= IP_VERSION(10, 3, 0));
 
-       gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
+       if (!amdgpu_sriov_xnack_support(adev))
+               gmc->noretry = 1;
+       else
+               gmc->noretry = (amdgpu_noretry == -1) ? noretry_default : amdgpu_noretry;
 }
 
 void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
index 5a828c175e3a0a93ff925aa7e0a11dc294fa5c56..cf33eb219e25741bcbc27bda849b3642c70bd143 100644 (file)
@@ -143,6 +143,46 @@ int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev)
        return 0;
 }
 
+void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set)
+{
+       if (!mca_set)
+               return;
+
+       memset(mca_set, 0, sizeof(*mca_set));
+       INIT_LIST_HEAD(&mca_set->list);
+}
+
+int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry)
+{
+       struct mca_bank_node *node;
+
+       if (!entry)
+               return -EINVAL;
+
+       node = kvzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node)
+               return -ENOMEM;
+
+       memcpy(&node->entry, entry, sizeof(*entry));
+
+       INIT_LIST_HEAD(&node->node);
+       list_add_tail(&node->node, &mca_set->list);
+
+       mca_set->nr_entries++;
+
+       return 0;
+}
+
+void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set)
+{
+       struct mca_bank_node *node, *tmp;
+
+       list_for_each_entry_safe(node, tmp, &mca_set->list, node) {
+               list_del(&node->node);
+               kvfree(node);
+       }
+}
+
 void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs)
 {
        struct amdgpu_mca *mca = &adev->mca;
@@ -160,6 +200,65 @@ int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable)
        return -EOPNOTSUPP;
 }
 
+static void amdgpu_mca_smu_mca_bank_dump(struct amdgpu_device *adev, int idx, struct mca_bank_entry *entry)
+{
+       dev_info(adev->dev, "[Hardware error] Accelerator Check Architecture events logged\n");
+       dev_info(adev->dev, "[Hardware error] aca entry[%02d].STATUS=0x%016llx\n",
+                idx, entry->regs[MCA_REG_IDX_STATUS]);
+       dev_info(adev->dev, "[Hardware error] aca entry[%02d].ADDR=0x%016llx\n",
+                idx, entry->regs[MCA_REG_IDX_ADDR]);
+       dev_info(adev->dev, "[Hardware error] aca entry[%02d].MISC0=0x%016llx\n",
+                idx, entry->regs[MCA_REG_IDX_MISC0]);
+       dev_info(adev->dev, "[Hardware error] aca entry[%02d].IPID=0x%016llx\n",
+                idx, entry->regs[MCA_REG_IDX_IPID]);
+       dev_info(adev->dev, "[Hardware error] aca entry[%02d].SYND=0x%016llx\n",
+                idx, entry->regs[MCA_REG_IDX_SYND]);
+}
+
+int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data)
+{
+       struct amdgpu_smuio_mcm_config_info mcm_info;
+       struct mca_bank_set mca_set;
+       struct mca_bank_node *node;
+       struct mca_bank_entry *entry;
+       uint32_t count;
+       int ret, i = 0;
+
+       amdgpu_mca_bank_set_init(&mca_set);
+
+       ret = amdgpu_mca_smu_get_mca_set(adev, blk, type, &mca_set);
+       if (ret)
+               goto out_mca_release;
+
+       list_for_each_entry(node, &mca_set.list, node) {
+               entry = &node->entry;
+
+               amdgpu_mca_smu_mca_bank_dump(adev, i++, entry);
+
+               count = 0;
+               ret = amdgpu_mca_smu_parse_mca_error_count(adev, blk, type, entry, &count);
+               if (ret)
+                       goto out_mca_release;
+
+               if (!count)
+                       continue;
+
+               mcm_info.socket_id = entry->info.socket_id;
+               mcm_info.die_id = entry->info.aid;
+
+               if (type == AMDGPU_MCA_ERROR_TYPE_UE)
+                       amdgpu_ras_error_statistic_ue_count(err_data, &mcm_info, (uint64_t)count);
+               else
+                       amdgpu_ras_error_statistic_ce_count(err_data, &mcm_info, (uint64_t)count);
+       }
+
+out_mca_release:
+       amdgpu_mca_bank_set_release(&mca_set);
+
+       return ret;
+}
+
+
 int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count)
 {
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
@@ -173,17 +272,77 @@ int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_m
        return -EOPNOTSUPP;
 }
 
-int amdgpu_mca_smu_get_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
-                                  enum amdgpu_mca_error_type type, uint32_t *count)
+int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+                                           enum amdgpu_mca_error_type type, uint32_t *total)
 {
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
-       if (!count)
+       struct mca_bank_set mca_set;
+       struct mca_bank_node *node;
+       struct mca_bank_entry *entry;
+       uint32_t count;
+       int ret;
+
+       if (!total)
                return -EINVAL;
 
-       if (mca_funcs && mca_funcs->mca_get_error_count)
-               return mca_funcs->mca_get_error_count(adev, blk, type, count);
+       if (!mca_funcs)
+               return -EOPNOTSUPP;
 
-       return -EOPNOTSUPP;
+       if (!mca_funcs->mca_get_ras_mca_set || !mca_funcs->mca_get_valid_mca_count)
+               return -EOPNOTSUPP;
+
+       amdgpu_mca_bank_set_init(&mca_set);
+
+       ret = mca_funcs->mca_get_ras_mca_set(adev, blk, type, &mca_set);
+       if (ret)
+               goto err_mca_set_release;
+
+       *total = 0;
+       list_for_each_entry(node, &mca_set.list, node) {
+               entry = &node->entry;
+
+               count = 0;
+               ret = mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, &count);
+               if (ret)
+                       goto err_mca_set_release;
+
+               *total += count;
+       }
+
+err_mca_set_release:
+       amdgpu_mca_bank_set_release(&mca_set);
+
+       return ret;
+}
+
+int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+                                        enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
+{
+       const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+       if (!count || !entry)
+               return -EINVAL;
+
+       if (!mca_funcs || !mca_funcs->mca_parse_mca_error_count)
+               return -EOPNOTSUPP;
+
+
+       return mca_funcs->mca_parse_mca_error_count(adev, blk, type, entry, count);
+}
+
+int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+                              enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
+{
+       const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+
+       if (!mca_set)
+               return -EINVAL;
+
+       if (!mca_funcs || !mca_funcs->mca_get_ras_mca_set)
+               return -EOPNOTSUPP;
+
+       WARN_ON(!list_empty(&mca_set->list));
+
+       return mca_funcs->mca_get_ras_mca_set(adev, blk, type, mca_set);
 }
 
 int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
@@ -230,14 +389,21 @@ static int amdgpu_mca_smu_debug_mode_set(void *data, u64 val)
 static void mca_dump_entry(struct seq_file *m, struct mca_bank_entry *entry)
 {
        int i, idx = entry->idx;
+       int reg_idx_array[] = {
+               MCA_REG_IDX_STATUS,
+               MCA_REG_IDX_ADDR,
+               MCA_REG_IDX_MISC0,
+               MCA_REG_IDX_IPID,
+               MCA_REG_IDX_SYND,
+       };
 
        seq_printf(m, "mca entry[%d].type: %s\n", idx, entry->type == AMDGPU_MCA_ERROR_TYPE_UE ? "UE" : "CE");
        seq_printf(m, "mca entry[%d].ip: %d\n", idx, entry->ip);
        seq_printf(m, "mca entry[%d].info: socketid:%d aid:%d hwid:0x%03x mcatype:0x%04x\n",
                   idx, entry->info.socket_id, entry->info.aid, entry->info.hwid, entry->info.mcatype);
 
-       for (i = 0; i < ARRAY_SIZE(entry->regs); i++)
-               seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, i, entry->regs[i]);
+       for (i = 0; i < ARRAY_SIZE(reg_idx_array); i++)
+               seq_printf(m, "mca entry[%d].regs[%d]: 0x%016llx\n", idx, reg_idx_array[i], entry->regs[reg_idx_array[i]]);
 }
 
 static int mca_dump_show(struct seq_file *m, enum amdgpu_mca_error_type type)
index 28ad463cf5c945b4b27ea0866e33eea7db066e3d..2b488fcf2f95b2021872383c39c1c03b22004531 100644 (file)
 
 #define MCA_MAX_REGS_COUNT     (16)
 
+#define MCA_REG_FIELD(x, h, l)                 (((x) & GENMASK_ULL(h, l)) >> l)
+#define MCA_REG__STATUS__VAL(x)                        MCA_REG_FIELD(x, 63, 63)
+#define MCA_REG__STATUS__OVERFLOW(x)           MCA_REG_FIELD(x, 62, 62)
+#define MCA_REG__STATUS__UC(x)                 MCA_REG_FIELD(x, 61, 61)
+#define MCA_REG__STATUS__EN(x)                 MCA_REG_FIELD(x, 60, 60)
+#define MCA_REG__STATUS__MISCV(x)              MCA_REG_FIELD(x, 59, 59)
+#define MCA_REG__STATUS__ADDRV(x)              MCA_REG_FIELD(x, 58, 58)
+#define MCA_REG__STATUS__PCC(x)                        MCA_REG_FIELD(x, 57, 57)
+#define MCA_REG__STATUS__ERRCOREIDVAL(x)       MCA_REG_FIELD(x, 56, 56)
+#define MCA_REG__STATUS__TCC(x)                        MCA_REG_FIELD(x, 55, 55)
+#define MCA_REG__STATUS__SYNDV(x)              MCA_REG_FIELD(x, 53, 53)
+#define MCA_REG__STATUS__CECC(x)               MCA_REG_FIELD(x, 46, 46)
+#define MCA_REG__STATUS__UECC(x)               MCA_REG_FIELD(x, 45, 45)
+#define MCA_REG__STATUS__DEFERRED(x)           MCA_REG_FIELD(x, 44, 44)
+#define MCA_REG__STATUS__POISON(x)             MCA_REG_FIELD(x, 43, 43)
+#define MCA_REG__STATUS__SCRUB(x)              MCA_REG_FIELD(x, 40, 40)
+#define MCA_REG__STATUS__ERRCOREID(x)          MCA_REG_FIELD(x, 37, 32)
+#define MCA_REG__STATUS__ADDRLSB(x)            MCA_REG_FIELD(x, 29, 24)
+#define MCA_REG__STATUS__ERRORCODEEXT(x)       MCA_REG_FIELD(x, 21, 16)
+#define MCA_REG__STATUS__ERRORCODE(x)          MCA_REG_FIELD(x, 15, 0)
+
 enum amdgpu_mca_ip {
        AMDGPU_MCA_IP_UNKNOW = -1,
        AMDGPU_MCA_IP_PSP = 0,
@@ -33,6 +54,7 @@ enum amdgpu_mca_ip {
        AMDGPU_MCA_IP_SMU,
        AMDGPU_MCA_IP_MP5,
        AMDGPU_MCA_IP_UMC,
+       AMDGPU_MCA_IP_PCS_XGMI,
        AMDGPU_MCA_IP_COUNT,
 };
 
@@ -57,6 +79,15 @@ struct amdgpu_mca {
        const struct amdgpu_mca_smu_funcs *mca_funcs;
 };
 
+enum mca_reg_idx {
+       MCA_REG_IDX_STATUS              = 1,
+       MCA_REG_IDX_ADDR                = 2,
+       MCA_REG_IDX_MISC0               = 3,
+       MCA_REG_IDX_IPID                = 5,
+       MCA_REG_IDX_SYND                = 6,
+       MCA_REG_IDX_COUNT               = 16,
+};
+
 struct mca_bank_info {
        int socket_id;
        int aid;
@@ -72,18 +103,28 @@ struct mca_bank_entry {
        uint64_t regs[MCA_MAX_REGS_COUNT];
 };
 
+struct mca_bank_node {
+       struct mca_bank_entry entry;
+       struct list_head node;
+};
+
+struct mca_bank_set {
+       int nr_entries;
+       struct list_head list;
+};
+
 struct amdgpu_mca_smu_funcs {
        int max_ue_count;
        int max_ce_count;
        int (*mca_set_debug_mode)(struct amdgpu_device *adev, bool enable);
-       int (*mca_get_error_count)(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
-                                  enum amdgpu_mca_error_type type, uint32_t *count);
+       int (*mca_get_ras_mca_set)(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+                                  struct mca_bank_set *mca_set);
+       int (*mca_parse_mca_error_count)(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+                                        struct mca_bank_entry *entry, uint32_t *count);
        int (*mca_get_valid_mca_count)(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
                                       uint32_t *count);
        int (*mca_get_mca_entry)(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
                                 int idx, struct mca_bank_entry *entry);
-       int (*mca_get_ras_mca_idx_array)(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
-                                        enum amdgpu_mca_error_type type, int *idx_array, int *idx_array_size);
 };
 
 void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
@@ -107,11 +148,22 @@ int amdgpu_mca_mpio_ras_sw_init(struct amdgpu_device *adev);
 void amdgpu_mca_smu_init_funcs(struct amdgpu_device *adev, const struct amdgpu_mca_smu_funcs *mca_funcs);
 int amdgpu_mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable);
 int amdgpu_mca_smu_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, uint32_t *count);
+int amdgpu_mca_smu_get_mca_set_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+                                          enum amdgpu_mca_error_type type, uint32_t *total);
 int amdgpu_mca_smu_get_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
                                   enum amdgpu_mca_error_type type, uint32_t *count);
+int amdgpu_mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+                                        enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count);
+int amdgpu_mca_smu_get_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+                              enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set);
 int amdgpu_mca_smu_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_type type,
                                 int idx, struct mca_bank_entry *entry);
 
 void amdgpu_mca_smu_debugfs_init(struct amdgpu_device *adev, struct dentry *root);
 
+void amdgpu_mca_bank_set_init(struct mca_bank_set *mca_set);
+int amdgpu_mca_bank_set_add_entry(struct mca_bank_set *mca_set, struct mca_bank_entry *entry);
+void amdgpu_mca_bank_set_release(struct mca_bank_set *mca_set);
+int amdgpu_mca_smu_log_ras_error(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type, struct ras_err_data *err_data);
+
 #endif
index 59f10b353b3ad3ec5a340324c64657cb7facb3a8..9ddbf1494326a0d7e6606f6f25fe06f32333917b 100644 (file)
@@ -557,8 +557,20 @@ static void amdgpu_mes_queue_init_mqd(struct amdgpu_device *adev,
        mqd_prop.hqd_queue_priority = p->hqd_queue_priority;
        mqd_prop.hqd_active = false;
 
+       if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
+           p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+               mutex_lock(&adev->srbm_mutex);
+               amdgpu_gfx_select_me_pipe_q(adev, p->ring->me, p->ring->pipe, 0, 0, 0);
+       }
+
        mqd_mgr->init_mqd(adev, q->mqd_cpu_ptr, &mqd_prop);
 
+       if (p->queue_type == AMDGPU_RING_TYPE_GFX ||
+           p->queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+               amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0, 0);
+               mutex_unlock(&adev->srbm_mutex);
+       }
+
        amdgpu_bo_unreserve(q->mqd_obj);
 }
 
@@ -994,9 +1006,13 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
        switch (queue_type) {
        case AMDGPU_RING_TYPE_GFX:
                ring->funcs = adev->gfx.gfx_ring[0].funcs;
+               ring->me = adev->gfx.gfx_ring[0].me;
+               ring->pipe = adev->gfx.gfx_ring[0].pipe;
                break;
        case AMDGPU_RING_TYPE_COMPUTE:
                ring->funcs = adev->gfx.compute_ring[0].funcs;
+               ring->me = adev->gfx.compute_ring[0].me;
+               ring->pipe = adev->gfx.compute_ring[0].pipe;
                break;
        case AMDGPU_RING_TYPE_SDMA:
                ring->funcs = adev->sdma.instance[0].ring.funcs;
index 0dcb6c36b02c0baa29c0c98347724ed0970f2c3a..cef920a93924b60140f40fb5020871a3e0c883eb 100644 (file)
@@ -1062,9 +1062,6 @@ static const char * const amdgpu_vram_names[] = {
  */
 int amdgpu_bo_init(struct amdgpu_device *adev)
 {
-       /* set the default AGP aperture state */
-       amdgpu_gmc_set_agp_default(adev, &adev->gmc);
-
        /* On A+A platform, VRAM can be mapped as WB */
        if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) {
                /* reserve PAT memory space to WC for VRAM */
index 648bd5e12830bce18a87afd496fea17fd7bf15de..32b701cc0376d3451f480e50dd0405e142ebedc9 100644 (file)
@@ -2120,6 +2120,21 @@ int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev)
        return ret;
 }
 
+int amdgpu_psp_query_boot_status(struct amdgpu_device *adev)
+{
+       struct psp_context *psp = &adev->psp;
+       int ret = 0;
+
+       if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU))
+               return 0;
+
+       if (psp->funcs &&
+           psp->funcs->query_boot_status)
+               ret = psp->funcs->query_boot_status(psp);
+
+       return ret;
+}
+
 static int psp_hw_start(struct psp_context *psp)
 {
        struct amdgpu_device *adev = psp->adev;
index 7111dd32e66f90e9ce1b55067ba0b4399cd3ba33..5d36ad3f48c74ac298ddec9c27cef9494c19979a 100644 (file)
@@ -134,6 +134,7 @@ struct psp_funcs {
        int (*update_spirom)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
        int (*vbflash_stat)(struct psp_context *psp);
        int (*fatal_error_recovery_quirk)(struct psp_context *psp);
+       int (*query_boot_status)(struct psp_context *psp);
 };
 
 struct ta_funcs {
@@ -537,4 +538,6 @@ int is_psp_fw_valid(struct psp_bin_desc bin);
 
 int amdgpu_psp_wait_for_bootloader(struct amdgpu_device *adev);
 
+int amdgpu_psp_query_boot_status(struct amdgpu_device *adev);
+
 #endif
index 303fbb6a48b66c073dbb4f3a9b0f911c4a939208..84e5987b14e05ecd2c52c9d93635f4d182a34a5f 100644 (file)
@@ -1165,13 +1165,53 @@ static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, s
        }
 }
 
-/* query/inject/cure begin */
-int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
-                                 struct ras_query_if *info)
+static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
+                                               struct ras_query_if *info,
+                                               struct ras_err_data *err_data,
+                                               unsigned int error_query_mode)
 {
+       enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT;
        struct amdgpu_ras_block_object *block_obj = NULL;
+
+       if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY)
+               return -EINVAL;
+
+       if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) {
+               if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
+                       amdgpu_ras_get_ecc_info(adev, err_data);
+               } else {
+                       block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
+                       if (!block_obj || !block_obj->hw_ops) {
+                               dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
+                                            get_ras_block_str(&info->head));
+                               return -EINVAL;
+                       }
+
+                       if (block_obj->hw_ops->query_ras_error_count)
+                               block_obj->hw_ops->query_ras_error_count(adev, err_data);
+
+                       if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
+                           (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
+                           (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
+                               if (block_obj->hw_ops->query_ras_error_status)
+                                       block_obj->hw_ops->query_ras_error_status(adev);
+                       }
+               }
+       } else {
+               /* FIXME: add code to check return value later */
+               amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
+               amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
+       }
+
+       return 0;
+}
+
+/* query/inject/cure begin */
+int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info)
+{
        struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
        struct ras_err_data err_data;
+       unsigned int error_query_mode;
        int ret;
 
        if (!obj)
@@ -1181,27 +1221,14 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
        if (ret)
                return ret;
 
-       if (info->head.block == AMDGPU_RAS_BLOCK__UMC) {
-               amdgpu_ras_get_ecc_info(adev, &err_data);
-       } else {
-               block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
-               if (!block_obj || !block_obj->hw_ops)   {
-                       dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
-                                    get_ras_block_str(&info->head));
-                       ret = -EINVAL;
-                       goto out_fini_err_data;
-               }
-
-               if (block_obj->hw_ops->query_ras_error_count)
-                       block_obj->hw_ops->query_ras_error_count(adev, &err_data);
+       if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode))
+               return -EINVAL;
 
-               if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
-                   (info->head.block == AMDGPU_RAS_BLOCK__GFX) ||
-                   (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) {
-                               if (block_obj->hw_ops->query_ras_error_status)
-                                       block_obj->hw_ops->query_ras_error_status(adev);
-                       }
-       }
+       ret = amdgpu_ras_query_error_status_helper(adev, info,
+                                                  &err_data,
+                                                  error_query_mode);
+       if (ret)
+               goto out_fini_err_data;
 
        amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);
 
@@ -1222,6 +1249,8 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
        struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
        const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+       struct amdgpu_hive_info *hive;
+       int hive_ras_recovery = 0;
 
        if (!block_obj || !block_obj->hw_ops) {
                dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
@@ -1229,15 +1258,22 @@ int amdgpu_ras_reset_error_count(struct amdgpu_device *adev,
                return -EOPNOTSUPP;
        }
 
-       /* skip ras error reset in gpu reset */
-       if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery)) &&
-           mca_funcs && mca_funcs->mca_set_debug_mode)
-               return -EOPNOTSUPP;
-
        if (!amdgpu_ras_is_supported(adev, block) ||
            !amdgpu_ras_get_mca_debug_mode(adev))
                return -EOPNOTSUPP;
 
+       hive = amdgpu_get_xgmi_hive(adev);
+       if (hive) {
+               hive_ras_recovery = atomic_read(&hive->ras_recovery);
+               amdgpu_put_xgmi_hive(hive);
+       }
+
+       /* skip ras error reset in gpu reset */
+       if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
+           hive_ras_recovery) &&
+           mca_funcs && mca_funcs->mca_set_debug_mode)
+               return -EOPNOTSUPP;
+
        if (block_obj->hw_ops->reset_ras_error_count)
                block_obj->hw_ops->reset_ras_error_count(adev);
 
@@ -1528,7 +1564,8 @@ static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
 
-       sysfs_remove_file_from_group(&adev->dev->kobj,
+       if (adev->dev->kobj.sd)
+               sysfs_remove_file_from_group(&adev->dev->kobj,
                                &con->badpages_attr.attr,
                                RAS_FS_NAME);
 }
@@ -1547,7 +1584,8 @@ static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev)
                .attrs = attrs,
        };
 
-       sysfs_remove_group(&adev->dev->kobj, &group);
+       if (adev->dev->kobj.sd)
+               sysfs_remove_group(&adev->dev->kobj, &group);
 
        return 0;
 }
@@ -1594,7 +1632,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
        if (!obj || !obj->attr_inuse)
                return -EINVAL;
 
-       sysfs_remove_file_from_group(&adev->dev->kobj,
+       if (adev->dev->kobj.sd)
+               sysfs_remove_file_from_group(&adev->dev->kobj,
                                &obj->sysfs_attr.attr,
                                RAS_FS_NAME);
        obj->attr_inuse = 0;
@@ -3388,6 +3427,26 @@ bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev)
                return true;
 }
 
+bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
+                                    unsigned int *error_query_mode)
+{
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+       const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs;
+
+       if (!con) {
+               *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY;
+               return false;
+       }
+
+       if (mca_funcs && mca_funcs->mca_set_debug_mode)
+               *error_query_mode =
+                       (con->is_mca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY;
+       else
+               *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY;
+
+       return true;
+}
+
 /* Register each ip ras block into amdgpu ras */
 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
                struct amdgpu_ras_block_object *ras_block_obj)
index 665414c22ca95f48d260e8f25a3ae30c442cab44..19161916ac46b904ee604e5ad23cd2dc6ff77701 100644 (file)
@@ -320,6 +320,12 @@ enum amdgpu_ras_ret {
        AMDGPU_RAS_PT,
 };
 
+enum amdgpu_ras_error_query_mode {
+       AMDGPU_RAS_INVALID_ERROR_QUERY          = 0,
+       AMDGPU_RAS_DIRECT_ERROR_QUERY           = 1,
+       AMDGPU_RAS_FIRMWARE_ERROR_QUERY         = 2,
+};
+
 /* ras error status reisger fields */
 #define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG__SHIFT    0x0
 #define ERR_STATUS_LO__ERR_STATUS_VALID_FLAG_MASK      0x00000001L
@@ -769,6 +775,8 @@ int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_co
 
 void amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable);
 bool amdgpu_ras_get_mca_debug_mode(struct amdgpu_device *adev);
+bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev,
+                                    unsigned int *mode);
 
 int amdgpu_ras_register_ras_block(struct amdgpu_device *adev,
                                struct amdgpu_ras_block_object *ras_block_obj);
index f74347cc087a0d0d0e789da79b70881c15bfd7d9..d65e21914d8c4a9e8f7d03a59391504e08a46f63 100644 (file)
@@ -166,8 +166,12 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev,
                        }
                }
 
-               if (reset)
+               if (reset) {
+                       /* use mode-2 reset for poison consumption */
+                       if (!entry)
+                               con->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE2_RESET;
                        amdgpu_ras_reset_gpu(adev);
+               }
        }
 
        kfree(err_data->err_addr);
index 815b7c34ed33c0e6c4d52ffdace24641b5da083d..65949cc7abb93243aea94860cf87eef2327e849a 100644 (file)
@@ -399,20 +399,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
  *
  * @adev: amdgpu_device pointer
  *
+ * Initialize the entity used for handle management in the kernel driver.
  */
-int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-       struct amdgpu_ring *ring;
-       struct drm_gpu_scheduler *sched;
-       int r;
+       if (ring == &adev->uvd.inst[0].ring) {
+               struct drm_gpu_scheduler *sched = &ring->sched;
+               int r;
 
-       ring = &adev->uvd.inst[0].ring;
-       sched = &ring->sched;
-       r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
-                                 &sched, 1, NULL);
-       if (r) {
-               DRM_ERROR("Failed setting up UVD kernel entity.\n");
-               return r;
+               r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL,
+                                         &sched, 1, NULL);
+               if (r) {
+                       DRM_ERROR("Failed setting up UVD kernel entity.\n");
+                       return r;
+               }
        }
 
        return 0;
index a9f342537c6834e8dea2af8566073690b2ab7bb6..9dfad2f48ef40cac6be41a35698a28dbf1e96875 100644 (file)
@@ -73,7 +73,7 @@ struct amdgpu_uvd {
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
-int amdgpu_uvd_entity_init(struct amdgpu_device *adev);
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 int amdgpu_uvd_prepare_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_suspend(struct amdgpu_device *adev);
 int amdgpu_uvd_resume(struct amdgpu_device *adev);
index 1904edf684071675cf5f10e6c354ba69d20f7afe..0954447f689d9e5477c67a1bf71fb9f53cde063b 100644 (file)
@@ -231,20 +231,20 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
  *
  * @adev: amdgpu_device pointer
  *
+ * Initialize the entity used for handle management in the kernel driver.
  */
-int amdgpu_vce_entity_init(struct amdgpu_device *adev)
+int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
 {
-       struct amdgpu_ring *ring;
-       struct drm_gpu_scheduler *sched;
-       int r;
-
-       ring = &adev->vce.ring[0];
-       sched = &ring->sched;
-       r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
-                                 &sched, 1, NULL);
-       if (r != 0) {
-               DRM_ERROR("Failed setting up VCE run queue.\n");
-               return r;
+       if (ring == &adev->vce.ring[0]) {
+               struct drm_gpu_scheduler *sched = &ring->sched;
+               int r;
+
+               r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
+                                         &sched, 1, NULL);
+               if (r != 0) {
+                       DRM_ERROR("Failed setting up VCE run queue.\n");
+                       return r;
+               }
        }
 
        return 0;
index ea680fc9a6c356aea9e5f7123cbd4bcba452c369..6e53f872d084add51b675a85f72beacf9ba34294 100644 (file)
@@ -55,7 +55,7 @@ struct amdgpu_vce {
 
 int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
 int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
-int amdgpu_vce_entity_init(struct amdgpu_device *adev);
+int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp);
index a0aa624f5a923c1b5abfa18107adeba22ca144e9..3a632c3b1a2cdc4a9d0fce475199c6b034795f42 100644 (file)
@@ -73,9 +73,10 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
 
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
                                        uint32_t reg0, uint32_t reg1,
-                                       uint32_t ref, uint32_t mask)
+                                       uint32_t ref, uint32_t mask,
+                                       uint32_t xcc_inst)
 {
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_inst];
        struct amdgpu_ring *ring = &kiq->ring;
        signed long r, cnt = 0;
        unsigned long flags;
@@ -942,7 +943,7 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev,
        }
 }
 
-static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
+bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
                                                 u32 acc_flags, u32 hwip,
                                                 bool write, u32 *rlcg_flag)
 {
@@ -975,7 +976,7 @@ static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
        return ret;
 }
 
-static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
+u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id)
 {
        struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl;
        uint32_t timeout = 50000;
@@ -1093,3 +1094,13 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
        else
                return RREG32(offset);
 }
+
+bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev)
+{
+       bool xnack_mode = true;
+
+       if (amdgpu_sriov_vf(adev) && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
+               xnack_mode = false;
+
+       return xnack_mode;
+}
index 858ef21ae51596f27291e7a75fe16c0ed262713a..d4207e44141f185bbcd28e28d638b084fda09ec9 100644 (file)
@@ -334,7 +334,8 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
                                        uint32_t reg0, uint32_t rreg1,
-                                       uint32_t ref, uint32_t mask);
+                                       uint32_t ref, uint32_t mask,
+                                       uint32_t xcc_inst);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
@@ -365,4 +366,9 @@ u32 amdgpu_sriov_rreg(struct amdgpu_device *adev,
 bool amdgpu_virt_fw_load_skip_check(struct amdgpu_device *adev,
                        uint32_t ucode_id);
 void amdgpu_virt_post_reset(struct amdgpu_device *adev);
+bool amdgpu_sriov_xnack_support(struct amdgpu_device *adev);
+bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev,
+                                         u32 acc_flags, u32 hwip,
+                                         bool write, u32 *rlcg_flag);
+u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag, u32 xcc_id);
 #endif
index 3cd5977c0709a66634991714a43def35b3e962d4..d1b8afd105c9f6a8e56288f7b3c023171f7b2f75 100644 (file)
@@ -1098,8 +1098,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
                                bo = gem_to_amdgpu_bo(gobj);
                }
                mem = bo->tbo.resource;
-               if (mem->mem_type == TTM_PL_TT ||
-                   mem->mem_type == AMDGPU_PL_PREEMPT)
+               if (mem && (mem->mem_type == TTM_PL_TT ||
+                           mem->mem_type == AMDGPU_PL_PREEMPT))
                        pages_addr = bo->tbo.ttm->dma_address;
        }
 
@@ -1373,6 +1373,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
+ * @ticket: optional reservation ticket used to reserve the VM
  *
  * Make sure all BOs which are moved are updated in the PTs.
  *
@@ -1382,11 +1383,12 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
  * PTs have to be reserved!
  */
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
-                          struct amdgpu_vm *vm)
+                          struct amdgpu_vm *vm,
+                          struct ww_acquire_ctx *ticket)
 {
        struct amdgpu_bo_va *bo_va;
        struct dma_resv *resv;
-       bool clear;
+       bool clear, unlock;
        int r;
 
        spin_lock(&vm->status_lock);
@@ -1409,17 +1411,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
                spin_unlock(&vm->status_lock);
 
                /* Try to reserve the BO to avoid clearing its ptes */
-               if (!adev->debug_vm && dma_resv_trylock(resv))
+               if (!adev->debug_vm && dma_resv_trylock(resv)) {
                        clear = false;
+                       unlock = true;
+               /* The caller is already holding the reservation lock */
+               } else if (ticket && dma_resv_locking_ctx(resv) == ticket) {
+                       clear = false;
+                       unlock = false;
                /* Somebody else is using the BO right now */
-               else
+               } else {
                        clear = true;
+                       unlock = false;
+               }
 
                r = amdgpu_vm_bo_update(adev, bo_va, clear);
                if (r)
                        return r;
 
-               if (!clear)
+               if (unlock)
                        dma_resv_unlock(resv);
                spin_lock(&vm->status_lock);
        }
@@ -2130,7 +2139,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
  * Returns:
  * 0 for success, error for failure.
  */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+                  int32_t xcp_id)
 {
        struct amdgpu_bo *root_bo;
        struct amdgpu_bo_vm *root;
@@ -2149,6 +2159,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
        INIT_LIST_HEAD(&vm->done);
        INIT_LIST_HEAD(&vm->pt_freed);
        INIT_WORK(&vm->pt_free_work, amdgpu_vm_pt_free_work);
+       INIT_KFIFO(vm->faults);
 
        r = amdgpu_vm_init_entities(adev, vm);
        if (r)
@@ -2183,34 +2194,33 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp
                                false, &root, xcp_id);
        if (r)
                goto error_free_delayed;
-       root_bo = &root->bo;
+
+       root_bo = amdgpu_bo_ref(&root->bo);
        r = amdgpu_bo_reserve(root_bo, true);
-       if (r)
-               goto error_free_root;
+       if (r) {
+               amdgpu_bo_unref(&root->shadow);
+               amdgpu_bo_unref(&root_bo);
+               goto error_free_delayed;
+       }
 
+       amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
        r = dma_resv_reserve_fences(root_bo->tbo.base.resv, 1);
        if (r)
-               goto error_unreserve;
-
-       amdgpu_vm_bo_base_init(&vm->root, vm, root_bo);
+               goto error_free_root;
 
        r = amdgpu_vm_pt_clear(adev, vm, root, false);
        if (r)
-               goto error_unreserve;
+               goto error_free_root;
 
        amdgpu_bo_unreserve(vm->root.bo);
-
-       INIT_KFIFO(vm->faults);
+       amdgpu_bo_unref(&root_bo);
 
        return 0;
 
-error_unreserve:
-       amdgpu_bo_unreserve(vm->root.bo);
-
 error_free_root:
-       amdgpu_bo_unref(&root->shadow);
+       amdgpu_vm_pt_free_root(adev, vm);
+       amdgpu_bo_unreserve(vm->root.bo);
        amdgpu_bo_unref(&root_bo);
-       vm->root.bo = NULL;
 
 error_free_delayed:
        dma_fence_put(vm->last_tlb_flush);
index 9c7b5d33b56e9368acd3c55ce2e702400df24c9f..2cd86d2bf73f7af67fd78be273cd57fc68521e11 100644 (file)
@@ -443,7 +443,8 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
                          struct amdgpu_vm *vm,
                          struct dma_fence **fence);
 int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
-                          struct amdgpu_vm *vm);
+                          struct amdgpu_vm *vm,
+                          struct ww_acquire_ctx *ticket);
 void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
                            struct amdgpu_vm *vm, struct amdgpu_bo *bo);
 int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,
index 18f58efc9dc7b2dae32c360c8549a758af9e1f20..08916538a615ff3d072eb5241a97495795c7e32a 100644 (file)
@@ -77,7 +77,16 @@ static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
        return true;
 }
 
+static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head)
+{
+       struct drm_buddy_block *block;
+       u64 size = 0;
 
+       list_for_each_entry(block, head, link)
+               size += amdgpu_vram_mgr_block_size(block);
+
+       return size;
+}
 
 /**
  * DOC: mem_info_vram_total
@@ -516,6 +525,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
        mutex_unlock(&mgr->lock);
 
        vres->base.start = 0;
+       size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
+                    vres->base.size);
        list_for_each_entry(block, &vres->blocks, link) {
                unsigned long start;
 
@@ -523,8 +534,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
                        amdgpu_vram_mgr_block_size(block);
                start >>= PAGE_SHIFT;
 
-               if (start > PFN_UP(vres->base.size))
-                       start -= PFN_UP(vres->base.size);
+               if (start > PFN_UP(size))
+                       start -= PFN_UP(size);
                else
                        start = 0;
                vres->base.start = max(vres->base.start, start);
index 9d5d742ee9d366b0a9c068ae7ca842717bbf1e48..bd20cb3b981984035285b028f535a43002e37ec3 100644 (file)
@@ -103,6 +103,53 @@ static const int walf_pcs_err_noncorrectable_mask_reg_aldebaran[] = {
        smnPCS_GOPX1_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
 };
 
+static const int xgmi3x16_pcs_err_status_reg_v6_4[] = {
+       smnPCS_XGMI3X16_PCS_ERROR_STATUS,
+       smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000
+};
+
+static const int xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[] = {
+       smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK,
+       smnPCS_XGMI3X16_PCS_ERROR_NONCORRECTABLE_MASK + 0x100000
+};
+
+static const u64 xgmi_v6_4_0_mca_base_array[] = {
+       0x11a09200,
+       0x11b09200,
+};
+
+static const char *xgmi_v6_4_0_ras_error_code_ext[32] = {
+       [0x00] = "XGMI PCS DataLossErr",
+       [0x01] = "XGMI PCS TrainingErr",
+       [0x02] = "XGMI PCS FlowCtrlAckErr",
+       [0x03] = "XGMI PCS RxFifoUnderflowErr",
+       [0x04] = "XGMI PCS RxFifoOverflowErr",
+       [0x05] = "XGMI PCS CRCErr",
+       [0x06] = "XGMI PCS BERExceededErr",
+       [0x07] = "XGMI PCS TxMetaDataErr",
+       [0x08] = "XGMI PCS ReplayBufParityErr",
+       [0x09] = "XGMI PCS DataParityErr",
+       [0x0a] = "XGMI PCS ReplayFifoOverflowErr",
+       [0x0b] = "XGMI PCS ReplayFifoUnderflowErr",
+       [0x0c] = "XGMI PCS ElasticFifoOverflowErr",
+       [0x0d] = "XGMI PCS DeskewErr",
+       [0x0e] = "XGMI PCS FlowCtrlCRCErr",
+       [0x0f] = "XGMI PCS DataStartupLimitErr",
+       [0x10] = "XGMI PCS FCInitTimeoutErr",
+       [0x11] = "XGMI PCS RecoveryTimeoutErr",
+       [0x12] = "XGMI PCS ReadySerialTimeoutErr",
+       [0x13] = "XGMI PCS ReadySerialAttemptErr",
+       [0x14] = "XGMI PCS RecoveryAttemptErr",
+       [0x15] = "XGMI PCS RecoveryRelockAttemptErr",
+       [0x16] = "XGMI PCS ReplayAttemptErr",
+       [0x17] = "XGMI PCS SyncHdrErr",
+       [0x18] = "XGMI PCS TxReplayTimeoutErr",
+       [0x19] = "XGMI PCS RxReplayTimeoutErr",
+       [0x1a] = "XGMI PCS LinkSubTxTimeoutErr",
+       [0x1b] = "XGMI PCS LinkSubRxTimeoutErr",
+       [0x1c] = "XGMI PCS RxCMDPktErr",
+};
+
 static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
        {"XGMI PCS DataLossErr",
         SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
@@ -926,7 +973,7 @@ static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg
        WREG32_PCIE(pcs_status_reg, 0);
 }
 
-static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+static void amdgpu_xgmi_legacy_reset_ras_error_count(struct amdgpu_device *adev)
 {
        uint32_t i;
 
@@ -952,6 +999,49 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
        default:
                break;
        }
+
+       switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+       case IP_VERSION(6, 4, 0):
+               for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++)
+                       pcs_clear_status(adev,
+                                       xgmi3x16_pcs_err_status_reg_v6_4[i]);
+               break;
+       default:
+               break;
+       }
+}
+
+static void __xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst, u64 mca_base)
+{
+       WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL);
+}
+
+static void xgmi_v6_4_0_reset_error_count(struct amdgpu_device *adev, int xgmi_inst)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++)
+               __xgmi_v6_4_0_reset_error_count(adev, xgmi_inst, xgmi_v6_4_0_mca_base_array[i]);
+}
+
+static void xgmi_v6_4_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+       int i;
+
+       for_each_inst(i, adev->aid_mask)
+               xgmi_v6_4_0_reset_error_count(adev, i);
+}
+
+static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+{
+       switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+       case IP_VERSION(6, 4, 0):
+               xgmi_v6_4_0_reset_ras_error_count(adev);
+               break;
+       default:
+               amdgpu_xgmi_legacy_reset_ras_error_count(adev);
+               break;
+       }
 }
 
 static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
@@ -969,7 +1059,9 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
 
        if (is_xgmi_pcs) {
                if (amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
-                   IP_VERSION(6, 1, 0)) {
+                   IP_VERSION(6, 1, 0) ||
+                   amdgpu_ip_version(adev, XGMI_HWIP, 0) ==
+                   IP_VERSION(6, 4, 0)) {
                        pcs_ras_fields = &xgmi3x16_pcs_ras_fields[0];
                        field_array_size = ARRAY_SIZE(xgmi3x16_pcs_ras_fields);
                } else {
@@ -1003,11 +1095,11 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
        return 0;
 }
 
-static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
-                                            void *ras_error_status)
+static void amdgpu_xgmi_legacy_query_ras_error_count(struct amdgpu_device *adev,
+                                                    void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
-       int i;
+       int i, supported = 1;
        uint32_t data, mask_data = 0;
        uint32_t ue_cnt = 0, ce_cnt = 0;
 
@@ -1071,7 +1163,25 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                }
                break;
        default:
-               dev_warn(adev->dev, "XGMI RAS error query not supported");
+               supported = 0;
+               break;
+       }
+
+       switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+       case IP_VERSION(6, 4, 0):
+               /* check xgmi3x16 pcs error */
+               for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_v6_4); i++) {
+                       data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_v6_4[i]);
+                       mask_data =
+                               RREG32_PCIE(xgmi3x16_pcs_err_noncorrectable_mask_reg_v6_4[i]);
+                       if (data)
+                               amdgpu_xgmi_query_pcs_error_status(adev, data,
+                                               mask_data, &ue_cnt, &ce_cnt, true, true);
+               }
+               break;
+       default:
+               if (!supported)
+                       dev_warn(adev->dev, "XGMI RAS error query not supported");
                break;
        }
 
@@ -1081,32 +1191,116 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
        err_data->ce_count += ce_cnt;
 }
 
+static enum amdgpu_mca_error_type xgmi_v6_4_0_pcs_mca_get_error_type(struct amdgpu_device *adev, u64 status)
+{
+       const char *error_str;
+       int ext_error_code;
+
+       ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(status);
+
+       error_str = ext_error_code < ARRAY_SIZE(xgmi_v6_4_0_ras_error_code_ext) ?
+               xgmi_v6_4_0_ras_error_code_ext[ext_error_code] : NULL;
+       if (error_str)
+               dev_info(adev->dev, "%s detected\n", error_str);
+
+       switch (ext_error_code) {
+       case 0:
+               return AMDGPU_MCA_ERROR_TYPE_UE;
+       case 6:
+               return AMDGPU_MCA_ERROR_TYPE_CE;
+       default:
+               return -EINVAL;
+       }
+
+       return -EINVAL;
+}
+
+static void __xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, struct amdgpu_smuio_mcm_config_info *mcm_info,
+                                           u64 mca_base, struct ras_err_data *err_data)
+{
+       int xgmi_inst = mcm_info->die_id;
+       u64 status = 0;
+
+       status = RREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS);
+       if (!MCA_REG__STATUS__VAL(status))
+               return;
+
+       switch (xgmi_v6_4_0_pcs_mca_get_error_type(adev, status)) {
+       case AMDGPU_MCA_ERROR_TYPE_UE:
+               amdgpu_ras_error_statistic_ue_count(err_data, mcm_info, 1ULL);
+               break;
+       case AMDGPU_MCA_ERROR_TYPE_CE:
+               amdgpu_ras_error_statistic_ce_count(err_data, mcm_info, 1ULL);
+               break;
+       default:
+               break;
+       }
+
+       WREG64_MCA(xgmi_inst, mca_base, MCA_REG_IDX_STATUS, 0ULL);
+}
+
+static void xgmi_v6_4_0_query_error_count(struct amdgpu_device *adev, int xgmi_inst, struct ras_err_data *err_data)
+{
+       struct amdgpu_smuio_mcm_config_info mcm_info = {
+               .socket_id = adev->smuio.funcs->get_socket_id(adev),
+               .die_id = xgmi_inst,
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(xgmi_v6_4_0_mca_base_array); i++)
+               __xgmi_v6_4_0_query_error_count(adev, &mcm_info, xgmi_v6_4_0_mca_base_array[i], err_data);
+}
+
+static void xgmi_v6_4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+       int i;
+
+       for_each_inst(i, adev->aid_mask)
+               xgmi_v6_4_0_query_error_count(adev, i, err_data);
+}
+
+static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+                                             void *ras_error_status)
+{
+       switch (amdgpu_ip_version(adev, XGMI_HWIP, 0)) {
+       case IP_VERSION(6, 4, 0):
+               xgmi_v6_4_0_query_ras_error_count(adev, ras_error_status);
+               break;
+       default:
+               amdgpu_xgmi_legacy_query_ras_error_count(adev, ras_error_status);
+               break;
+       }
+}
+
 /* Trigger XGMI/WAFL error */
 static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
                        void *inject_if, uint32_t instance_mask)
 {
-       int ret = 0;
+       int ret1, ret2;
        struct ta_ras_trigger_error_input *block_info =
                                (struct ta_ras_trigger_error_input *)inject_if;
 
        if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
                dev_warn(adev->dev, "Failed to disallow df cstate");
 
-       if (amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DISALLOW))
+       ret1 = amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DISALLOW);
+       if (ret1 && ret1 != -EOPNOTSUPP)
                dev_warn(adev->dev, "Failed to disallow XGMI power down");
 
-       ret = psp_ras_trigger_error(&adev->psp, block_info, instance_mask);
+       ret2 = psp_ras_trigger_error(&adev->psp, block_info, instance_mask);
 
        if (amdgpu_ras_intr_triggered())
-               return ret;
+               return ret2;
 
-       if (amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DEFAULT))
+       ret1 = amdgpu_dpm_set_xgmi_plpd_mode(adev, XGMI_PLPD_DEFAULT);
+       if (ret1 && ret1 != -EOPNOTSUPP)
                dev_warn(adev->dev, "Failed to allow XGMI power down");
 
        if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
                dev_warn(adev->dev, "Failed to allow df cstate");
 
-       return ret;
+       return ret2;
 }
 
 struct amdgpu_ras_block_hw_ops  xgmi_ras_hw_ops = {
index d9ccacd06fba87906dc4e625b19463bc7856d209..c8a3bf01743f6381ec218077bcb42790f5a7b741 100644 (file)
@@ -3498,6 +3498,8 @@ static void gfx_v10_0_ring_invalidate_tlbs(struct amdgpu_ring *ring,
 static void gfx_v10_0_update_spm_vmid_internal(struct amdgpu_device *adev,
                                               unsigned int vmid);
 
+static int gfx_v10_0_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state);
 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
@@ -6465,11 +6467,18 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
                nv_grbm_select(adev, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);
                if (adev->gfx.me.mqd_backup[mqd_idx])
-                       memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+                       memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
        } else {
+               mutex_lock(&adev->srbm_mutex);
+               nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+               if (ring->doorbell_index == adev->doorbell_index.gfx_ring0 << 1)
+                       gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+
+               nv_grbm_select(adev, 0, 0, 0, 0);
+               mutex_unlock(&adev->srbm_mutex);
                /* restore mqd with the backup copy */
                if (adev->gfx.me.mqd_backup[mqd_idx])
-                       memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
+                       memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
                /* reset the ring */
                ring->wptr = 0;
                *ring->wptr_cpu_addr = 0;
@@ -6743,7 +6752,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
        if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
                /* reset MQD to a clean status */
                if (adev->gfx.kiq[0].mqd_backup)
-                       memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
+                       memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
 
                /* reset ring buffer */
                ring->wptr = 0;
@@ -6766,7 +6775,7 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
                mutex_unlock(&adev->srbm_mutex);
 
                if (adev->gfx.kiq[0].mqd_backup)
-                       memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
+                       memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
        }
 
        return 0;
@@ -6787,11 +6796,11 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
                mutex_unlock(&adev->srbm_mutex);
 
                if (adev->gfx.mec.mqd_backup[mqd_idx])
-                       memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+                       memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
        } else {
                /* restore MQD to a clean status */
                if (adev->gfx.mec.mqd_backup[mqd_idx])
-                       memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+                       memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
                /* reset ring buffer */
                ring->wptr = 0;
                atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
@@ -7172,6 +7181,13 @@ static int gfx_v10_0_hw_fini(void *handle)
        amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
        amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 
+       /* WA added for Vangogh asic fixing the SMU suspend failure
+        * It needs to set power gating again during gfxoff control
+        * otherwise the gfxoff disallowing will be failed to set.
+        */
+       if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 1))
+               gfx_v10_0_set_powergating_state(handle, AMD_PG_STATE_UNGATE);
+
        if (!adev->no_hw_access) {
                if (amdgpu_async_gfx_ring) {
                        if (amdgpu_gfx_disable_kgq(adev, 0))
index fd22943685f7d56c8da49220dddc0b92f6b3a6f8..0c6133cc5e5780b55e48b26b21e23df3fe11ebd6 100644 (file)
@@ -155,6 +155,7 @@ static void gfx11_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue
 {
        amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_RESOURCES, 6));
        amdgpu_ring_write(kiq_ring, PACKET3_SET_RESOURCES_VMID_MASK(0) |
+                         PACKET3_SET_RESOURCES_UNMAP_LATENTY(0xa) | /* unmap_latency: 0xa (~ 1s) */
                          PACKET3_SET_RESOURCES_QUEUE_TYPE(0)); /* vmid_mask:0 queue_type:0 (KIQ) */
        amdgpu_ring_write(kiq_ring, lower_32_bits(queue_mask)); /* queue mask lo */
        amdgpu_ring_write(kiq_ring, upper_32_bits(queue_mask)); /* queue mask hi */
@@ -3714,11 +3715,11 @@ static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
                soc21_grbm_select(adev, 0, 0, 0, 0);
                mutex_unlock(&adev->srbm_mutex);
                if (adev->gfx.me.mqd_backup[mqd_idx])
-                       memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+                       memcpy_fromio(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
        } else {
                /* restore mqd with the backup copy */
                if (adev->gfx.me.mqd_backup[mqd_idx])
-                       memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
+                       memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd));
                /* reset the ring */
                ring->wptr = 0;
                *ring->wptr_cpu_addr = 0;
@@ -4007,7 +4008,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
        if (amdgpu_in_reset(adev)) { /* for GPU_RESET case */
                /* reset MQD to a clean status */
                if (adev->gfx.kiq[0].mqd_backup)
-                       memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
+                       memcpy_toio(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(*mqd));
 
                /* reset ring buffer */
                ring->wptr = 0;
@@ -4030,7 +4031,7 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
                mutex_unlock(&adev->srbm_mutex);
 
                if (adev->gfx.kiq[0].mqd_backup)
-                       memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
+                       memcpy_fromio(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(*mqd));
        }
 
        return 0;
@@ -4051,11 +4052,11 @@ static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
                mutex_unlock(&adev->srbm_mutex);
 
                if (adev->gfx.mec.mqd_backup[mqd_idx])
-                       memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+                       memcpy_fromio(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
        } else {
                /* restore MQD to a clean status */
                if (adev->gfx.mec.mqd_backup[mqd_idx])
-                       memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+                       memcpy_toio(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
                /* reset ring buffer */
                ring->wptr = 0;
                atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0);
index 41bbabd9ad4db5c9324055d53b5765b92a8cafdf..40d06d32bb745dca44bad1cc0055c28f89483c72 100644 (file)
@@ -1102,6 +1102,7 @@ static void gfx_v9_4_3_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev)
                reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regGRBM_GFX_INDEX);
                reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, GET_INST(GC, xcc_id), regRLC_SPARE_INT);
        }
+       adev->gfx.rlc.rlcg_reg_access_supported = true;
 }
 
 static int gfx_v9_4_3_rlc_init(struct amdgpu_device *adev)
@@ -2738,16 +2739,16 @@ static void gfx_v9_4_3_xcc_set_compute_eop_interrupt_state(
 
        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
-               mec_int_cntl = RREG32(mec_int_cntl_reg);
+               mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
                mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                                             TIME_STAMP_INT_ENABLE, 0);
-               WREG32(mec_int_cntl_reg, mec_int_cntl);
+               WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
-               mec_int_cntl = RREG32(mec_int_cntl_reg);
+               mec_int_cntl = RREG32_XCC(mec_int_cntl_reg, xcc_id);
                mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                                             TIME_STAMP_INT_ENABLE, 1);
-               WREG32(mec_int_cntl_reg, mec_int_cntl);
+               WREG32_XCC(mec_int_cntl_reg, mec_int_cntl, xcc_id);
                break;
        default:
                break;
@@ -3799,6 +3800,27 @@ static void gfx_v9_4_3_inst_query_ras_err_count(struct amdgpu_device *adev,
                }
        }
 
+       /* handle extra register entries of UE */
+       for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
+               for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
+                       for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
+                               /* no need to select if instance number is 1 */
+                               if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
+                                       gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
+                                       gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
+
+                               amdgpu_ras_inst_query_ras_error_count(adev,
+                                       &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
+                                       1,
+                                       gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].mem_id_ent,
+                                       gfx_v9_4_3_ras_mem_list_array[gfx_v9_4_3_ue_reg_list[i].mem_id_type].size,
+                                       GET_INST(GC, xcc_id),
+                                       AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+                                       &ue_count);
+                       }
+               }
+       }
+
        gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
                        xcc_id);
        mutex_unlock(&adev->grbm_idx_mutex);
@@ -3838,6 +3860,23 @@ static void gfx_v9_4_3_inst_reset_ras_err_count(struct amdgpu_device *adev,
                }
        }
 
+       /* handle extra register entries of UE */
+       for (; i < ARRAY_SIZE(gfx_v9_4_3_ue_reg_list); i++) {
+               for (j = 0; j < gfx_v9_4_3_ue_reg_list[i].se_num; j++) {
+                       for (k = 0; k < gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst; k++) {
+                               /* no need to select if instance number is 1 */
+                               if (gfx_v9_4_3_ue_reg_list[i].se_num > 1 ||
+                                       gfx_v9_4_3_ue_reg_list[i].reg_entry.reg_inst > 1)
+                                       gfx_v9_4_3_xcc_select_se_sh(adev, j, 0, k, xcc_id);
+
+                               amdgpu_ras_inst_reset_ras_error_count(adev,
+                                       &(gfx_v9_4_3_ue_reg_list[i].reg_entry),
+                                       1,
+                                       GET_INST(GC, xcc_id));
+                       }
+               }
+       }
+
        gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
                        xcc_id);
        mutex_unlock(&adev->grbm_idx_mutex);
@@ -4300,7 +4339,7 @@ const struct amdgpu_ip_block_version gfx_v9_4_3_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GFX,
        .major = 9,
        .minor = 4,
-       .rev = 0,
+       .rev = 3,
        .funcs = &gfx_v9_4_3_ip_funcs,
 };
 
index d8a4fddab9c1d074855c6f8bf52def44cb254a38..0ec7b061d7c2035ac21a1a8b9c858de58d126396 100644 (file)
@@ -268,7 +268,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
-                               1 << vmid);
+                               1 << vmid, GET_INST(GC, 0));
                return;
        }
 
@@ -672,6 +672,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
        /* add the xgmi offset of the physical node */
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
 
+       amdgpu_gmc_set_agp_default(adev, mc);
        amdgpu_gmc_vram_location(adev, &adev->gmc, base);
        amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
        if (!amdgpu_sriov_vf(adev))
index 4713a62ad586f9a54a283030f3bca0a8259c8ed4..6dce9b29f675631c2049d1f2ef50b5ea64bff7fc 100644 (file)
@@ -229,7 +229,7 @@ static void gmc_v11_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        if ((adev->gfx.kiq[0].ring.sched.ready || adev->mes.ring.sched.ready) &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
-                               1 << vmid);
+                               1 << vmid, GET_INST(GC, 0));
                return;
        }
 
@@ -637,6 +637,7 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
 
        base = adev->mmhub.funcs->get_fb_location(adev);
 
+       amdgpu_gmc_set_agp_default(adev, mc);
        amdgpu_gmc_vram_location(adev, &adev->gmc, base);
        amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH);
        if (!amdgpu_sriov_vf(adev) ||
index 7f66954fd3027c2c1a683ecfc67bf5343129fc5c..42e103d7077d52d5bbe556f70f2b03bb0d5ae8db 100644 (file)
@@ -211,6 +211,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
 
        base <<= 24;
 
+       amdgpu_gmc_set_agp_default(adev, mc);
        amdgpu_gmc_vram_location(adev, mc, base);
        amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
 }
index 61ca1a82b651aa5f603f3a8bfeea2ad70390fd61..efc16e580f1e27e384b7c80323c72d0e59fba473 100644 (file)
@@ -239,6 +239,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
 
        base <<= 24;
 
+       amdgpu_gmc_set_agp_default(adev, mc);
        amdgpu_gmc_vram_location(adev, mc, base);
        amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
 }
index fa59749c2aefa63c91d1b82c8339890725a6a5ae..ff4ae73d27ecd26aaf399bdfe158e22c1de3009f 100644 (file)
@@ -413,6 +413,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
                base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
        base <<= 24;
 
+       amdgpu_gmc_set_agp_default(adev, mc);
        amdgpu_gmc_vram_location(adev, mc, base);
        amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
 }
index b66c5f7e1c56572e183f3aed4ffd105526c26f98..bde25eb4ed8e2cb1e3f0adf62897cfab076db7db 100644 (file)
@@ -817,7 +817,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                        uint32_t vmhub, uint32_t flush_type)
 {
        bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
-       u32 j, inv_req, tmp, sem, req, ack;
+       u32 j, inv_req, tmp, sem, req, ack, inst;
        const unsigned int eng = 17;
        struct amdgpu_vmhub *hub;
 
@@ -832,13 +832,17 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        /* This is necessary for a HW workaround under SRIOV as well
         * as GFXOFF under bare metal
         */
-       if (adev->gfx.kiq[0].ring.sched.ready &&
+       if (vmhub >= AMDGPU_MMHUB0(0))
+               inst = GET_INST(GC, 0);
+       else
+               inst = vmhub;
+       if (adev->gfx.kiq[inst].ring.sched.ready &&
            (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
                uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
                uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 
                amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
-                                                  1 << vmid);
+                                                  1 << vmid, inst);
                return;
        }
 
@@ -856,9 +860,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                for (j = 0; j < adev->usec_timeout; j++) {
                        /* a read return value of 1 means semaphore acquire */
                        if (vmhub >= AMDGPU_MMHUB0(0))
-                               tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem);
+                               tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, sem, inst);
                        else
-                               tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem);
+                               tmp = RREG32_SOC15_IP_NO_KIQ(GC, sem, inst);
                        if (tmp & 0x1)
                                break;
                        udelay(1);
@@ -869,9 +873,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
        }
 
        if (vmhub >= AMDGPU_MMHUB0(0))
-               WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req);
+               WREG32_SOC15_IP_NO_KIQ(MMHUB, req, inv_req, inst);
        else
-               WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req);
+               WREG32_SOC15_IP_NO_KIQ(GC, req, inv_req, inst);
 
        /*
         * Issue a dummy read to wait for the ACK register to
@@ -884,9 +888,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 
        for (j = 0; j < adev->usec_timeout; j++) {
                if (vmhub >= AMDGPU_MMHUB0(0))
-                       tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack);
+                       tmp = RREG32_SOC15_IP_NO_KIQ(MMHUB, ack, inst);
                else
-                       tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack);
+                       tmp = RREG32_SOC15_IP_NO_KIQ(GC, ack, inst);
                if (tmp & (1 << vmid))
                        break;
                udelay(1);
@@ -899,9 +903,9 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                 * write with 0 means semaphore release
                 */
                if (vmhub >= AMDGPU_MMHUB0(0))
-                       WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0);
+                       WREG32_SOC15_IP_NO_KIQ(MMHUB, sem, 0, inst);
                else
-                       WREG32_SOC15_IP_NO_KIQ(GC, sem, 0);
+                       WREG32_SOC15_IP_NO_KIQ(GC, sem, 0, inst);
        }
 
        spin_unlock(&adev->gmc.invalidate_lock);
@@ -1176,7 +1180,10 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev,
                if (uncached) {
                        mtype = MTYPE_UC;
                } else if (ext_coherent) {
-                       mtype = is_local ? MTYPE_CC : MTYPE_UC;
+                       if (adev->rev_id)
+                               mtype = is_local ? MTYPE_CC : MTYPE_UC;
+                       else
+                               mtype = MTYPE_UC;
                } else if (adev->flags & AMD_IS_APU) {
                        mtype = is_local ? mtype_local : MTYPE_NC;
                } else {
@@ -1297,7 +1304,7 @@ static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev,
 
                        *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
                                 AMDGPU_PTE_MTYPE_VG10(mtype_local);
-               } else {
+               } else if (adev->rev_id) {
                        /* MTYPE_UC case */
                        *flags = (*flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) |
                                 AMDGPU_PTE_MTYPE_VG10(MTYPE_CC);
@@ -1614,6 +1621,8 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
 {
        u64 base = adev->mmhub.funcs->get_fb_location(adev);
 
+       amdgpu_gmc_set_agp_default(adev, mc);
+
        /* add the xgmi offset of the physical node */
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
        if (adev->gmc.xgmi.connected_to_cpu) {
index 3f3a6445c006f2731e72b02198d2d9c4a5008ec6..49e934975719772ca3195747c24b9c941ca057ab 100644 (file)
@@ -145,6 +145,10 @@ static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
                break;
        }
 
+       /* Do not program registers if VF */
+       if (amdgpu_sriov_vf(adev))
+               return;
+
        WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
 
        if (amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 0))
index 9a8ec4d7e3334c84d29c81b5ad625193a9edddbd..82b6b62c170ba4f7d56be90e0733611894325cae 100644 (file)
@@ -654,9 +654,11 @@ static void jpeg_v4_0_3_dec_ring_set_wptr(struct amdgpu_ring *ring)
  */
 static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
 {
-       amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
-               0, 0, PACKETJ_TYPE0));
-       amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+       if (!amdgpu_sriov_vf(ring->adev)) {
+               amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+                       0, 0, PACKETJ_TYPE0));
+               amdgpu_ring_write(ring, 0x62a04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+       }
 
        amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
                0, 0, PACKETJ_TYPE0));
@@ -672,9 +674,11 @@ static void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring)
  */
 static void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring)
 {
-       amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
-               0, 0, PACKETJ_TYPE0));
-       amdgpu_ring_write(ring, 0x62a04);
+       if (!amdgpu_sriov_vf(ring->adev)) {
+               amdgpu_ring_write(ring, PACKETJ(regUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+                       0, 0, PACKETJ_TYPE0));
+               amdgpu_ring_write(ring, 0x62a04);
+       }
 
        amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
                0, 0, PACKETJ_TYPE0));
index e523627cfe255f1c4cf1ba51fb10aad9ee4e5bad..df218d5ca775cd17c367cacedc9be49dc103196b 100644 (file)
@@ -28,6 +28,7 @@
 #include "nbio/nbio_2_3_offset.h"
 #include "nbio/nbio_2_3_sh_mask.h"
 #include <uapi/linux/kfd_ioctl.h>
+#include <linux/device.h>
 #include <linux/pci.h>
 
 #define smnPCIE_CONFIG_CNTL    0x11180044
@@ -361,7 +362,7 @@ static void nbio_v2_3_enable_aspm(struct amdgpu_device *adev,
 
                data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
 
-               if (pci_is_thunderbolt_attached(adev->pdev))
+               if (dev_is_removable(&adev->pdev->dev))
                        data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT  << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
                else
                        data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
@@ -480,7 +481,7 @@ static void nbio_v2_3_program_aspm(struct amdgpu_device *adev)
 
        def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
        data |= NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT;
-       if (pci_is_thunderbolt_attached(adev->pdev))
+       if (dev_is_removable(&adev->pdev->dev))
                data |= NAVI10_PCIE__LC_L1_INACTIVITY_TBT_DEFAULT  << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
        else
                data |= NAVI10_PCIE__LC_L1_INACTIVITY_DEFAULT << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT;
index 4142e2fcd8667571d34649dfb26e2c29431c9bad..3cf4684d0d3f3c2cef3138faa4bc264d838f6b0c 100644 (file)
@@ -759,6 +759,83 @@ static int psp_v13_0_fatal_error_recovery_quirk(struct psp_context *psp)
        return 0;
 }
 
+
+static void psp_v13_0_boot_error_reporting(struct amdgpu_device *adev,
+                                          uint32_t inst,
+                                          uint32_t boot_error)
+{
+       uint32_t socket_id;
+       uint32_t aid_id;
+       uint32_t hbm_id;
+       uint32_t reg_data;
+
+       socket_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, SOCKET_ID);
+       aid_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, AID_ID);
+       hbm_id = REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, HBM_ID);
+
+       reg_data = RREG32_SOC15(MP0, inst, regMP0_SMN_C2PMSG_109);
+       dev_info(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
+                socket_id, aid_id, reg_data);
+
+       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_MEM_TRAINING))
+               dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
+                        socket_id, aid_id, hbm_id);
+
+       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_FW_LOAD))
+               dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
+                        socket_id, aid_id);
+
+       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_WAFL_LINK_TRAINING))
+               dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
+                        socket_id, aid_id);
+
+       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_XGMI_LINK_TRAINING))
+               dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
+                        socket_id, aid_id);
+
+       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_CP_LINK_TRAINING))
+               dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
+                        socket_id, aid_id);
+
+       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_USR_DP_LINK_TRAINING))
+               dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
+                        socket_id, aid_id);
+
+       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_MEM_TEST))
+               dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
+                        socket_id, aid_id, hbm_id);
+
+       if (REG_GET_FIELD(boot_error, MP0_SMN_C2PMSG_126, GPU_ERR_HBM_BIST_TEST))
+               dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
+                        socket_id, aid_id, hbm_id);
+}
+
+static int psp_v13_0_query_boot_status(struct psp_context *psp)
+{
+       struct amdgpu_device *adev = psp->adev;
+       int inst_mask = adev->aid_mask;
+       uint32_t reg_data;
+       uint32_t i;
+       int ret = 0;
+
+       if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))
+               return 0;
+
+       if (RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_59) < 0x00a10007)
+               return 0;
+
+       for_each_inst(i, inst_mask) {
+               reg_data = RREG32_SOC15(MP0, i, regMP0_SMN_C2PMSG_126);
+               if (!REG_GET_FIELD(reg_data, MP0_SMN_C2PMSG_126, BOOT_STATUS)) {
+                       psp_v13_0_boot_error_reporting(adev, i, reg_data);
+                       ret = -EINVAL;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 static const struct psp_funcs psp_v13_0_funcs = {
        .init_microcode = psp_v13_0_init_microcode,
        .wait_for_bootloader = psp_v13_0_wait_for_bootloader_steady_state,
@@ -781,6 +858,7 @@ static const struct psp_funcs psp_v13_0_funcs = {
        .update_spirom = psp_v13_0_update_spirom,
        .vbflash_stat = psp_v13_0_vbflash_status,
        .fatal_error_recovery_quirk = psp_v13_0_fatal_error_recovery_quirk,
+       .query_boot_status = psp_v13_0_query_boot_status,
 };
 
 void psp_v13_0_set_psp_funcs(struct psp_context *psp)
index c46bc6aa4f48f8d61a7ce28b1fd3c89a55c761cf..0f24af6f28102bc490d6bc2ecdc890294e5f1905 100644 (file)
@@ -427,6 +427,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
                                      uint32_t inst_mask)
 {
        struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
+       u32 doorbell_offset, doorbell;
        u32 rb_cntl, ib_cntl;
        int i, unset = 0;
 
@@ -444,6 +445,18 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
                ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL);
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0);
                WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl);
+
+               if (sdma[i]->use_doorbell) {
+                       doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
+                       doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
+
+                       doorbell = REG_SET_FIELD(doorbell, SDMA_GFX_DOORBELL, ENABLE, 0);
+                       doorbell_offset = REG_SET_FIELD(doorbell_offset,
+                                       SDMA_GFX_DOORBELL_OFFSET,
+                                       OFFSET, 0);
+                       WREG32_SDMA(i, regSDMA_GFX_DOORBELL, doorbell);
+                       WREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET, doorbell_offset);
+               }
        }
 }
 
@@ -631,12 +644,6 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
        rb_cntl = sdma_v4_4_2_rb_cntl(ring, rb_cntl);
        WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
 
-       /* Initialize the ring buffer's read and write pointers */
-       WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
-       WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
-       WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
-       WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
-
        /* set the wb address whether it's enabled or not */
        WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_ADDR_HI,
               upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
@@ -654,6 +661,12 @@ static void sdma_v4_4_2_gfx_resume(struct amdgpu_device *adev, unsigned int i)
        /* before programing wptr to a less value, need set minor_ptr_update first */
        WREG32_SDMA(i, regSDMA_GFX_MINOR_PTR_UPDATE, 1);
 
+       /* Initialize the ring buffer's read and write pointers */
+       WREG32_SDMA(i, regSDMA_GFX_RB_RPTR, 0);
+       WREG32_SDMA(i, regSDMA_GFX_RB_RPTR_HI, 0);
+       WREG32_SDMA(i, regSDMA_GFX_RB_WPTR, 0);
+       WREG32_SDMA(i, regSDMA_GFX_RB_WPTR_HI, 0);
+
        doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL);
        doorbell_offset = RREG32_SDMA(i, regSDMA_GFX_DOORBELL_OFFSET);
 
@@ -2048,7 +2061,7 @@ const struct amdgpu_ip_block_version sdma_v4_4_2_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_SDMA,
        .major = 4,
        .minor = 4,
-       .rev = 0,
+       .rev = 2,
        .funcs = &sdma_v4_4_2_ip_funcs,
 };
 
index da683afa0222f188dda74788863fd82ec5a35bcd..242b24f73c1744c8743dd703322a0657aba953fb 100644 (file)
@@ -69,7 +69,7 @@
 
 #define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP, 0)
 
-#define RREG32_SOC15_IP_NO_KIQ(ip, reg) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0)
+#define RREG32_SOC15_IP_NO_KIQ(ip, reg, inst) __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst)
 
 #define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
        __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
@@ -86,8 +86,8 @@
 #define WREG32_SOC15_IP(ip, reg, value) \
         __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP, 0)
 
-#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value) \
-        __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, 0)
+#define WREG32_SOC15_IP_NO_KIQ(ip, reg, value, inst) \
+        __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ, ip##_HWIP, inst)
 
 #define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
        __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
 
 /* for GC only */
 #define RREG32_RLC(reg) \
-       __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP)
+       __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP, 0)
 
 #define WREG32_RLC_NO_KIQ(reg, value, hwip) \
        __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip, 0)
                        + adev->asic_funcs->encode_ext_smn_addressing(ext), \
                        value) \
 
+#define RREG64_MCA(ext, mca_base, idx) \
+       RREG64_PCIE_EXT(adev->asic_funcs->encode_ext_smn_addressing(ext) + mca_base + (idx * 8))
+
+#define WREG64_MCA(ext, mca_base, idx, val) \
+       WREG64_PCIE_EXT(adev->asic_funcs->encode_ext_smn_addressing(ext) + mca_base + (idx * 8), val)
+
 #endif
index d5083c54933015d038b59a3b05e1b404372ede8b..48c6efcdeac974ba109224510442b0488e1875d0 100644 (file)
@@ -381,6 +381,7 @@ soc21_asic_reset_method(struct amdgpu_device *adev)
                return AMD_RESET_METHOD_MODE1;
        case IP_VERSION(13, 0, 4):
        case IP_VERSION(13, 0, 11):
+       case IP_VERSION(14, 0, 0):
                return AMD_RESET_METHOD_MODE2;
        default:
                if (amdgpu_dpm_is_baco_supported(adev))
index 743d2f68b09020f7cbe07f6560456c84f274ab5e..e9c2ff74f0bc1d6f530a5433b2383072289b0940 100644 (file)
@@ -88,16 +88,15 @@ static void umc_v12_0_reset_error_count(struct amdgpu_device *adev)
                umc_v12_0_reset_error_count_per_channel, NULL);
 }
 
-static bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status)
+bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status)
 {
        return ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
-               (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
-               REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+               (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
                REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
                REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1));
 }
 
-static bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status)
+bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status)
 {
        return (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
                (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1 ||
index 4885b9fff2721753126cdf3aa1a576f9d587ba2e..b34b1e358f8b823f439cfa437b870726aba6984b 100644 (file)
                (pa) |= (UMC_V12_0_CHANNEL_HASH_CH6(channel_idx, pa) << UMC_V12_0_PA_CH6_BIT); \
        } while (0)
 
+bool umc_v12_0_is_uncorrectable_error(uint64_t mc_umc_status);
+bool umc_v12_0_is_correctable_error(uint64_t mc_umc_status);
+
 extern const uint32_t
        umc_v12_0_channel_idx_tbl[]
                        [UMC_V12_0_UMC_INSTANCE_NUM]
index 58a8f78c003c7a0634a3a5f1d2d6bcf6858fad20..a6006f231c655903b626750726495bdcd6a508e2 100644 (file)
@@ -577,8 +577,6 @@ static int uvd_v3_1_sw_init(void *handle)
        ptr += ucode_len;
        memcpy(&adev->uvd.keyselect, ptr, 4);
 
-       r = amdgpu_uvd_entity_init(adev);
-
        return r;
 }
 
index d3b1e31f545032f97b96d2b836bab5bda8b989d8..1aa09ad7bbe3e05f2b5854982e78e149ebefde5e 100644 (file)
@@ -127,8 +127,6 @@ static int uvd_v4_2_sw_init(void *handle)
        if (r)
                return r;
 
-       r = amdgpu_uvd_entity_init(adev);
-
        return r;
 }
 
index 5a8116437abf6f95c69e3393887b8b3acc9cb733..f8b229b75435844054d6d4f2e4b38ba82923f80e 100644 (file)
@@ -125,8 +125,6 @@ static int uvd_v5_0_sw_init(void *handle)
        if (r)
                return r;
 
-       r = amdgpu_uvd_entity_init(adev);
-
        return r;
 }
 
index 74c09230aeb32bd38b00551cfbd3c711f85d9294..a9a6880f44e3302bada40d798168219386a505d7 100644 (file)
@@ -432,8 +432,6 @@ static int uvd_v6_0_sw_init(void *handle)
                }
        }
 
-       r = amdgpu_uvd_entity_init(adev);
-
        return r;
 }
 
index 1c42cf10cc2937546d1e003e1b72bd25c1d563ab..6068b784dc6938d0acff333d715108811164a13d 100644 (file)
@@ -480,10 +480,6 @@ static int uvd_v7_0_sw_init(void *handle)
        if (r)
                return r;
 
-       r = amdgpu_uvd_entity_init(adev);
-       if (r)
-               return r;
-
        r = amdgpu_virt_alloc_mm_table(adev);
        if (r)
                return r;
index 67eb01fef789b9ab78eae3d774561e10458733a8..a08e7abca423bd3db15c8d9f30ce213550c93349 100644 (file)
@@ -441,8 +441,6 @@ static int vce_v2_0_sw_init(void *handle)
                        return r;
        }
 
-       r = amdgpu_vce_entity_init(adev);
-
        return r;
 }
 
index 18f6e62af33984b18e42b1f9f7e22a69de9a80ee..f4760748d34998abb9d8a0b0856419dbbd003ef0 100644 (file)
@@ -450,8 +450,6 @@ static int vce_v3_0_sw_init(void *handle)
                        return r;
        }
 
-       r = amdgpu_vce_entity_init(adev);
-
        return r;
 }
 
index e0b70cd3b697c53de7128f5bc28f5924e8848ba7..06d787385ad460f9e78ff15b527d1b3a102d8eff 100644 (file)
@@ -486,11 +486,6 @@ static int vce_v4_0_sw_init(void *handle)
                        return r;
        }
 
-
-       r = amdgpu_vce_entity_init(adev);
-       if (r)
-               return r;
-
        r = amdgpu_virt_alloc_mm_table(adev);
        if (r)
                return r;
index 0e792a8496d656f9bd8f4a717adbb1c6a13d1653..cd8e459201f18ca0fd40dfa92c800c5a9271f6e4 100644 (file)
@@ -1404,6 +1404,66 @@ static int kfd_fill_gpu_cache_info_from_gfx_config(struct kfd_dev *kdev,
        return i;
 }
 
+static int kfd_fill_gpu_cache_info_from_gfx_config_v2(struct kfd_dev *kdev,
+                                                  struct kfd_gpu_cache_info *pcache_info)
+{
+       struct amdgpu_device *adev = kdev->adev;
+       int i = 0;
+
+       /* TCP L1 Cache per CU */
+       if (adev->gfx.config.gc_tcp_size_per_cu) {
+               pcache_info[i].cache_size = adev->gfx.config.gc_tcp_size_per_cu;
+               pcache_info[i].cache_level = 1;
+               pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+                                       CRAT_CACHE_FLAGS_DATA_CACHE |
+                                       CRAT_CACHE_FLAGS_SIMD_CACHE);
+               pcache_info[i].num_cu_shared = 1;
+               i++;
+       }
+       /* Scalar L1 Instruction Cache per SQC */
+       if (adev->gfx.config.gc_l1_instruction_cache_size_per_sqc) {
+               pcache_info[i].cache_size =
+                       adev->gfx.config.gc_l1_instruction_cache_size_per_sqc;
+               pcache_info[i].cache_level = 1;
+               pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+                                       CRAT_CACHE_FLAGS_INST_CACHE |
+                                       CRAT_CACHE_FLAGS_SIMD_CACHE);
+               pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_cu_per_sqc;
+               i++;
+       }
+       /* Scalar L1 Data Cache per SQC */
+       if (adev->gfx.config.gc_l1_data_cache_size_per_sqc) {
+               pcache_info[i].cache_size = adev->gfx.config.gc_l1_data_cache_size_per_sqc;
+               pcache_info[i].cache_level = 1;
+               pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+                                       CRAT_CACHE_FLAGS_DATA_CACHE |
+                                       CRAT_CACHE_FLAGS_SIMD_CACHE);
+               pcache_info[i].num_cu_shared = adev->gfx.config.gc_num_cu_per_sqc;
+               i++;
+       }
+       /* L2 Data Cache per GPU (Total Tex Cache) */
+       if (adev->gfx.config.gc_tcc_size) {
+               pcache_info[i].cache_size = adev->gfx.config.gc_tcc_size;
+               pcache_info[i].cache_level = 2;
+               pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+                                       CRAT_CACHE_FLAGS_DATA_CACHE |
+                                       CRAT_CACHE_FLAGS_SIMD_CACHE);
+               pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+               i++;
+       }
+       /* L3 Data Cache per GPU */
+       if (adev->gmc.mall_size) {
+               pcache_info[i].cache_size = adev->gmc.mall_size / 1024;
+               pcache_info[i].cache_level = 3;
+               pcache_info[i].flags = (CRAT_CACHE_FLAGS_ENABLED |
+                                       CRAT_CACHE_FLAGS_DATA_CACHE |
+                                       CRAT_CACHE_FLAGS_SIMD_CACHE);
+               pcache_info[i].num_cu_shared = adev->gfx.config.max_cu_per_sh;
+               i++;
+       }
+       return i;
+}
+
 int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pcache_info)
 {
        int num_of_cache_types = 0;
@@ -1461,10 +1521,14 @@ int kfd_get_gpu_cache_info(struct kfd_node *kdev, struct kfd_gpu_cache_info **pc
                        num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
                        break;
                case IP_VERSION(9, 4, 2):
-               case IP_VERSION(9, 4, 3):
                        *pcache_info = aldebaran_cache_info;
                        num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
                        break;
+               case IP_VERSION(9, 4, 3):
+                       num_of_cache_types =
+                               kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd,
+                                                                       *pcache_info);
+                       break;
                case IP_VERSION(9, 1, 0):
                case IP_VERSION(9, 2, 2):
                        *pcache_info = raven_cache_info;
index fbf053001af9782abd03ed9f3707aa967610d43e..7a33e06f5c90093c775421927d45b2afce726957 100644 (file)
@@ -1416,8 +1416,13 @@ bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
                 * per-process XNACK mode selection. But let the dev->noretry
                 * setting still influence the default XNACK mode.
                 */
-               if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev))
+               if (supported && KFD_SUPPORT_XNACK_PER_PROCESS(dev)) {
+                       if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) {
+                               pr_debug("SRIOV platform xnack not supported\n");
+                               return false;
+                       }
                        continue;
+               }
 
                /* GFXv10 and later GPUs do not support shader preemption
                 * during page faults. This can lead to poor QoS for queue
index e67d06a4280954f545cb88c34219e38af7cab9a1..f2f3c338fd9446606a65a5cd5126e7a317ced82e 100644 (file)
@@ -1255,9 +1255,11 @@ svm_range_get_pte_flags(struct kfd_node *node,
                }
                break;
        case IP_VERSION(9, 4, 3):
-               mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
-                             (amdgpu_mtype_local == 2 || ext_coherent ?
-                                       AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW);
+               if (ext_coherent)
+                       mtype_local = node->adev->rev_id ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_UC;
+               else
+                       mtype_local = amdgpu_mtype_local == 1 ? AMDGPU_VM_MTYPE_NC :
+                               amdgpu_mtype_local == 2 ? AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
                snoop = true;
                if (uncached) {
                        mapping_flags |= AMDGPU_VM_MTYPE_UC;
index c1e10f42db289b7a2d3eb5d08613e009bfde4a22..057284bf50bbea43c819daa6a8d9f14b85ab7abe 100644 (file)
@@ -1602,10 +1602,13 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
        unsigned int cu_sibling_map_mask;
        int first_active_cu;
        int i, j, k, xcc, start, end;
+       int num_xcc = NUM_XCC(knode->xcc_mask);
        struct kfd_cache_properties *pcache = NULL;
+       enum amdgpu_memory_partition mode;
+       struct amdgpu_device *adev = knode->adev;
 
        start = ffs(knode->xcc_mask) - 1;
-       end = start + NUM_XCC(knode->xcc_mask);
+       end = start + num_xcc;
        cu_sibling_map_mask = cu_info->bitmap[start][0][0];
        cu_sibling_map_mask &=
                ((1 << pcache_info[cache_type].num_cu_shared) - 1);
@@ -1624,7 +1627,18 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
                pcache->processor_id_low = cu_processor_id
                                        + (first_active_cu - 1);
                pcache->cache_level = pcache_info[cache_type].cache_level;
-               pcache->cache_size = pcache_info[cache_type].cache_size;
+
+               if (KFD_GC_VERSION(knode) == IP_VERSION(9, 4, 3))
+                       mode = adev->gmc.gmc_funcs->query_mem_partition_mode(adev);
+               else
+                       mode = UNKNOWN_MEMORY_PARTITION_MODE;
+
+               if (pcache->cache_level == 2)
+                       pcache->cache_size = pcache_info[cache_type].cache_size * num_xcc;
+               else if (mode)
+                       pcache->cache_size = pcache_info[cache_type].cache_size / mode;
+               else
+                       pcache->cache_size = pcache_info[cache_type].cache_size;
 
                if (pcache_info[cache_type].flags & CRAT_CACHE_FLAGS_DATA_CACHE)
                        pcache->cache_type |= HSA_CACHE_TYPE_DATA;
index 9cd83b780102784894f14b1bd4bcc6658e806db5..ed784cf27d396f10fe507a1a4a41a009f01e15a5 100644 (file)
@@ -1216,6 +1216,9 @@ bool dm_helpers_dp_handle_test_pattern_request(
 
        }
 
+       pipe_ctx->stream->test_pattern.type = test_pattern;
+       pipe_ctx->stream->test_pattern.color_space = test_pattern_color_space;
+
        dc_link_dp_set_test_pattern(
                (struct dc_link *) link,
                test_pattern,
index 0f580ea37576dc16d2f27be4e601030c5c8e2b52..133af994a08c178bbdff1a60381703ecbbe34334 100644 (file)
@@ -37,7 +37,7 @@
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_encoder.h>
 #include <drm/drm_atomic.h>
-#include "dcn10/dcn10_optc.h"
+#include "dc/inc/hw/optc.h"
 
 #include "dc/inc/core_types.h"
 
index f80917f6153b36ded3b2cace9a51d81ff2c5555d..0fa4fcd00de2c982ebc2634c7a74f19585c3b721 100644 (file)
@@ -111,17 +111,21 @@ static int dcn35_get_active_display_cnt_wa(
        return display_count;
 }
 
-static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn35_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+               bool safe_to_lower, bool disable)
 {
        struct dc *dc = clk_mgr_base->ctx->dc;
        int i;
 
        for (i = 0; i < dc->res_pool->pipe_count; ++i) {
-               struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *pipe = safe_to_lower
+                       ? &context->res_ctx.pipe_ctx[i]
+                       : &dc->current_state->res_ctx.pipe_ctx[i];
 
                if (pipe->top_pipe || pipe->prev_odm_pipe)
                        continue;
-               if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
+               if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+                                    !pipe->stream->link_enc)) {
                        struct stream_encoder *stream_enc = pipe->stream_res.stream_enc;
 
                        if (disable) {
@@ -301,11 +305,11 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
        }
 
        if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-               dcn35_disable_otg_wa(clk_mgr_base, context, true);
+               dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
 
                clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
                dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-               dcn35_disable_otg_wa(clk_mgr_base, context, false);
+               dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
 
                update_dispclk = true;
        }
@@ -814,7 +818,8 @@ static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
        struct dc *dc = clk_mgr_base->ctx->dc;
        uint32_t val = dcn35_smu_read_ips_scratch(clk_mgr);
 
-       if (dc->config.disable_ips == 0) {
+       if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
+               dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
                val |= DMUB_IPS1_ALLOW_MASK;
                val |= DMUB_IPS2_ALLOW_MASK;
        } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
@@ -1114,7 +1119,7 @@ void dcn35_clk_mgr_construct(
                dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
                                smu_dpm_clks.dpm_clks);
 
-       if (ctx->dc->config.disable_ips == 0) {
+       if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
                bool ips_support = false;
 
                /*avoid call pmfw at init*/
@@ -1127,7 +1132,7 @@ void dcn35_clk_mgr_construct(
                        ctx->dc->debug.disable_hpo_power_gate = false;
                } else {
                        /*let's reset the config control flag*/
-                       ctx->dc->config.disable_ips = 1; /*pmfw not support it, disable it all*/
+                       ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/
                }
        }
 }
index 74c21d98b4de37541c848ab0c015f69891a0b820..7b9bf5cb45299974757bd16d608e41d444ef3b0f 100644 (file)
@@ -2582,6 +2582,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
        if (u->gamut_remap_matrix)
                update_flags->bits.gamut_remap_change = 1;
 
+       if (u->blend_tf)
+               update_flags->bits.gamma_change = 1;
+
        if (u->gamma) {
                enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
 
@@ -4113,8 +4116,17 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
        bool success = false;
        struct dc_state *minimal_transition_context;
        struct pipe_split_policy_backup policy;
+       struct mall_temp_config mall_temp_config;
 
        /* commit based on new context */
+       /* Since all phantom pipes are removed in full validation,
+        * we have to save and restore the subvp/mall config when
+        * we do a minimal transition since the flags marking the
+        * pipe as subvp/phantom will be cleared (dc copy constructor
+        * creates a shallow copy).
+        */
+       if (dc->res_pool->funcs->save_mall_state)
+               dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
        minimal_transition_context = create_minimal_transition_state(dc,
                        context, &policy);
        if (minimal_transition_context) {
@@ -4123,9 +4135,20 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
                        dc->hwss.is_pipe_topology_transition_seamless(
                                        dc, minimal_transition_context, context)) {
                        DC_LOG_DC("%s base = new state\n", __func__);
+
                        success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
                }
                release_minimal_transition_state(dc, minimal_transition_context, &policy);
+               if (dc->res_pool->funcs->restore_mall_state)
+                       dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
+               /* If we do a minimal transition with plane removal and the context
+                * has subvp we also have to retain back the phantom stream / planes
+                * since the refcount is decremented as part of the min transition
+                * (we commit a state with no subvp, so the phantom streams / planes
+                * had to be removed).
+                */
+               if (dc->res_pool->funcs->retain_phantom_pipes)
+                       dc->res_pool->funcs->retain_phantom_pipes(dc, context);
        }
 
        if (!success) {
@@ -4348,7 +4371,6 @@ static bool full_update_required(struct dc *dc,
                                srf_updates[i].in_transfer_func ||
                                srf_updates[i].func_shaper ||
                                srf_updates[i].lut3d_func ||
-                               srf_updates[i].blend_tf ||
                                srf_updates[i].surface->force_full_update ||
                                (srf_updates[i].flip_addr &&
                                srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
@@ -4885,7 +4907,7 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
        if (dc->debug.disable_idle_power_optimizations)
                return;
 
-       if (dc->caps.ips_support && dc->config.disable_ips)
+       if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
                return;
 
        if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
@@ -4906,7 +4928,7 @@ bool dc_dmub_is_ips_idle_state(struct dc *dc)
        if (dc->debug.disable_idle_power_optimizations)
                return false;
 
-       if (!dc->caps.ips_support || dc->config.disable_ips)
+       if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
                return false;
 
        if (dc->hwss.get_idle_state)
index 6ed40b6c6178f9c08aeaedb73c48a10d04d23511..4bdf105d1d7150d666bb2a42eccfd940e7a29a39 100644 (file)
@@ -533,7 +533,7 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
        for (i = 0; i < MAX_PIPES; i++) {
                struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
 
-               if (res_ctx->pipe_ctx[i].stream != stream)
+               if (res_ctx->pipe_ctx[i].stream != stream || !tg)
                        continue;
 
                return tg->funcs->get_frame_count(tg);
@@ -592,7 +592,7 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
        for (i = 0; i < MAX_PIPES; i++) {
                struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
 
-               if (res_ctx->pipe_ctx[i].stream != stream)
+               if (res_ctx->pipe_ctx[i].stream != stream || !tg)
                        continue;
 
                tg->funcs->get_scanoutpos(tg,
index 6e54ca055fcb701fdd64f6ddb09e0f18587bec02..9316b737a8ba892c6494687bae472e206d01283d 100644 (file)
@@ -49,7 +49,7 @@ struct aux_payload;
 struct set_config_cmd_payload;
 struct dmub_notification;
 
-#define DC_VER "3.2.256"
+#define DC_VER "3.2.259"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
index ba142bef626bf5910c52c04980055d999139cb47..e4c007203318bf0b9ad3f3f9b1d2fd73973ca4f9 100644 (file)
@@ -120,6 +120,80 @@ void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dc_dmub_srv,
        }
 }
 
+bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+               unsigned int count,
+               union dmub_rb_cmd *cmd_list)
+{
+       struct dc_context *dc_ctx = dc_dmub_srv->ctx;
+       struct dmub_srv *dmub;
+       enum dmub_status status;
+       int i;
+
+       if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+               return false;
+
+       dmub = dc_dmub_srv->dmub;
+
+       for (i = 0 ; i < count; i++) {
+               // Queue command
+               status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+
+               if (status == DMUB_STATUS_QUEUE_FULL) {
+                       /* Execute and wait for queue to become empty again. */
+                       dmub_srv_cmd_execute(dmub);
+                       dmub_srv_wait_for_idle(dmub, 100000);
+
+                       /* Requeue the command. */
+                       status = dmub_srv_cmd_queue(dmub, &cmd_list[i]);
+               }
+
+               if (status != DMUB_STATUS_OK) {
+                       DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+                       dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+                       return false;
+               }
+       }
+
+       status = dmub_srv_cmd_execute(dmub);
+       if (status != DMUB_STATUS_OK) {
+               DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+               dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+               return false;
+       }
+
+       return true;
+}
+
+bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
+               enum dm_dmub_wait_type wait_type,
+               union dmub_rb_cmd *cmd_list)
+{
+       struct dmub_srv *dmub;
+       enum dmub_status status;
+
+       if (!dc_dmub_srv || !dc_dmub_srv->dmub)
+               return false;
+
+       dmub = dc_dmub_srv->dmub;
+
+       // Wait for DMUB to process command
+       if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
+               status = dmub_srv_wait_for_idle(dmub, 100000);
+
+               if (status != DMUB_STATUS_OK) {
+                       DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
+                       dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+                       return false;
+               }
+
+               // Copy data back from ring buffer into command
+               if (wait_type == DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)
+                       dmub_rb_get_return_data(&dmub->inbox1_rb, cmd_list);
+       }
+
+       return true;
+}
+
 bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
 {
        return dc_dmub_srv_cmd_run_list(dc_dmub_srv, 1, cmd, wait_type);
index 31150b21439484d2cf6eff186bc7c43a463dbcd7..d4a60f53faab12f69e3d09a78b54adade290d6fa 100644 (file)
@@ -56,6 +56,14 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv);
 
 bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv);
 
+bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
+               unsigned int count,
+               union dmub_rb_cmd *cmd_list);
+
+bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv,
+               enum dm_dmub_wait_type wait_type,
+               union dmub_rb_cmd *cmd_list);
+
 bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type);
 
 bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type);
index 35ae245ef722b938b8f10b00c625b4e42a90ff07..eeeeeef4d717345e85296ffcc8fc28409909978b 100644 (file)
@@ -142,7 +142,8 @@ enum dp_test_link_rate {
        DP_TEST_LINK_RATE_HBR3          = 0x1E,
        DP_TEST_LINK_RATE_UHBR10        = 0x01,
        DP_TEST_LINK_RATE_UHBR20        = 0x02,
-       DP_TEST_LINK_RATE_UHBR13_5      = 0x03,
+       DP_TEST_LINK_RATE_UHBR13_5_LEGACY       = 0x03, /* For backward compatibility*/
+       DP_TEST_LINK_RATE_UHBR13_5      = 0x04,
 };
 
 struct dc_link_settings {
index 40dc51853d62a92334cea5811dce0201f76273cb..cea666ea66c6144cad038aa9a8d833b2d36b0a78 100644 (file)
@@ -1037,7 +1037,9 @@ struct replay_config {
        bool replay_smu_opt_supported;                  // SMU optimization is supported
        unsigned int replay_enable_option;              // Replay enablement option
        uint32_t debug_flags;                           // Replay debug flags
-       bool replay_timing_sync_supported;             // Replay desync is supported
+       bool replay_timing_sync_supported; // Replay desync is supported
+       bool force_disable_desync_error_check;             // Replay desync is supported
+       bool received_desync_error_hpd; //Replay Received Desync Error HPD.
        union replay_error_status replay_error_status; // Replay error status
 };
 
index c50aa30614be2c7b65e2ddb6324b76941cb508c1..051e4c2b4cf271e0d084e00e5314916409069740 100644 (file)
        SRI(DC_ABM1_ACE_THRES_12, ABM, id), \
        NBIO_SR(BIOS_SCRATCH_2)
 
-#define ABM_DCN32_REG_LIST(id)\
-       SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
-       SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
-       SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
-       SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \
-       SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
-       SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
-       SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
-       SRI(BL1_PWM_USER_LEVEL, ABM, id), \
-       SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
-       SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
-       SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \
-       SRI(DC_ABM1_ACE_THRES_12, ABM, id), \
-       NBIO_SR(BIOS_SCRATCH_2)
-
 #define ABM_SF(reg_name, field_name, post_fix)\
        .field_name = reg_name ## __ ## field_name ## post_fix
 
index aaf6c981fd9e13b61ee171257c1d9007d4a60234..ab81594a7fadcc0ea6eecb148ddc264202a1c0df 100644 (file)
@@ -26,7 +26,7 @@
 #ifndef __DC_TIMING_GENERATOR_DCN10_H__
 #define __DC_TIMING_GENERATOR_DCN10_H__
 
-#include "timing_generator.h"
+#include "optc.h"
 
 #define DCN10TG_FROM_TG(tg)\
        container_of(tg, struct optc, base)
@@ -594,190 +594,6 @@ struct dcn_optc_mask {
        TG_REG_FIELD_LIST_DCN3_5(uint32_t)
 };
 
-struct optc {
-       struct timing_generator base;
-
-       const struct dcn_optc_registers *tg_regs;
-       const struct dcn_optc_shift *tg_shift;
-       const struct dcn_optc_mask *tg_mask;
-
-       int opp_count;
-
-       uint32_t max_h_total;
-       uint32_t max_v_total;
-
-       uint32_t min_h_blank;
-
-       uint32_t min_h_sync_width;
-       uint32_t min_v_sync_width;
-       uint32_t min_v_blank;
-       uint32_t min_v_blank_interlace;
-
-       int vstartup_start;
-       int vupdate_offset;
-       int vupdate_width;
-       int vready_offset;
-       struct dc_crtc_timing orginal_patched_timing;
-       enum signal_type signal;
-};
-
 void dcn10_timing_generator_init(struct optc *optc);
 
-struct dcn_otg_state {
-       uint32_t v_blank_start;
-       uint32_t v_blank_end;
-       uint32_t v_sync_a_pol;
-       uint32_t v_total;
-       uint32_t v_total_max;
-       uint32_t v_total_min;
-       uint32_t v_total_min_sel;
-       uint32_t v_total_max_sel;
-       uint32_t v_sync_a_start;
-       uint32_t v_sync_a_end;
-       uint32_t h_blank_start;
-       uint32_t h_blank_end;
-       uint32_t h_sync_a_start;
-       uint32_t h_sync_a_end;
-       uint32_t h_sync_a_pol;
-       uint32_t h_total;
-       uint32_t underflow_occurred_status;
-       uint32_t otg_enabled;
-       uint32_t blank_enabled;
-       uint32_t vertical_interrupt1_en;
-       uint32_t vertical_interrupt1_line;
-       uint32_t vertical_interrupt2_en;
-       uint32_t vertical_interrupt2_line;
-};
-
-void optc1_read_otg_state(struct optc *optc1,
-               struct dcn_otg_state *s);
-
-bool optc1_get_hw_timing(struct timing_generator *tg,
-               struct dc_crtc_timing *hw_crtc_timing);
-
-bool optc1_validate_timing(
-       struct timing_generator *optc,
-       const struct dc_crtc_timing *timing);
-
-void optc1_program_timing(
-       struct timing_generator *optc,
-       const struct dc_crtc_timing *dc_crtc_timing,
-       int vready_offset,
-       int vstartup_start,
-       int vupdate_offset,
-       int vupdate_width,
-       const enum signal_type signal,
-       bool use_vbios);
-
-void optc1_setup_vertical_interrupt0(
-               struct timing_generator *optc,
-               uint32_t start_line,
-               uint32_t end_line);
-void optc1_setup_vertical_interrupt1(
-               struct timing_generator *optc,
-               uint32_t start_line);
-void optc1_setup_vertical_interrupt2(
-               struct timing_generator *optc,
-               uint32_t start_line);
-
-void optc1_program_global_sync(
-               struct timing_generator *optc,
-               int vready_offset,
-               int vstartup_start,
-               int vupdate_offset,
-               int vupdate_width);
-
-bool optc1_disable_crtc(struct timing_generator *optc);
-
-bool optc1_is_counter_moving(struct timing_generator *optc);
-
-void optc1_get_position(struct timing_generator *optc,
-               struct crtc_position *position);
-
-uint32_t optc1_get_vblank_counter(struct timing_generator *optc);
-
-void optc1_get_crtc_scanoutpos(
-       struct timing_generator *optc,
-       uint32_t *v_blank_start,
-       uint32_t *v_blank_end,
-       uint32_t *h_position,
-       uint32_t *v_position);
-
-void optc1_set_early_control(
-       struct timing_generator *optc,
-       uint32_t early_cntl);
-
-void optc1_wait_for_state(struct timing_generator *optc,
-               enum crtc_state state);
-
-void optc1_set_blank(struct timing_generator *optc,
-               bool enable_blanking);
-
-bool optc1_is_blanked(struct timing_generator *optc);
-
-void optc1_program_blank_color(
-               struct timing_generator *optc,
-               const struct tg_color *black_color);
-
-bool optc1_did_triggered_reset_occur(
-       struct timing_generator *optc);
-
-void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst);
-
-void optc1_disable_reset_trigger(struct timing_generator *optc);
-
-void optc1_lock(struct timing_generator *optc);
-
-void optc1_unlock(struct timing_generator *optc);
-
-void optc1_enable_optc_clock(struct timing_generator *optc, bool enable);
-
-void optc1_set_drr(
-       struct timing_generator *optc,
-       const struct drr_params *params);
-
-void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
-
-void optc1_set_static_screen_control(
-       struct timing_generator *optc,
-       uint32_t event_triggers,
-       uint32_t num_frames);
-
-void optc1_program_stereo(struct timing_generator *optc,
-       const struct dc_crtc_timing *timing, struct crtc_stereo_flags *flags);
-
-bool optc1_is_stereo_left_eye(struct timing_generator *optc);
-
-void optc1_clear_optc_underflow(struct timing_generator *optc);
-
-void optc1_tg_init(struct timing_generator *optc);
-
-bool optc1_is_tg_enabled(struct timing_generator *optc);
-
-bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
-
-void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
-
-void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable);
-
-bool optc1_get_otg_active_size(struct timing_generator *optc,
-               uint32_t *otg_active_width,
-               uint32_t *otg_active_height);
-
-void optc1_enable_crtc_reset(
-               struct timing_generator *optc,
-               int source_tg_inst,
-               struct crtc_trigger_info *crtc_tp);
-
-bool optc1_configure_crc(struct timing_generator *optc,
-                         const struct crc_params *params);
-
-bool optc1_get_crc(struct timing_generator *optc,
-                   uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb);
-
-bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
-
-void optc1_set_vtg_params(struct timing_generator *optc,
-               const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
-
 #endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
index 5eebe7f03ddc72a8b060ec4768b5e7753b827942..c9ae2d8f0096fa4c63ffb2167059eaaf59227f00 100644 (file)
@@ -137,7 +137,15 @@ void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz)
                dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 2;
        }
 
-       // TODO DSC: This is actually image width limitation, not a slice width. This should be added to the criteria to use ODM.
+       /* For pixel clock bigger than a single-pipe limit needing four engines ODM 4:1, which then quardruples our
+        * throughput and number of slices
+        */
+       if (pixel_clock_100Hz > DCN20_MAX_PIXEL_CLOCK_Mhz*10000*2) {
+               dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1;
+               dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1;
+               dsc_enc_caps->max_total_throughput_mps = DCN20_MAX_DISPLAY_CLOCK_Mhz * 4;
+       }
+
        dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */
        dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */
 }
index 50dc834046446ae72647d722be59b519e4d35082..11f7746f3a656a2a9ad430cb96ff42534a70f90e 100644 (file)
@@ -613,16 +613,19 @@ static void dpp3_program_blnd_pwl(
                        REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg);
                REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red);
        } else {
+               REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
                REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 4);
                for (i = 0 ; i < num; i++)
                        REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].red_reg);
                REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_red);
 
+               REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
                REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 2);
                for (i = 0 ; i < num; i++)
                        REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].green_reg);
                REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, last_base_value_green);
 
+               REG_SET(CM_BLNDGAM_LUT_INDEX, 0, CM_BLNDGAM_LUT_INDEX, 0);
                REG_UPDATE(CM_BLNDGAM_LUT_CONTROL, CM_BLNDGAM_LUT_WRITE_COLOR_MASK, 1);
                for (i = 0 ; i < num; i++)
                        REG_SET(CM_BLNDGAM_LUT_DATA, 0, CM_BLNDGAM_LUT_DATA, rgb[i].blue_reg);
index 2861d974fcf62a0465ab6a39a6e65c249888ec27..75547ce86c09bc9e41d809f4e5c5ee8b4b1214eb 100644 (file)
@@ -316,7 +316,7 @@ bool hubp3_program_surface_flip_and_addr(
        return true;
 }
 
-static void hubp3_program_tiling(
+void hubp3_program_tiling(
        struct dcn20_hubp *hubp2,
        const union dc_tiling_info *info,
        const enum surface_pixel_format pixel_format)
index 8a32772d4e91af4b673fe0953ea4d9786d2f58c9..b010531a7fe886cd0c393c70ddfadcd0d40d0b39 100644 (file)
@@ -278,6 +278,11 @@ void hubp3_setup(
                struct _vcs_dpi_display_rq_regs_st *rq_regs,
                struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
 
+void hubp3_program_tiling(
+               struct dcn20_hubp *hubp2,
+               const union dc_tiling_info *info,
+               const enum surface_pixel_format pixel_format);
+
 void hubp3_dcc_control(struct hubp *hubp, bool enable,
                enum hubp_ind_block_size blk_size);
 
index 1d052f08aff5e16ffc10a9695bca927a64ea68f5..994b21ed272f175318a0b3295ac1c1e052a6b95d 100644 (file)
@@ -237,16 +237,19 @@ void mpc32_program_post1dlut_pwl(
                        REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg);
                REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red);
        } else {
+               REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0);
                REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 4);
                for (i = 0 ; i < num; i++)
                        REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].red_reg);
                REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_red);
 
+               REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0);
                REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 2);
                for (i = 0 ; i < num; i++)
                        REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].green_reg);
                REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, last_base_value_green);
 
+               REG_SET(MPCC_MCM_1DLUT_LUT_INDEX[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_INDEX, 0);
                REG_UPDATE(MPCC_MCM_1DLUT_LUT_CONTROL[mpcc_id], MPCC_MCM_1DLUT_LUT_WRITE_COLOR_MASK, 1);
                for (i = 0 ; i < num; i++)
                        REG_SET(MPCC_MCM_1DLUT_LUT_DATA[mpcc_id], 0, MPCC_MCM_1DLUT_LUT_DATA, rgb[i].blue_reg);
index addedcfd1238beb4eeec681e20b7a41c91b5f40f..479f3683c0b70eeeec8e77c5f1717c9533ca3f64 100644 (file)
@@ -325,6 +325,43 @@ static void dccg35_set_dpstreamclk(
        }
 }
 
+static void dccg35_set_physymclk_root_clock_gating(
+               struct dccg *dccg,
+               int phy_inst,
+               bool enable)
+{
+       struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+       if (!dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+               return;
+
+       switch (phy_inst) {
+       case 0:
+               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                               PHYASYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+               break;
+       case 1:
+               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                               PHYBSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+               break;
+       case 2:
+               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                               PHYCSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+               break;
+       case 3:
+               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                               PHYDSYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+               break;
+       case 4:
+               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+                               PHYESYMCLK_ROOT_GATE_DISABLE, enable ? 1 : 0);
+               break;
+       default:
+               BREAK_TO_DEBUGGER();
+               return;
+       }
+}
+
 static void dccg35_set_physymclk(
                struct dccg *dccg,
                int phy_inst,
@@ -340,16 +377,10 @@ static void dccg35_set_physymclk(
                        REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
                                        PHYASYMCLK_EN, 1,
                                        PHYASYMCLK_SRC_SEL, clk_src);
-                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
-                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
-                                       PHYASYMCLK_ROOT_GATE_DISABLE, 1);
                } else {
                        REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL,
                                        PHYASYMCLK_EN, 0,
                                        PHYASYMCLK_SRC_SEL, 0);
-                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
-                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
-                                       PHYASYMCLK_ROOT_GATE_DISABLE, 0);
                }
                break;
        case 1:
@@ -357,16 +388,10 @@ static void dccg35_set_physymclk(
                        REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
                                        PHYBSYMCLK_EN, 1,
                                        PHYBSYMCLK_SRC_SEL, clk_src);
-                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
-                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
-                                       PHYBSYMCLK_ROOT_GATE_DISABLE, 1);
                } else {
                        REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL,
                                        PHYBSYMCLK_EN, 0,
                                        PHYBSYMCLK_SRC_SEL, 0);
-                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
-                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
-                                       PHYBSYMCLK_ROOT_GATE_DISABLE, 0);
                }
                break;
        case 2:
@@ -374,16 +399,10 @@ static void dccg35_set_physymclk(
                        REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
                                        PHYCSYMCLK_EN, 1,
                                        PHYCSYMCLK_SRC_SEL, clk_src);
-                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
-                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
-                                       PHYCSYMCLK_ROOT_GATE_DISABLE, 1);
                } else {
                        REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL,
                                        PHYCSYMCLK_EN, 0,
                                        PHYCSYMCLK_SRC_SEL, 0);
-                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
-                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
-                                       PHYCSYMCLK_ROOT_GATE_DISABLE, 0);
                }
                break;
        case 3:
@@ -391,16 +410,10 @@ static void dccg35_set_physymclk(
                        REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
                                        PHYDSYMCLK_EN, 1,
                                        PHYDSYMCLK_SRC_SEL, clk_src);
-                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
-                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
-                                       PHYDSYMCLK_ROOT_GATE_DISABLE, 1);
                } else {
                        REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL,
                                        PHYDSYMCLK_EN, 0,
                                        PHYDSYMCLK_SRC_SEL, 0);
-                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
-                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
-                                       PHYDSYMCLK_ROOT_GATE_DISABLE, 0);
                }
                break;
        case 4:
@@ -408,16 +421,10 @@ static void dccg35_set_physymclk(
                        REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
                                        PHYESYMCLK_EN, 1,
                                        PHYESYMCLK_SRC_SEL, clk_src);
-                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
-                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
-                                       PHYESYMCLK_ROOT_GATE_DISABLE, 1);
                } else {
                        REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL,
                                        PHYESYMCLK_EN, 0,
                                        PHYESYMCLK_SRC_SEL, 0);
-                       if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
-                               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
-                                       PHYESYMCLK_ROOT_GATE_DISABLE, 0);
                }
                break;
        default:
@@ -490,8 +497,8 @@ void dccg35_init(struct dccg *dccg)
 
        if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
                for (otg_inst = 0; otg_inst < 5; otg_inst++)
-                       dccg35_set_physymclk(dccg, otg_inst,
-                                            PHYSYMCLK_FORCE_SRC_SYMCLK, false);
+                       dccg35_set_physymclk_root_clock_gating(dccg, otg_inst,
+                                       false);
 /*
        dccg35_enable_global_fgcg_rep(
                dccg, dccg->ctx->dc->debug.enable_fine_grain_clock_gating.bits
@@ -754,7 +761,9 @@ static const struct dccg_funcs dccg35_funcs = {
        .disable_symclk32_se = dccg31_disable_symclk32_se,
        .enable_symclk32_le = dccg31_enable_symclk32_le,
        .disable_symclk32_le = dccg31_disable_symclk32_le,
+       .set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating,
        .set_physymclk = dccg35_set_physymclk,
+       .set_physymclk_root_clock_gating = dccg35_set_physymclk_root_clock_gating,
        .set_dtbclk_dto = dccg35_set_dtbclk_dto,
        .set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
        .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
index 1ed58660779ef2f0da564a6dd7d97b7876f2d7f4..771fcd0d3b9911ad5f0271f4ac6f0f022cee3e56 100644 (file)
@@ -53,11 +53,146 @@ static void hubp35_init(struct hubp *hubp)
 
        /*do nothing for now for dcn3.5 or later*/
 }
+
+void hubp35_program_pixel_format(
+       struct hubp *hubp,
+       enum surface_pixel_format format)
+{
+       struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+       uint32_t green_bar = 1;
+       uint32_t red_bar = 3;
+       uint32_t blue_bar = 2;
+
+       /* swap for ABGR format */
+       if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
+                       || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
+                       || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+                       || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616
+                       || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
+               red_bar = 2;
+               blue_bar = 3;
+       }
+
+       REG_UPDATE_3(HUBPRET_CONTROL,
+                       CROSSBAR_SRC_Y_G, green_bar,
+                       CROSSBAR_SRC_CB_B, blue_bar,
+                       CROSSBAR_SRC_CR_R, red_bar);
+
+       /* Mapping is same as ipp programming (cnvc) */
+
+       switch (format) {
+       case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 1);
+               break;
+       case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 3);
+               break;
+       case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 8);
+               break;
+       case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 10);
+               break;
+       case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /* we use crossbar already */
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
+               break;
+       case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 24);
+               break;
+
+       case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 65);
+               break;
+       case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 64);
+               break;
+       case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 67);
+               break;
+       case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 66);
+               break;
+       case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 12);
+               break;
+       case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 112);
+               break;
+       case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 113);
+               break;
+       case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 114);
+               break;
+       case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 118);
+               break;
+       case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+               REG_UPDATE(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 119);
+               break;
+       case SURFACE_PIXEL_FORMAT_GRPH_RGBE:
+               REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 116,
+                               ALPHA_PLANE_EN, 0);
+               break;
+       case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA:
+               REG_UPDATE_2(DCSURF_SURFACE_CONFIG,
+                               SURFACE_PIXEL_FORMAT, 116,
+                               ALPHA_PLANE_EN, 1);
+               break;
+       default:
+               BREAK_TO_DEBUGGER();
+               break;
+       }
+
+       /* don't see the need of program the xbar in DCN 1.0 */
+}
+
+void hubp35_program_surface_config(
+       struct hubp *hubp,
+       enum surface_pixel_format format,
+       union dc_tiling_info *tiling_info,
+       struct plane_size *plane_size,
+       enum dc_rotation_angle rotation,
+       struct dc_plane_dcc_param *dcc,
+       bool horizontal_mirror,
+       unsigned int compat_level)
+{
+       struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+       hubp3_dcc_control_sienna_cichlid(hubp, dcc);
+       hubp3_program_tiling(hubp2, tiling_info, format);
+       hubp2_program_size(hubp, format, plane_size, dcc);
+       hubp2_program_rotation(hubp, rotation, horizontal_mirror);
+       hubp35_program_pixel_format(hubp, format);
+}
+
 struct hubp_funcs dcn35_hubp_funcs = {
        .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
        .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
        .hubp_program_surface_flip_and_addr = hubp3_program_surface_flip_and_addr,
-       .hubp_program_surface_config = hubp3_program_surface_config,
+       .hubp_program_surface_config = hubp35_program_surface_config,
        .hubp_is_flip_pending = hubp2_is_flip_pending,
        .hubp_setup = hubp3_setup,
        .hubp_setup_interdependent = hubp2_setup_interdependent,
index 3d830f93141e8252dcb5789b771d051618de56d2..586b43aa5834174bb46bce04961cb94af21a9368 100644 (file)
@@ -58,4 +58,18 @@ bool hubp35_construct(
 
 void hubp35_set_fgcg(struct hubp *hubp, bool enable);
 
+void hubp35_program_pixel_format(
+       struct hubp *hubp,
+       enum surface_pixel_format format);
+
+void hubp35_program_surface_config(
+       struct hubp *hubp,
+       enum surface_pixel_format format,
+       union dc_tiling_info *tiling_info,
+       struct plane_size *plane_size,
+       enum dc_rotation_angle rotation,
+       struct dc_plane_dcc_param *dcc,
+       bool horizontal_mirror,
+       unsigned int compat_level);
+
 #endif /* __DC_HUBP_DCN35_H__ */
index 0f60c40e1fc50a8117c3b3c0144ebaf4ba8d3625..46f71ff08fd176a668732db37dac8c5ee0de16a5 100644 (file)
@@ -332,6 +332,13 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
        pg_cntl->pg_res_enable[PG_DCIO] = power_on;
 }
 
+void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on)
+{
+       struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
+
+       REG_UPDATE(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, power_on ? 1 : 0);
+}
+
 static bool pg_cntl35_plane_otg_status(struct pg_cntl *pg_cntl)
 {
        struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
@@ -501,7 +508,8 @@ static const struct pg_cntl_funcs pg_cntl35_funcs = {
        .mpcc_pg_control = pg_cntl35_mpcc_pg_control,
        .opp_pg_control = pg_cntl35_opp_pg_control,
        .optc_pg_control = pg_cntl35_optc_pg_control,
-       .dwb_pg_control = pg_cntl35_dwb_pg_control
+       .dwb_pg_control = pg_cntl35_dwb_pg_control,
+       .set_force_poweron_domain22 = pg_cntl35_set_force_poweron_domain22
 };
 
 struct pg_cntl *pg_cntl35_create(
index 3de240884d22fa23e662cfd68413e6590d20745e..069dae08e2224b2ccfafddd1ca0a467e2a1b2a92 100644 (file)
@@ -183,6 +183,7 @@ void pg_cntl35_optc_pg_control(struct pg_cntl *pg_cntl,
        unsigned int optc_inst, bool power_on);
 void pg_cntl35_dwb_pg_control(struct pg_cntl *pg_cntl, bool power_on);
 void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl);
+void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on);
 
 struct pg_cntl *pg_cntl35_create(
        struct dc_context *ctx,
index 3c7c810bab1ff79b26fffb2ed710c512f5ce302b..c7e011d26d41780262c06ce59dee7f13b3de8997 100644 (file)
@@ -610,7 +610,23 @@ static struct dce_hwseq_registers hwseq_reg;
        HWS_SF(, DMU_CLK_CNTL, LONO_FGCG_REP_DIS, mask_sh),\
        HWS_SF(, DMU_CLK_CNTL, LONO_DISPCLK_GATE_DISABLE, mask_sh),\
        HWS_SF(, DMU_CLK_CNTL, LONO_SOCCLK_GATE_DISABLE, mask_sh),\
-       HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh)
+       HWS_SF(, DMU_CLK_CNTL, LONO_DMCUBCLK_GATE_DISABLE, mask_sh),\
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_FE_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_FE_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_FE_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_FE_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKB_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKC_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKD_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, SYMCLKE_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
+       HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh)
 
 static const struct dce_hwseq_shift hwseq_shift = {
                HWSEQ_DCN35_MASK_SH_LIST(__SHIFT)
@@ -708,7 +724,7 @@ static const struct dc_debug_options debug_defaults_drv = {
                        .i2c = true,
                        .dmcu = false, // This is previously known to cause hang on S3 cycles if enabled
                        .dscl = true,
-                       .cm = true,
+                       .cm = false,
                        .mpc = true,
                        .optc = true,
                        .vpg = true,
@@ -719,14 +735,14 @@ static const struct dc_debug_options debug_defaults_drv = {
                .bits = {
                        .dpp = true,
                        .dsc = true,/*dscclk and dsc pg*/
-                       .hdmistream = false,
-                       .hdmichar = false,
-                       .dpstream = false,
-                       .symclk32_se = false,
-                       .symclk32_le = false,
-                       .symclk_fe = false,
-                       .physymclk = false,
-                       .dpiasymclk = false,
+                       .hdmistream = true,
+                       .hdmichar = true,
+                       .dpstream = true,
+                       .symclk32_se = true,
+                       .symclk32_le = true,
+                       .symclk_fe = true,
+                       .physymclk = true,
+                       .dpiasymclk = true,
                }
        },
        .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT,
@@ -741,7 +757,6 @@ static const struct dc_debug_options debug_defaults_drv = {
        .disable_boot_optimizations = false,
        .disable_unbounded_requesting = false,
        .disable_mem_low_power = false,
-       .enable_hpo_pg_support = false,
        //must match enable_single_display_2to1_odm_policy to support dynamic ODM transitions
        .enable_double_buffered_dsc_pg_support = true,
        .enable_dp_dig_pixel_rate_div_policy = 1,
index ad741a723c0e8a697a5fd1ad98d3d38d9eddbf58..3686f1e7de3abf659e2e60f117d7992d1da95915 100644 (file)
@@ -5128,7 +5128,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                        ViewportExceedsSurface = true;
 
                if (v->SourcePixelFormat[k] != dm_444_64 && v->SourcePixelFormat[k] != dm_444_32 && v->SourcePixelFormat[k] != dm_444_16
-                               && v->SourcePixelFormat[k] != dm_444_16 && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
+                               && v->SourcePixelFormat[k] != dm_444_8 && v->SourcePixelFormat[k] != dm_rgbe) {
                        if (v->ViewportWidthChroma[k] > v->SurfaceWidthC[k] || v->ViewportHeightChroma[k] > v->SurfaceHeightC[k]) {
                                ViewportExceedsSurface = true;
                        }
index 70ae5eba624e5aa8c7405077ce1e267f97cb544a..acff3449b8d78754b004a15f9b1c7351a33612d2 100644 (file)
@@ -60,8 +60,12 @@ endif
 endif
 
 ifneq ($(CONFIG_FRAME_WARN),0)
+ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+frame_warn_flag := -Wframe-larger-than=3072
+else
 frame_warn_flag := -Wframe-larger-than=2048
 endif
+endif
 
 CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_core.o := $(dml2_ccflags) $(frame_warn_flag)
 CFLAGS_$(AMDDALPATH)/dc/dml2/display_mode_util.o := $(dml2_ccflags)
index d2046e770c507f3047cdb91244cdbcd483399cf3..1a2b24cc6b61d5ebaa621a0c3656066bdb7c8f25 100644 (file)
@@ -55,10 +55,11 @@ struct dc_pipe_mapping_scratch {
        struct dc_plane_pipe_pool pipe_pool;
 };
 
-static bool get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane,
-       unsigned int stream_id, unsigned int *plane_id)
+static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state, const struct dc_plane_state *plane,
+       unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id)
 {
        int i, j;
+       bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists;
 
        if (!plane_id)
                return false;
@@ -66,7 +67,8 @@ static bool get_plane_id(const struct dc_state *state, const struct dc_plane_sta
        for (i = 0; i < state->stream_count; i++) {
                if (state->streams[i]->stream_id == stream_id) {
                        for (j = 0; j < state->stream_status[i].plane_count; j++) {
-                               if (state->stream_status[i].plane_states[j] == plane) {
+                               if (state->stream_status[i].plane_states[j] == plane &&
+                                       (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) {
                                        *plane_id = (i << 16) | j;
                                        return true;
                                }
@@ -123,8 +125,9 @@ static struct pipe_ctx *find_master_pipe_of_plane(struct dml2_context *ctx,
        unsigned int plane_id_assigned_to_pipe;
 
        for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
-               if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(state, state->res_ctx.pipe_ctx[i].plane_state,
-                       state->res_ctx.pipe_ctx[i].stream->stream_id, &plane_id_assigned_to_pipe)) {
+               if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(ctx, state, state->res_ctx.pipe_ctx[i].plane_state,
+                       state->res_ctx.pipe_ctx[i].stream->stream_id,
+                       ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id_assigned_to_pipe)) {
                        if (plane_id_assigned_to_pipe == plane_id)
                                return &state->res_ctx.pipe_ctx[i];
                }
@@ -141,8 +144,9 @@ static unsigned int find_pipes_assigned_to_plane(struct dml2_context *ctx,
        unsigned int plane_id_assigned_to_pipe;
 
        for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
-               if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(state, state->res_ctx.pipe_ctx[i].plane_state,
-                       state->res_ctx.pipe_ctx[i].stream->stream_id, &plane_id_assigned_to_pipe)) {
+               if (state->res_ctx.pipe_ctx[i].plane_state && get_plane_id(ctx, state, state->res_ctx.pipe_ctx[i].plane_state,
+                       state->res_ctx.pipe_ctx[i].stream->stream_id,
+                       ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id_assigned_to_pipe)) {
                        if (plane_id_assigned_to_pipe == plane_id)
                                pipes[num_found++] = i;
                }
@@ -609,6 +613,7 @@ static struct pipe_ctx *assign_pipes_to_plane(struct dml2_context *ctx, struct d
                const struct dc_plane_state *plane,
                int odm_factor,
                int mpc_factor,
+               int plane_index,
                struct dc_plane_pipe_pool *pipe_pool,
                const struct dc_state *existing_state)
 {
@@ -620,7 +625,7 @@ static struct pipe_ctx *assign_pipes_to_plane(struct dml2_context *ctx, struct d
        unsigned int next_pipe_to_assign;
        int odm_slice, mpc_slice;
 
-       if (!get_plane_id(state, plane, stream->stream_id, &plane_id)) {
+       if (!get_plane_id(ctx, state, plane, stream->stream_id, plane_index, &plane_id)) {
                ASSERT(false);
                return master_pipe;
        }
@@ -667,12 +672,16 @@ static void free_pipe(struct pipe_ctx *pipe)
 }
 
 static void free_unused_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state,
-       const struct dc_plane_state *plane, const struct dc_plane_pipe_pool *pool, unsigned int stream_id)
+       const struct dc_plane_state *plane, const struct dc_plane_pipe_pool *pool, unsigned int stream_id, int plane_index)
 {
        int i;
+       bool is_plane_duplicate = ctx->v20.scratch.plane_duplicate_exists;
+
        for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
                if (state->res_ctx.pipe_ctx[i].plane_state == plane &&
                        state->res_ctx.pipe_ctx[i].stream->stream_id == stream_id &&
+                       (!is_plane_duplicate || (is_plane_duplicate &&
+                       ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[state->res_ctx.pipe_ctx[i].pipe_idx] == plane_index)) &&
                        !is_pipe_used(pool, state->res_ctx.pipe_ctx[i].pipe_idx)) {
                        free_pipe(&state->res_ctx.pipe_ctx[i]);
                }
@@ -717,19 +726,20 @@ static void map_pipes_for_stream(struct dml2_context *ctx, struct dc_state *stat
 }
 
 static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state, const struct dc_stream_state *stream, const struct dc_plane_state *plane,
-               struct dc_pipe_mapping_scratch *scratch, const struct dc_state *existing_state)
+               int plane_index, struct dc_pipe_mapping_scratch *scratch, const struct dc_state *existing_state)
 {
        int odm_slice_index;
        unsigned int plane_id;
        struct pipe_ctx *master_pipe = NULL;
        int i;
 
-       if (!get_plane_id(state, plane, stream->stream_id, &plane_id)) {
+       if (!get_plane_id(ctx, state, plane, stream->stream_id, plane_index, &plane_id)) {
                ASSERT(false);
                return;
        }
 
-       master_pipe = assign_pipes_to_plane(ctx, state, stream, plane, scratch->odm_info.odm_factor, scratch->mpc_info.mpc_factor, &scratch->pipe_pool, existing_state);
+       master_pipe = assign_pipes_to_plane(ctx, state, stream, plane, scratch->odm_info.odm_factor,
+                       scratch->mpc_info.mpc_factor, plane_index, &scratch->pipe_pool, existing_state);
        sort_pipes_for_splitting(&scratch->pipe_pool);
 
        for (odm_slice_index = 0; odm_slice_index < scratch->odm_info.odm_factor; odm_slice_index++) {
@@ -755,7 +765,7 @@ static void map_pipes_for_plane(struct dml2_context *ctx, struct dc_state *state
                }
        }
 
-       free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id);
+       free_unused_pipes_for_plane(ctx, state, plane, &scratch->pipe_pool, stream->stream_id, plane_index);
 }
 
 static unsigned int get_mpc_factor(struct dml2_context *ctx,
@@ -768,7 +778,7 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx,
        unsigned int plane_id;
        unsigned int cfg_idx;
 
-       get_plane_id(state, status->plane_states[plane_idx], stream_id, &plane_id);
+       get_plane_id(ctx, state, status->plane_states[plane_idx], stream_id, plane_idx, &plane_id);
        cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
        if (ctx->architecture == dml2_architecture_20)
                return (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
@@ -911,26 +921,14 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
        unsigned int stream_id;
 
        const unsigned int *ODMMode, *DPPPerSurface;
-       unsigned int odm_mode_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0}, dpp_per_surface_array[__DML2_WRAPPER_MAX_STREAMS_PLANES__] = {0};
        struct dc_pipe_mapping_scratch scratch;
 
        if (ctx->config.map_dc_pipes_with_callbacks)
                return map_dc_pipes_with_callbacks(
                                ctx, state, disp_cfg, mapping, existing_state);
 
-       if (ctx->architecture == dml2_architecture_21) {
-               /*
-                * Extract ODM and DPP outputs from DML2.1 and map them in an array as required for pipe mapping in dml2_map_dc_pipes.
-                * As data cannot be directly extracted in const pointers, assign these arrays to const pointers before proceeding to
-                * maximize the reuse of existing code. Const pointers are required because dml2.0 dml_display_cfg_st is const.
-                *
-                */
-               ODMMode = (const unsigned int *)odm_mode_array;
-               DPPPerSurface = (const unsigned int *)dpp_per_surface_array;
-       } else {
-               ODMMode = (unsigned int *)disp_cfg->hw.ODMMode;
-               DPPPerSurface = disp_cfg->hw.DPPPerSurface;
-       }
+       ODMMode = (unsigned int *)disp_cfg->hw.ODMMode;
+       DPPPerSurface = disp_cfg->hw.DPPPerSurface;
 
        for (stream_index = 0; stream_index < state->stream_count; stream_index++) {
                memset(&scratch, 0, sizeof(struct dc_pipe_mapping_scratch));
@@ -958,8 +956,8 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
 
                for (plane_index = 0; plane_index < state->stream_status[stream_index].plane_count; plane_index++) {
                        // Planes are ordered top to bottom.
-                       if (get_plane_id(state, state->stream_status[stream_index].plane_states[plane_index],
-                               stream_id, &plane_id)) {
+                       if (get_plane_id(ctx, state, state->stream_status[stream_index].plane_states[plane_index],
+                               stream_id, plane_index, &plane_id)) {
                                plane_disp_cfg_index = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
 
                                // Setup mpc_info for this plane
@@ -983,7 +981,8 @@ bool dml2_map_dc_pipes(struct dml2_context *ctx, struct dc_state *state, const s
                                // Clear the pool assignment scratch (which is per plane)
                                memset(&scratch.pipe_pool, 0, sizeof(struct dc_plane_pipe_pool));
 
-                               map_pipes_for_plane(ctx, state, state->streams[stream_index], state->stream_status[stream_index].plane_states[plane_index], &scratch, existing_state);
+                               map_pipes_for_plane(ctx, state, state->streams[stream_index],
+                                       state->stream_status[stream_index].plane_states[plane_index], plane_index, &scratch, existing_state);
                        } else {
                                // Plane ID cannot be generated, therefore no DML mapping can be performed.
                                ASSERT(false);
index ed5b767d46e030cd2a427e79a10c46094b022c4d..1cf8a884c0fbefaac8f7067d84196fc9cd0689b8 100644 (file)
@@ -75,6 +75,8 @@ struct dml2_dml_to_dc_pipe_mapping {
        bool dml_pipe_idx_to_stream_id_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
        unsigned int dml_pipe_idx_to_plane_id[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
        bool dml_pipe_idx_to_plane_id_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
+       unsigned int dml_pipe_idx_to_plane_index[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
+       bool dml_pipe_idx_to_plane_index_valid[__DML2_WRAPPER_MAX_STREAMS_PLANES__];
 };
 
 struct dml2_wrapper_scratch {
@@ -96,6 +98,7 @@ struct dml2_wrapper_scratch {
 
        struct dml2_dml_to_dc_pipe_mapping dml_to_dc_pipe_mapping;
        bool enable_flexible_pipe_mapping;
+       bool plane_duplicate_exists;
 };
 
 struct dml2_helper_det_policy_scratch {
@@ -104,7 +107,6 @@ struct dml2_helper_det_policy_scratch {
 
 enum dml2_architecture {
        dml2_architecture_20,
-       dml2_architecture_21
 };
 
 struct dml2_context {
index 89836f175a138e924253adedd5e13d602b004e21..75171bee6f7164a9a93b9e2e8e4c2a34690ffddc 100644 (file)
@@ -231,6 +231,7 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
                out->num_chans = 4;
                out->round_trip_ping_latency_dcfclk_cycles = 106;
                out->smn_latency_us = 2;
+               out->dispclk_dppclk_vco_speed_mhz = 3600;
                break;
 
        case dml_project_dcn351:
@@ -930,10 +931,11 @@ static unsigned int map_stream_to_dml_display_cfg(const struct dml2_context *dml
        return location;
 }
 
-static bool get_plane_id(const struct dc_state *context, const struct dc_plane_state *plane,
-               unsigned int stream_id, unsigned int *plane_id)
+static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *context, const struct dc_plane_state *plane,
+               unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id)
 {
        int i, j;
+       bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists;
 
        if (!plane_id)
                return false;
@@ -941,7 +943,8 @@ static bool get_plane_id(const struct dc_state *context, const struct dc_plane_s
        for (i = 0; i < context->stream_count; i++) {
                if (context->streams[i]->stream_id == stream_id) {
                        for (j = 0; j < context->stream_status[i].plane_count; j++) {
-                               if (context->stream_status[i].plane_states[j] == plane) {
+                               if (context->stream_status[i].plane_states[j] == plane &&
+                                       (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) {
                                        *plane_id = (i << 16) | j;
                                        return true;
                                }
@@ -953,13 +956,13 @@ static bool get_plane_id(const struct dc_state *context, const struct dc_plane_s
 }
 
 static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2, const struct dc_plane_state *plane,
-               const struct dc_state *context, const struct dml_display_cfg_st *dml_dispcfg, unsigned int stream_id)
+               const struct dc_state *context, const struct dml_display_cfg_st *dml_dispcfg, unsigned int stream_id, int plane_index)
 {
        unsigned int plane_id;
        int i = 0;
        int location = -1;
 
-       if (!get_plane_id(context, plane, stream_id, &plane_id)) {
+       if (!get_plane_id(context->bw_ctx.dml2, context, plane, stream_id, plane_index, &plane_id)) {
                ASSERT(false);
                return -1;
        }
@@ -990,7 +993,41 @@ static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struc
        }
 }
 
-void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
+static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state)
+{
+       unsigned int i;
+       unsigned int pipe_index = 0;
+       unsigned int plane_index = 0;
+       struct dml2_dml_to_dc_pipe_mapping *dml_to_dc_pipe_mapping = &dml2->v20.scratch.dml_to_dc_pipe_mapping;
+
+       for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
+               dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index_valid[i] = false;
+               dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index[i] = 0;
+       }
+
+       for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
+               struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+               if (!pipe || !pipe->stream || !pipe->plane_state)
+                       continue;
+
+               while (pipe) {
+                       pipe_index = pipe->pipe_idx;
+
+                       if (pipe->stream && dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index_valid[pipe_index] == false) {
+                               dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index[pipe_index] = plane_index;
+                               plane_index++;
+                               dml_to_dc_pipe_mapping->dml_pipe_idx_to_plane_index_valid[pipe_index] = true;
+                       }
+
+                       pipe = pipe->bottom_pipe;
+               }
+
+               plane_index = 0;
+       }
+}
+
+void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
 {
        int i = 0, j = 0;
        int disp_cfg_stream_location, disp_cfg_plane_location;
@@ -1007,6 +1044,8 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d
        dml_dispcfg->plane.GPUVMMaxPageTableLevels = 4;
        dml_dispcfg->plane.HostVMEnable = false;
 
+       dml2_populate_pipe_to_plane_index_mapping(dml2, context);
+
        for (i = 0; i < context->stream_count; i++) {
                disp_cfg_stream_location = map_stream_to_dml_display_cfg(dml2, context->streams[i], dml_dispcfg);
 
@@ -1043,7 +1082,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d
                } else {
                        for (j = 0; j < context->stream_status[i].plane_count; j++) {
                                disp_cfg_plane_location = map_plane_to_dml_display_cfg(dml2,
-                                       context->stream_status[i].plane_states[j], context, dml_dispcfg, context->streams[i]->stream_id);
+                                       context->stream_status[i].plane_states[j], context, dml_dispcfg, context->streams[i]->stream_id, j);
 
                                if (disp_cfg_plane_location < 0)
                                        disp_cfg_plane_location = dml_dispcfg->num_surfaces++;
@@ -1067,7 +1106,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct d
 
                                dml_dispcfg->plane.BlendingAndTiming[disp_cfg_plane_location] = disp_cfg_stream_location;
 
-                               if (get_plane_id(context, context->stream_status[i].plane_states[j], context->streams[i]->stream_id,
+                               if (get_plane_id(dml2, context, context->stream_status[i].plane_states[j], context->streams[i]->stream_id, j,
                                        &dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id[disp_cfg_plane_location]))
                                        dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_plane_id_valid[disp_cfg_plane_location] = true;
 
index dac6d27b14cde688b2ed1d2aed970b5b9dd352b6..d764773938f4ef46b331e9b24bd5d6644241bd2b 100644 (file)
@@ -34,7 +34,7 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
 void dml2_translate_ip_params(const struct dc *in_dc, struct ip_params_st *out);
 void dml2_translate_socbb_params(const struct dc *in_dc, struct soc_bounding_box_st *out);
 void dml2_translate_soc_states(const struct dc *in_dc, struct soc_states_st *out, int num_states);
-void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, const struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg);
+void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg);
 void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs, struct _vcs_dpi_dml_display_dlg_regs_st *disp_dlg_regs, struct _vcs_dpi_dml_display_ttu_regs_st *disp_ttu_regs, struct pipe_ctx *out);
 bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe);
 
index 69fd96f4f3b03799c3010dcdd4860d11fc84384f..2498b8341199bac8ef76c51cace4028450594f9e 100644 (file)
@@ -209,10 +209,11 @@ static int find_dml_pipe_idx_by_plane_id(struct dml2_context *ctx, unsigned int
        return -1;
 }
 
-static bool get_plane_id(const struct dc_state *state, const struct dc_plane_state *plane,
-       unsigned int stream_id, unsigned int *plane_id)
+static bool get_plane_id(struct dml2_context *dml2, const struct dc_state *state, const struct dc_plane_state *plane,
+       unsigned int stream_id, unsigned int plane_index, unsigned int *plane_id)
 {
        int i, j;
+       bool is_plane_duplicate = dml2->v20.scratch.plane_duplicate_exists;
 
        if (!plane_id)
                return false;
@@ -220,7 +221,8 @@ static bool get_plane_id(const struct dc_state *state, const struct dc_plane_sta
        for (i = 0; i < state->stream_count; i++) {
                if (state->streams[i]->stream_id == stream_id) {
                        for (j = 0; j < state->stream_status[i].plane_count; j++) {
-                               if (state->stream_status[i].plane_states[j] == plane) {
+                               if (state->stream_status[i].plane_states[j] == plane &&
+                                       (!is_plane_duplicate || (is_plane_duplicate && (j == plane_index)))) {
                                        *plane_id = (i << 16) | j;
                                        return true;
                                }
@@ -304,8 +306,9 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
                 * there is a need to know which DML pipe index maps to which DC pipe. The code below
                 * finds a dml_pipe_index from the plane id if a plane is valid. If a plane is not valid then
                 * it finds a dml_pipe_index from the stream id. */
-               if (get_plane_id(context, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state,
-                       context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id, &plane_id)) {
+               if (get_plane_id(in_ctx, context, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state,
+                       context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id,
+                       in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[context->res_ctx.pipe_ctx[dc_pipe_ctx_index].pipe_idx], &plane_id)) {
                        dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id);
                } else {
                        dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->stream_id);
@@ -445,8 +448,9 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc
        for (i = 0; i < MAX_PIPES; i++) {
                if (!display_state->res_ctx.pipe_ctx[i].stream)
                        continue;
-               if (get_plane_id(display_state, display_state->res_ctx.pipe_ctx[i].plane_state,
-                       display_state->res_ctx.pipe_ctx[i].stream->stream_id, &plane_id))
+               if (get_plane_id(in_ctx, display_state, display_state->res_ctx.pipe_ctx[i].plane_state,
+                       display_state->res_ctx.pipe_ctx[i].stream->stream_id,
+                       in_ctx->v20.scratch.dml_to_dc_pipe_mapping.dml_pipe_idx_to_plane_index[display_state->res_ctx.pipe_ctx[i].pipe_idx], &plane_id))
                        dml_pipe_idx = find_dml_pipe_idx_by_plane_id(in_ctx, plane_id);
                else
                        dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(in_ctx, display_state->res_ctx.pipe_ctx[i].stream->stream_id);
index 0a06bf3b135aa685dffdd75e2ad94b245e067399..8f231418870f2afbad672878d005088771335271 100644 (file)
@@ -639,7 +639,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
        return result;
 }
 
-static bool dml2_validate_only(const struct dc_state *context)
+static bool dml2_validate_only(struct dc_state *context)
 {
        struct dml2_context *dml2 = context->bw_ctx.dml2;
        unsigned int result = 0;
index 3966845c769453888675b80c32ede95c0b62fa5f..e8b5f17beb9636ca95ced0f4cd05631b02f42e3a 100644 (file)
@@ -512,6 +512,11 @@ static bool intersect_dsc_caps(
                dsc_sink_caps->slice_caps1.bits.NUM_SLICES_4 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_4;
        dsc_common_caps->slice_caps.bits.NUM_SLICES_8 =
                dsc_sink_caps->slice_caps1.bits.NUM_SLICES_8 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_8;
+       dsc_common_caps->slice_caps.bits.NUM_SLICES_12 =
+               dsc_sink_caps->slice_caps1.bits.NUM_SLICES_12 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_12;
+       dsc_common_caps->slice_caps.bits.NUM_SLICES_16 =
+               dsc_sink_caps->slice_caps2.bits.NUM_SLICES_16 && dsc_enc_caps->slice_caps.bits.NUM_SLICES_16;
+
        if (!dsc_common_caps->slice_caps.raw)
                return false;
 
@@ -703,6 +708,12 @@ static int get_available_dsc_slices(union dsc_enc_slice_caps slice_caps, int *av
        if (slice_caps.bits.NUM_SLICES_8)
                available_slices[idx++] = 8;
 
+       if (slice_caps.bits.NUM_SLICES_12)
+               available_slices[idx++] = 12;
+
+       if (slice_caps.bits.NUM_SLICES_16)
+               available_slices[idx++] = 16;
+
        return idx;
 }
 
index 2fefdf40612da817f5615c2afbd9f26402490ce3..44b4df6469d1aa7a9e2be6af7e43eb5610b62852 100644 (file)
@@ -1183,7 +1183,23 @@ struct dce_hwseq_registers {
        type LONO_FGCG_REP_DIS;\
        type LONO_DISPCLK_GATE_DISABLE;\
        type LONO_SOCCLK_GATE_DISABLE;\
-       type LONO_DMCUBCLK_GATE_DISABLE;
+       type LONO_DMCUBCLK_GATE_DISABLE;\
+       type SYMCLKA_FE_GATE_DISABLE;\
+       type SYMCLKB_FE_GATE_DISABLE;\
+       type SYMCLKC_FE_GATE_DISABLE;\
+       type SYMCLKD_FE_GATE_DISABLE;\
+       type SYMCLKE_FE_GATE_DISABLE;\
+       type HDMICHARCLK0_GATE_DISABLE;\
+       type SYMCLKA_GATE_DISABLE;\
+       type SYMCLKB_GATE_DISABLE;\
+       type SYMCLKC_GATE_DISABLE;\
+       type SYMCLKD_GATE_DISABLE;\
+       type SYMCLKE_GATE_DISABLE;\
+       type PHYASYMCLK_ROOT_GATE_DISABLE;\
+       type PHYBSYMCLK_ROOT_GATE_DISABLE;\
+       type PHYCSYMCLK_ROOT_GATE_DISABLE;\
+       type PHYDSYMCLK_ROOT_GATE_DISABLE;\
+       type PHYESYMCLK_ROOT_GATE_DISABLE;
 
 struct dce_hwseq_shift {
        HWSEQ_REG_FIELD_LIST(uint8_t)
index 1b9f21fd4f1732989bc8b63e43cda974c254ff5a..6a65af8c36b904fdb1cb3308a149174925c3b404 100644 (file)
@@ -615,12 +615,6 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
                    pipe->stream->fpo_in_use)) {
                        if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
                                hubp->funcs->hubp_update_force_pstate_disallow(hubp, false);
-               }
-
-               /* Today only FPO uses cursor P-State force. Only clear cursor P-State force
-                * if it's not FPO.
-                */
-               if (!pipe->stream || !pipe->stream->fpo_in_use) {
                        if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow)
                                hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, false);
                }
@@ -632,17 +626,10 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
                struct hubp *hubp = pipe->plane_res.hubp;
 
-               if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+               if (pipe->stream && (pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
+                               pipe->stream->fpo_in_use)) {
                        if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
                                hubp->funcs->hubp_update_force_pstate_disallow(hubp, true);
-               }
-
-               if (pipe->stream && pipe->stream->fpo_in_use) {
-                       if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
-                               hubp->funcs->hubp_update_force_pstate_disallow(hubp, true);
-                       /* For now only force cursor p-state disallow for FPO
-                        * Needs to be added for subvp once FW side gets updated
-                        */
                        if (hubp && hubp->funcs->hubp_update_force_cursor_pstate_disallow)
                                hubp->funcs->hubp_update_force_cursor_pstate_disallow(hubp, true);
                }
index 34737d60b965b81671c1fc2b5e1b4bda3b854c49..5a8258287438e9fe27024173c55c0dd345f4ee9f 100644 (file)
@@ -138,16 +138,25 @@ void dcn35_init_hw(struct dc *dc)
        if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
                dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
 
-       REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-       REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0x3F000000);
-       REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0x1f7c3fcf);
-
        //dcn35_set_dmu_fgcg(hws, dc->debug.enable_fine_grain_clock_gating.bits.dmu);
 
        if (!dcb->funcs->is_accelerated_mode(dcb)) {
                /*this calls into dmubfw to do the init*/
                hws->funcs.bios_golden_init(dc);
        }
+
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL2,  0);
+
+       /* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */
+       REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1,
+                       PHYBSYMCLK_ROOT_GATE_DISABLE, 1,
+                       PHYCSYMCLK_ROOT_GATE_DISABLE, 1,
+                       PHYDSYMCLK_ROOT_GATE_DISABLE, 1,
+                       PHYESYMCLK_ROOT_GATE_DISABLE, 1);
+
+       REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0x1f7c3fcf);
+
        // Initialize the dccg
        if (res_pool->dccg->funcs->dccg_init)
                res_pool->dccg->funcs->dccg_init(res_pool->dccg);
@@ -274,7 +283,19 @@ void dcn35_init_hw(struct dc *dc)
        if (!dc->debug.disable_clock_gate) {
                /* enable all DCN clock gating */
                REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-               REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+               REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, 0,
+                               SYMCLKB_FE_GATE_DISABLE, 0,
+                               SYMCLKC_FE_GATE_DISABLE, 0,
+                               SYMCLKD_FE_GATE_DISABLE, 0,
+                               SYMCLKE_FE_GATE_DISABLE, 0);
+               REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, 0);
+               REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, 0,
+                               SYMCLKB_GATE_DISABLE, 0,
+                               SYMCLKC_GATE_DISABLE, 0,
+                               SYMCLKD_GATE_DISABLE, 0,
+                               SYMCLKE_GATE_DISABLE, 0);
+
                REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
        }
 
@@ -311,6 +332,9 @@ void dcn35_init_hw(struct dc *dc)
        if (dc->res_pool->pg_cntl) {
                if (dc->res_pool->pg_cntl->funcs->init_pg_status)
                        dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
+
+               if (dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22)
+                       dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22(dc->res_pool->pg_cntl, false);
        }
 }
 
index 13f12f2a3f81338dd354e4fe94958f379b2382e1..ce2f0c0e82bd65c67af0b725fd3ed26fbcd369ec 100644 (file)
@@ -141,6 +141,11 @@ struct dccg_funcs {
                        enum physymclk_clock_source clk_src,
                        bool force_enable);
 
+       void (*set_physymclk_root_clock_gating)(
+                       struct dccg *dccg,
+                       int phy_inst,
+                       bool enable);
+
        void (*set_dtbclk_dto)(
                        struct dccg *dccg,
                        const struct dtbclk_dto_params *params);
index d7b8d586b5237e9a8ecb67c1cbf9254a2f38c1d3..4b27f29d0d80d9666c4c4af350f4d5ddf5b88fea 100644 (file)
@@ -76,6 +76,8 @@ union dsc_enc_slice_caps {
                uint8_t NUM_SLICES_3 : 1; /* This one is not per DSC spec, but our encoder supports it */
                uint8_t NUM_SLICES_4 : 1;
                uint8_t NUM_SLICES_8 : 1;
+               uint8_t NUM_SLICES_12 : 1;
+               uint8_t NUM_SLICES_16 : 1;
        } bits;
        uint8_t raw;
 };
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/optc.h
new file mode 100644 (file)
index 0000000..9a8bf6e
--- /dev/null
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+/**
+ * DOC: overview
+ *
+ * Output Pipe Timing Combiner (OPTC) includes two major functional blocks:
+ * Output Data Mapper (ODM) and Output Timing Generator (OTG).
+ *
+ * - ODM: It is Output Data Mapping block. It can combine input data from
+ *   multiple OPP data pipes into one single data stream or split data from one
+ *   OPP data pipe into multiple data streams or just bypass OPP data to DIO.
+ * - OTG: It is Output Timing Generator. It generates display timing signals to
+ *   drive the display output.
+ */
+
+#ifndef __DC_OPTC_H__
+#define __DC_OPTC_H__
+
+#include "timing_generator.h"
+
+struct optc {
+       struct timing_generator base;
+
+       const struct dcn_optc_registers *tg_regs;
+       const struct dcn_optc_shift *tg_shift;
+       const struct dcn_optc_mask *tg_mask;
+
+       int opp_count;
+
+       uint32_t max_h_total;
+       uint32_t max_v_total;
+
+       uint32_t min_h_blank;
+
+       uint32_t min_h_sync_width;
+       uint32_t min_v_sync_width;
+       uint32_t min_v_blank;
+       uint32_t min_v_blank_interlace;
+
+       int vstartup_start;
+       int vupdate_offset;
+       int vupdate_width;
+       int vready_offset;
+       struct dc_crtc_timing orginal_patched_timing;
+       enum signal_type signal;
+};
+
+struct dcn_otg_state {
+       uint32_t v_blank_start;
+       uint32_t v_blank_end;
+       uint32_t v_sync_a_pol;
+       uint32_t v_total;
+       uint32_t v_total_max;
+       uint32_t v_total_min;
+       uint32_t v_total_min_sel;
+       uint32_t v_total_max_sel;
+       uint32_t v_sync_a_start;
+       uint32_t v_sync_a_end;
+       uint32_t h_blank_start;
+       uint32_t h_blank_end;
+       uint32_t h_sync_a_start;
+       uint32_t h_sync_a_end;
+       uint32_t h_sync_a_pol;
+       uint32_t h_total;
+       uint32_t underflow_occurred_status;
+       uint32_t otg_enabled;
+       uint32_t blank_enabled;
+       uint32_t vertical_interrupt1_en;
+       uint32_t vertical_interrupt1_line;
+       uint32_t vertical_interrupt2_en;
+       uint32_t vertical_interrupt2_line;
+};
+
+void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s);
+
+bool optc1_get_hw_timing(struct timing_generator *tg, struct dc_crtc_timing *hw_crtc_timing);
+
+bool optc1_validate_timing(struct timing_generator *optc,
+                          const struct dc_crtc_timing *timing);
+
+void optc1_program_timing(struct timing_generator *optc,
+                         const struct dc_crtc_timing *dc_crtc_timing,
+                         int vready_offset,
+                         int vstartup_start,
+                         int vupdate_offset,
+                         int vupdate_width,
+                         const enum signal_type signal,
+                         bool use_vbios);
+
+void optc1_setup_vertical_interrupt0(struct timing_generator *optc,
+                                    uint32_t start_line,
+                                    uint32_t end_line);
+
+void optc1_setup_vertical_interrupt1(struct timing_generator *optc,
+                                    uint32_t start_line);
+
+void optc1_setup_vertical_interrupt2(struct timing_generator *optc,
+                                    uint32_t start_line);
+
+void optc1_program_global_sync(struct timing_generator *optc,
+                              int vready_offset,
+                              int vstartup_start,
+                              int vupdate_offset,
+                              int vupdate_width);
+
+bool optc1_disable_crtc(struct timing_generator *optc);
+
+bool optc1_is_counter_moving(struct timing_generator *optc);
+
+void optc1_get_position(struct timing_generator *optc,
+                       struct crtc_position *position);
+
+uint32_t optc1_get_vblank_counter(struct timing_generator *optc);
+
+void optc1_get_crtc_scanoutpos(struct timing_generator *optc,
+                              uint32_t *v_blank_start,
+                              uint32_t *v_blank_end,
+                              uint32_t *h_position,
+                              uint32_t *v_position);
+
+void optc1_set_early_control(struct timing_generator *optc,
+                            uint32_t early_cntl);
+
+void optc1_wait_for_state(struct timing_generator *optc,
+                         enum crtc_state state);
+
+void optc1_set_blank(struct timing_generator *optc,
+                    bool enable_blanking);
+
+bool optc1_is_blanked(struct timing_generator *optc);
+
+void optc1_program_blank_color(struct timing_generator *optc,
+                              const struct tg_color *black_color);
+
+bool optc1_did_triggered_reset_occur(struct timing_generator *optc);
+
+void optc1_enable_reset_trigger(struct timing_generator *optc, int source_tg_inst);
+
+void optc1_disable_reset_trigger(struct timing_generator *optc);
+
+void optc1_lock(struct timing_generator *optc);
+
+void optc1_unlock(struct timing_generator *optc);
+
+void optc1_enable_optc_clock(struct timing_generator *optc, bool enable);
+
+void optc1_set_drr(struct timing_generator *optc,
+                  const struct drr_params *params);
+
+void optc1_set_vtotal_min_max(struct timing_generator *optc, int vtotal_min, int vtotal_max);
+
+void optc1_set_static_screen_control(struct timing_generator *optc,
+                                    uint32_t event_triggers,
+                                    uint32_t num_frames);
+
+void optc1_program_stereo(struct timing_generator *optc,
+                         const struct dc_crtc_timing *timing,
+                         struct crtc_stereo_flags *flags);
+
+bool optc1_is_stereo_left_eye(struct timing_generator *optc);
+
+void optc1_clear_optc_underflow(struct timing_generator *optc);
+
+void optc1_tg_init(struct timing_generator *optc);
+
+bool optc1_is_tg_enabled(struct timing_generator *optc);
+
+bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
+
+void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
+
+void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable);
+
+bool optc1_get_otg_active_size(struct timing_generator *optc,
+                              uint32_t *otg_active_width,
+                              uint32_t *otg_active_height);
+
+void optc1_enable_crtc_reset(struct timing_generator *optc,
+                            int source_tg_inst,
+                            struct crtc_trigger_info *crtc_tp);
+
+bool optc1_configure_crc(struct timing_generator *optc, const struct crc_params *params);
+
+bool optc1_get_crc(struct timing_generator *optc,
+                  uint32_t *r_cr,
+                  uint32_t *g_y,
+                  uint32_t *b_cb);
+
+bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+
+void optc1_set_vtg_params(struct timing_generator *optc,
+                         const struct dc_crtc_timing *dc_crtc_timing,
+                         bool program_fp2);
+
+#endif
index 00ea3864dd4df4bbd5f8d4c15b6c4aaa4eb8e306..b9812afb886be16937ee1aae8d763600e7815e08 100644 (file)
@@ -47,6 +47,8 @@ struct pg_cntl_funcs {
        void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on);
        void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
        void (*init_pg_status)(struct pg_cntl *pg_cntl);
+
+       void (*set_force_poweron_domain22)(struct pg_cntl *pg_cntl, bool power_on);
 };
 
 #endif //__DC_PG_CNTL_H__
index 21a39afd274bb64560a3b8ac2dd4c2cdc9a6ad95..2d152b68a501f41a48c180f7953365aa6a48129c 100644 (file)
@@ -53,6 +53,7 @@ static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate)
                return LINK_RATE_UHBR10;
        case DP_TEST_LINK_RATE_UHBR20:
                return LINK_RATE_UHBR20;
+       case DP_TEST_LINK_RATE_UHBR13_5_LEGACY:
        case DP_TEST_LINK_RATE_UHBR13_5:
                return LINK_RATE_UHBR13_5;
        default:
@@ -119,6 +120,11 @@ static void dp_test_send_link_training(struct dc_link *link)
                        1);
        link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate);
 
+       if (link_settings.link_rate == LINK_RATE_UNKNOWN) {
+               DC_LOG_ERROR("%s: Invalid test link rate.", __func__);
+               ASSERT(0);
+       }
+
        /* Set preferred link settings */
        link->verified_link_cap.lane_count = link_settings.lane_count;
        link->verified_link_cap.link_rate = link_settings.link_rate;
@@ -457,7 +463,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
                        controller_color_space = pipe_ctx->stream_res.test_pattern_params.color_space;
 
                        if (controller_color_space == CONTROLLER_DP_COLOR_SPACE_UDEFINED) {
-                               DC_LOG_WARNING("%s: Color space must be defined for test pattern", __func__);
+                               DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__);
                                ASSERT(0);
                        }
 
@@ -592,6 +598,7 @@ bool dp_set_test_pattern(
        const unsigned char *p_custom_pattern,
        unsigned int cust_pattern_size)
 {
+       const struct link_hwss *link_hwss;
        struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
        struct pipe_ctx *pipe_ctx = NULL;
        unsigned int lane;
@@ -828,11 +835,9 @@ bool dp_set_test_pattern(
 
                pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
                /* update MSA to requested color space */
-               pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
-                               &pipe_ctx->stream->timing,
-                               color_space,
-                               pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
-                               link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+               link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
+               pipe_ctx->stream->output_color_space = color_space;
+               link_hwss->setup_stream_attribute(pipe_ctx);
 
                if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
                        if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
index 34bf8a9ef7380938d063476c466fb902cc974c04..0c00e94e90b1d598fd1442fe46cd7629c821e779 100644 (file)
@@ -184,14 +184,17 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
        return false;
 }
 
-static bool handle_hpd_irq_replay_sink(struct dc_link *link)
+static void handle_hpd_irq_replay_sink(struct dc_link *link)
 {
        union dpcd_replay_configuration replay_configuration;
        /*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/
        union psr_error_status replay_error_status;
 
+       if (link->replay_settings.config.force_disable_desync_error_check)
+               return;
+
        if (!link->replay_settings.replay_feature_enabled)
-               return false;
+               return;
 
        dm_helpers_dp_read_dpcd(
                link->ctx,
@@ -207,6 +210,9 @@ static bool handle_hpd_irq_replay_sink(struct dc_link *link)
                &replay_error_status.raw,
                sizeof(replay_error_status.raw));
 
+       if (replay_configuration.bits.DESYNC_ERROR_STATUS)
+               link->replay_settings.config.received_desync_error_hpd = 1;
+
        link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR =
                replay_error_status.bits.LINK_CRC_ERROR;
        link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR =
@@ -243,7 +249,6 @@ static bool handle_hpd_irq_replay_sink(struct dc_link *link)
                        edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
                }
        }
-       return true;
 }
 
 void dp_handle_link_loss(struct dc_link *link)
@@ -424,9 +429,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
                /* PSR-related error was detected and handled */
                return true;
 
-       if (handle_hpd_irq_replay_sink(link))
-               /* Replay-related error was detected and handled */
-               return true;
+       handle_hpd_irq_replay_sink(link);
 
        /* If PSR-related error handled, Main link may be off,
         * so do not handle as a normal sink status change interrupt.
index bc907ae2052d5eea67506cac242316751b8310c6..ed4379c047151a14d2d70cc00ba0c6656ad12573 100644 (file)
@@ -583,6 +583,7 @@ union dmub_fw_boot_status {
                uint32_t fams_enabled : 1; /**< 1 if VBIOS data is deferred programmed */
                uint32_t detection_required: 1; /**<  if detection need to be triggered by driver */
                uint32_t hw_power_init_done: 1; /**< 1 if hw power init is completed */
+               uint32_t ono_regions_enabled: 1; /**< 1 if ONO regions are enabled */
        } bits; /**< status bits */
        uint32_t all; /**< 32-bit access to status bits */
 };
@@ -599,6 +600,7 @@ enum dmub_fw_boot_status_bit {
        DMUB_FW_BOOT_STATUS_BIT_FAMS_ENABLED = (1 << 5), /**< 1 if FAMS is enabled*/
        DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED = (1 << 6), /**< 1 if detection need to be triggered by driver*/
        DMUB_FW_BOOT_STATUS_BIT_HW_POWER_INIT_DONE = (1 << 7), /**< 1 if hw power init is completed */
+       DMUB_FW_BOOT_STATUS_BIT_ONO_REGIONS_ENABLED = (1 << 8), /**< 1 if ONO regions are enabled */
 };
 
 /* Register bit definition for SCRATCH5 */
@@ -617,9 +619,12 @@ enum dmub_lvtma_status_bit {
 };
 
 enum dmub_ips_disable_type {
-       DMUB_IPS_DISABLE_IPS1 = 1,
-       DMUB_IPS_DISABLE_IPS2 = 2,
-       DMUB_IPS_DISABLE_IPS2_Z10 = 3,
+       DMUB_IPS_ENABLE = 0,
+       DMUB_IPS_DISABLE_ALL = 1,
+       DMUB_IPS_DISABLE_IPS1 = 2,
+       DMUB_IPS_DISABLE_IPS2 = 3,
+       DMUB_IPS_DISABLE_IPS2_Z10 = 4,
+       DMUB_IPS_DISABLE_DYNAMIC = 5,
 };
 
 #define DMUB_IPS1_ALLOW_MASK 0x00000001
@@ -653,8 +658,8 @@ union dmub_fw_boot_options {
                uint32_t disable_clk_ds: 1; /* 1 if disallow dispclk_ds and dppclk_ds*/
                uint32_t disable_timeout_recovery : 1; /* 1 if timeout recovery should be disabled */
                uint32_t ips_pg_disable: 1; /* 1 to disable ONO domains power gating*/
-               uint32_t ips_disable: 2; /* options to disable ips support*/
-               uint32_t reserved : 10; /**< reserved */
+               uint32_t ips_disable: 3; /* options to disable ips support*/
+               uint32_t reserved : 9; /**< reserved */
        } bits; /**< boot bits */
        uint32_t all; /**< 32-bit access to bits */
 };
@@ -2098,7 +2103,7 @@ enum psr_version {
        /**
         * PSR not supported.
         */
-       PSR_VERSION_UNSUPPORTED                 = 0xFFFFFFFF,
+       PSR_VERSION_UNSUPPORTED                 = 0xFF, // psr_version field is only 8 bits wide
 };
 
 /**
@@ -3620,7 +3625,6 @@ struct dmub_cmd_abm_pause_data {
        uint8_t pad[1];
 };
 
-
 /**
  * Definition of a DMUB_CMD__ABM_PAUSE command.
  */
@@ -4046,6 +4050,7 @@ union dmub_rb_cmd {
         * Definition of a DMUB_CMD__MALL command.
         */
        struct dmub_rb_cmd_mall mall;
+
        /**
         * Definition of a DMUB_CMD__CAB command.
         */
@@ -4067,6 +4072,7 @@ union dmub_rb_cmd {
         * Definition of DMUB_CMD__PANEL_CNTL commands.
         */
        struct dmub_rb_cmd_panel_cntl panel_cntl;
+
        /**
         * Definition of a DMUB_CMD__ABM_SET_PIPE command.
         */
@@ -4470,10 +4476,6 @@ static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
                uint64_t *data = (uint64_t *)((uint8_t *)(rb->base_address) + rptr);
                uint8_t i;
 
-               /* Don't remove this.
-                * The contents need to actually be read from the ring buffer
-                * for this function to be effective.
-                */
                for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
                        (void)READ_ONCE(*data++);
 
@@ -4522,5 +4524,4 @@ static inline void dmub_rb_get_return_data(struct dmub_rb *rb,
 //==============================================================================
 //</DMUB_RB>====================================================================
 //==============================================================================
-
 #endif /* _DMUB_CMD_H_ */
index 6e29a185de515304a2220c782f776a5ed888250e..765d9ca2316fb376bf3adac2ccf5316a81432dd8 100644 (file)
 //MP0_SMN_C2PMSG_103
 #define MP0_SMN_C2PMSG_103__CONTENT__SHIFT                                                                    0x0
 #define MP0_SMN_C2PMSG_103__CONTENT_MASK                                                                      0xFFFFFFFFL
+//MP0_SMN_C2PMSG_109
+#define MP0_SMN_C2PMSG_109__CONTENT__SHIFT                                                                    0x0
+#define MP0_SMN_C2PMSG_109__CONTENT_MASK                                                                      0xFFFFFFFFL
+//MP0_SMN_C2PMSG_126
+#define MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING__SHIFT                                                       0x0
+#define MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD__SHIFT                                                            0x1
+#define MP0_SMN_C2PMSG_126__GPU_ERR_WAFL_LINK_TRAINING__SHIFT                                                 0x2
+#define MP0_SMN_C2PMSG_126__GPU_ERR_XGMI_LINK_TRAINING__SHIFT                                                 0x3
+#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_CP_LINK_TRAINING__SHIFT                                               0x4
+#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_DP_LINK_TRAINING__SHIFT                                               0x5
+#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_MEM_TEST__SHIFT                                                       0x6
+#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_BIST_TEST__SHIFT                                                      0x7
+#define MP0_SMN_C2PMSG_126__SOCKET_ID__SHIFT                                                                  0x8
+#define MP0_SMN_C2PMSG_126__AID_ID__SHIFT                                                                     0xb
+#define MP0_SMN_C2PMSG_126__HBM_ID__SHIFT                                                                     0xd
+#define MP0_SMN_C2PMSG_126__BOOT_STATUS__SHIFT                                                                0x1f
+#define MP0_SMN_C2PMSG_126__GPU_ERR_MEM_TRAINING_MASK                                                         0x00000001L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_FW_LOAD_MASK                                                              0x00000002L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_WAFL_LINK_TRAINING_MASK                                                   0x00000004L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_XGMI_LINK_TRAINING_MASK                                                   0x00000008L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_CP_LINK_TRAINING_MASK                                                 0x00000010L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_USR_DP_LINK_TRAINING_MASK                                                 0x00000020L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_MEM_TEST_MASK                                                         0x00000040L
+#define MP0_SMN_C2PMSG_126__GPU_ERR_HBM_BIST_TEST_MASK                                                        0x00000080L
+#define MP0_SMN_C2PMSG_126__SOCKET_ID_MASK                                                                    0x00000700L
+#define MP0_SMN_C2PMSG_126__AID_ID_MASK                                                                       0x00001800L
+#define MP0_SMN_C2PMSG_126__HBM_ID_MASK                                                                       0x00002000L
+#define MP0_SMN_C2PMSG_126__BOOT_STATUS_MASK                                                                  0x80000000L
 //MP0_SMN_IH_CREDIT
 #define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT                                                                0x0
 #define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT                                                                   0x10
index 3201808c2dd81b89999cf72ae789f4c31adcedd0..cd3c40a860293c2430efbf26c9832fc82533de34 100644 (file)
@@ -1080,33 +1080,35 @@ struct gpu_metrics_v3_0 {
        uint16_t                        average_ipu_activity[8];
        /* time filtered per-core C0 residency % [0-100]*/
        uint16_t                        average_core_c0_activity[16];
-       /* time filtered DRAM read bandwidth [GB/sec] */
+       /* time filtered DRAM read bandwidth [MB/sec] */
        uint16_t                        average_dram_reads;
-       /* time filtered DRAM write bandwidth [GB/sec] */
+       /* time filtered DRAM write bandwidth [MB/sec] */
        uint16_t                        average_dram_writes;
 
        /* Driver attached timestamp (in ns) */
        uint64_t                        system_clock_counter;
 
        /* Power/Energy */
-       /* average dGPU + APU power on A + A platform */
+       /* time filtered power used for PPT/STAPM [APU+dGPU] [mW] */
        uint32_t                        average_socket_power;
-       /* average IPU power [W] */
+       /* time filtered IPU power [mW] */
        uint16_t                        average_ipu_power;
-       /* average APU power [W] */
+       /* time filtered APU power [mW] */
        uint32_t                        average_apu_power;
-       /* average dGPU power [W] */
+       /* time filtered GFX power [mW] */
+       uint32_t                        average_gfx_power;
+       /* time filtered dGPU power [mW] */
        uint32_t                        average_dgpu_power;
-       /* sum of core power across all cores in the socket [W] */
-       uint32_t                        average_core_power;
-       /* calculated core power [W] */
-       uint16_t                        core_power[16];
-       /* maximum IRM defined STAPM power limit [W] */
+       /* time filtered sum of core power across all cores in the socket [mW] */
+       uint32_t                        average_all_core_power;
+       /* calculated core power [mW] */
+       uint16_t                        average_core_power[16];
+       /* maximum IRM defined STAPM power limit [mW] */
        uint16_t                        stapm_power_limit;
-       /* time filtered STAPM power limit [W] */
+       /* time filtered STAPM power limit [mW] */
        uint16_t                        current_stapm_power_limit;
 
-       /* Average clocks */
+       /* time filtered clocks [MHz] */
        uint16_t                        average_gfxclk_frequency;
        uint16_t                        average_socclk_frequency;
        uint16_t                        average_vpeclk_frequency;
@@ -1115,7 +1117,7 @@ struct gpu_metrics_v3_0 {
        uint16_t                        average_vclk_frequency;
 
        /* Current clocks */
-       /* target core frequency */
+       /* target core frequency [MHz] */
        uint16_t                        current_coreclk[16];
        /* CCLK frequency limit enforced on classic cores [MHz] */
        uint16_t                        current_core_maxfreq;
index acf3527fff2dbfb9dba466d865c3d48e2171bcb8..08cb79401410ad0768a7bf9d95bea93c2ba2916b 100644 (file)
@@ -491,7 +491,7 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
 int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit)
 {
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-       int ret = -EINVAL;
+       int ret = -EOPNOTSUPP;
 
        if (pp_funcs && pp_funcs->get_apu_thermal_limit) {
                mutex_lock(&adev->pm.mutex);
@@ -505,7 +505,7 @@ int amdgpu_dpm_get_apu_thermal_limit(struct amdgpu_device *adev, uint32_t *limit
 int amdgpu_dpm_set_apu_thermal_limit(struct amdgpu_device *adev, uint32_t limit)
 {
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-       int ret = -EINVAL;
+       int ret = -EOPNOTSUPP;
 
        if (pp_funcs && pp_funcs->set_apu_thermal_limit) {
                mutex_lock(&adev->pm.mutex);
@@ -1182,7 +1182,7 @@ int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
        int ret = 0;
 
        if (!pp_funcs->get_sclk_od)
-               return 0;
+               return -EOPNOTSUPP;
 
        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
@@ -1196,7 +1196,7 @@ int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
        if (is_support_sw_smu(adev))
-               return 0;
+               return -EOPNOTSUPP;
 
        mutex_lock(&adev->pm.mutex);
        if (pp_funcs->set_sclk_od)
@@ -1219,7 +1219,7 @@ int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
        int ret = 0;
 
        if (!pp_funcs->get_mclk_od)
-               return 0;
+               return -EOPNOTSUPP;
 
        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
@@ -1233,7 +1233,7 @@ int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
 
        if (is_support_sw_smu(adev))
-               return 0;
+               return -EOPNOTSUPP;
 
        mutex_lock(&adev->pm.mutex);
        if (pp_funcs->set_mclk_od)
index 517b9fb4624c4586406fb5c883f5c6b775bf2c9f..ca2ece24e1e07bf97fc4a66e3baf25f0ea5acd13 100644 (file)
@@ -989,12 +989,13 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
  * Reading back the files will show you the available power levels within
  * the power state and the clock information for those levels. If deep sleep is
  * applied to a clock, the level will be denoted by a special level 'S:'
- * E.g.,
- *     S: 19Mhz *
- *     0: 615Mhz
- *     1: 800Mhz
- *     2: 888Mhz
- *     3: 1000Mhz
+ * E.g., ::
+ *
+ *  S: 19Mhz *
+ *  0: 615Mhz
+ *  1: 800Mhz
+ *  2: 888Mhz
+ *  3: 1000Mhz
  *
  *
  * To manually adjust these states, first select manual using
@@ -2197,6 +2198,22 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
        } else if (DEVICE_ATTR_IS(xgmi_plpd_policy)) {
                if (amdgpu_dpm_get_xgmi_plpd_mode(adev, NULL) == XGMI_PLPD_NONE)
                        *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_dpm_mclk_od)) {
+               if (amdgpu_dpm_get_mclk_od(adev) == -EOPNOTSUPP)
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_dpm_sclk_od)) {
+               if (amdgpu_dpm_get_sclk_od(adev) == -EOPNOTSUPP)
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(apu_thermal_cap)) {
+               u32 limit;
+
+               if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
+                   -EOPNOTSUPP)
+                       *states = ATTR_STATE_UNSUPPORTED;
+       } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+               if (gc_ver == IP_VERSION(9, 4, 2) ||
+                   gc_ver == IP_VERSION(9, 4, 3))
+                       *states = ATTR_STATE_UNSUPPORTED;
        }
 
        switch (gc_ver) {
@@ -3288,10 +3305,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
        uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
        uint32_t tmp;
 
-       /* under multi-vf mode, the hwmon attributes are all not supported */
-       if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
-               return 0;
-
        /* under pp one vf mode manage of hwmon attributes is not supported */
        if (amdgpu_sriov_is_pp_one_vf(adev))
                effective_mode &= ~S_IWUSR;
@@ -4162,6 +4175,7 @@ err_out:
 
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 {
+       enum amdgpu_sriov_vf_mode mode;
        uint32_t mask = 0;
        int ret;
 
@@ -4173,17 +4187,21 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
        if (adev->pm.dpm_enabled == 0)
                return 0;
 
-       adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
-                                                                  DRIVER_NAME, adev,
-                                                                  hwmon_groups);
-       if (IS_ERR(adev->pm.int_hwmon_dev)) {
-               ret = PTR_ERR(adev->pm.int_hwmon_dev);
-               dev_err(adev->dev,
-                       "Unable to register hwmon device: %d\n", ret);
-               return ret;
+       mode = amdgpu_virt_get_sriov_vf_mode(adev);
+
+       /* under multi-vf mode, the hwmon attributes are all not supported */
+       if (mode != SRIOV_VF_MODE_MULTI_VF) {
+               adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
+                                                                                                               DRIVER_NAME, adev,
+                                                                                                               hwmon_groups);
+               if (IS_ERR(adev->pm.int_hwmon_dev)) {
+                       ret = PTR_ERR(adev->pm.int_hwmon_dev);
+                       dev_err(adev->dev, "Unable to register hwmon device: %d\n", ret);
+                       return ret;
+               }
        }
 
-       switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
+       switch (mode) {
        case SRIOV_VF_MODE_ONE_VF:
                mask = ATTR_FLAG_ONEVF;
                break;
index 9e4f8a4104a346a99a307332548a9fdb1ccea093..914c15387157564f4f8f4fb0636b6ad9ad53fca8 100644 (file)
@@ -1022,6 +1022,9 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
                                *limit /= 100;
                        }
                        break;
+               case PP_PWR_LIMIT_MIN:
+                       *limit = 0;
+                       break;
                default:
                        ret = -EOPNOTSUPP;
                        break;
index 9fcad69a9f34461f9cb3f5b4d1c5b88130b50c41..2cf2a7b126235f0667faf907cf02a689ca417a63 100644 (file)
@@ -367,7 +367,7 @@ typedef struct _ATOM_Tonga_VCE_State_Record {
 typedef struct _ATOM_Tonga_VCE_State_Table {
        UCHAR ucRevId;
        UCHAR ucNumEntries;
-       ATOM_Tonga_VCE_State_Record entries[1];
+       ATOM_Tonga_VCE_State_Record entries[];
 } ATOM_Tonga_VCE_State_Table;
 
 typedef struct _ATOM_Tonga_PowerTune_Table {
@@ -481,7 +481,7 @@ typedef struct _ATOM_Tonga_Hard_Limit_Record {
 typedef struct _ATOM_Tonga_Hard_Limit_Table {
        UCHAR ucRevId;
        UCHAR ucNumEntries;
-       ATOM_Tonga_Hard_Limit_Record entries[1];
+       ATOM_Tonga_Hard_Limit_Record entries[];
 } ATOM_Tonga_Hard_Limit_Table;
 
 typedef struct _ATOM_Tonga_GPIO_Table {
index 8b0590b834cca733c53e7726cb3eeb0cbb2e3bbd..de2926df5ed7437e20b374ff6da2944455fe7fde 100644 (file)
@@ -129,7 +129,7 @@ typedef struct _ATOM_Vega10_State {
 typedef struct _ATOM_Vega10_State_Array {
        UCHAR ucRevId;
        UCHAR ucNumEntries;                                         /* Number of entries. */
-       ATOM_Vega10_State states[1];                             /* Dynamically allocate entries. */
+       ATOM_Vega10_State states[];                             /* Dynamically allocate entries. */
 } ATOM_Vega10_State_Array;
 
 typedef struct _ATOM_Vega10_CLK_Dependency_Record {
@@ -169,37 +169,37 @@ typedef struct _ATOM_Vega10_GFXCLK_Dependency_Table {
 typedef struct _ATOM_Vega10_MCLK_Dependency_Table {
     UCHAR ucRevId;
     UCHAR ucNumEntries;                                         /* Number of entries. */
-    ATOM_Vega10_MCLK_Dependency_Record entries[1];            /* Dynamically allocate entries. */
+    ATOM_Vega10_MCLK_Dependency_Record entries[];            /* Dynamically allocate entries. */
 } ATOM_Vega10_MCLK_Dependency_Table;
 
 typedef struct _ATOM_Vega10_SOCCLK_Dependency_Table {
     UCHAR ucRevId;
     UCHAR ucNumEntries;                                         /* Number of entries. */
-    ATOM_Vega10_CLK_Dependency_Record entries[1];            /* Dynamically allocate entries. */
+    ATOM_Vega10_CLK_Dependency_Record entries[];            /* Dynamically allocate entries. */
 } ATOM_Vega10_SOCCLK_Dependency_Table;
 
 typedef struct _ATOM_Vega10_DCEFCLK_Dependency_Table {
     UCHAR ucRevId;
     UCHAR ucNumEntries;                                         /* Number of entries. */
-    ATOM_Vega10_CLK_Dependency_Record entries[1];            /* Dynamically allocate entries. */
+    ATOM_Vega10_CLK_Dependency_Record entries[];            /* Dynamically allocate entries. */
 } ATOM_Vega10_DCEFCLK_Dependency_Table;
 
 typedef struct _ATOM_Vega10_PIXCLK_Dependency_Table {
        UCHAR ucRevId;
        UCHAR ucNumEntries;                                         /* Number of entries. */
-       ATOM_Vega10_CLK_Dependency_Record entries[1];            /* Dynamically allocate entries. */
+       ATOM_Vega10_CLK_Dependency_Record entries[];            /* Dynamically allocate entries. */
 } ATOM_Vega10_PIXCLK_Dependency_Table;
 
 typedef struct _ATOM_Vega10_DISPCLK_Dependency_Table {
        UCHAR ucRevId;
        UCHAR ucNumEntries;                                         /* Number of entries.*/
-       ATOM_Vega10_CLK_Dependency_Record entries[1];            /* Dynamically allocate entries. */
+       ATOM_Vega10_CLK_Dependency_Record entries[];            /* Dynamically allocate entries. */
 } ATOM_Vega10_DISPCLK_Dependency_Table;
 
 typedef struct _ATOM_Vega10_PHYCLK_Dependency_Table {
        UCHAR ucRevId;
        UCHAR ucNumEntries;                                         /* Number of entries. */
-       ATOM_Vega10_CLK_Dependency_Record entries[1];            /* Dynamically allocate entries. */
+       ATOM_Vega10_CLK_Dependency_Record entries[];            /* Dynamically allocate entries. */
 } ATOM_Vega10_PHYCLK_Dependency_Table;
 
 typedef struct _ATOM_Vega10_MM_Dependency_Record {
@@ -213,7 +213,7 @@ typedef struct _ATOM_Vega10_MM_Dependency_Record {
 typedef struct _ATOM_Vega10_MM_Dependency_Table {
        UCHAR ucRevId;
        UCHAR ucNumEntries;                                         /* Number of entries */
-       ATOM_Vega10_MM_Dependency_Record entries[1];             /* Dynamically allocate entries */
+       ATOM_Vega10_MM_Dependency_Record entries[];             /* Dynamically allocate entries */
 } ATOM_Vega10_MM_Dependency_Table;
 
 typedef struct _ATOM_Vega10_PCIE_Record {
@@ -225,7 +225,7 @@ typedef struct _ATOM_Vega10_PCIE_Record {
 typedef struct _ATOM_Vega10_PCIE_Table {
        UCHAR  ucRevId;
        UCHAR  ucNumEntries;                                        /* Number of entries */
-       ATOM_Vega10_PCIE_Record entries[1];                      /* Dynamically allocate entries. */
+       ATOM_Vega10_PCIE_Record entries[];                      /* Dynamically allocate entries. */
 } ATOM_Vega10_PCIE_Table;
 
 typedef struct _ATOM_Vega10_Voltage_Lookup_Record {
@@ -235,7 +235,7 @@ typedef struct _ATOM_Vega10_Voltage_Lookup_Record {
 typedef struct _ATOM_Vega10_Voltage_Lookup_Table {
        UCHAR ucRevId;
        UCHAR ucNumEntries;                                          /* Number of entries */
-       ATOM_Vega10_Voltage_Lookup_Record entries[1];             /* Dynamically allocate entries */
+       ATOM_Vega10_Voltage_Lookup_Record entries[];             /* Dynamically allocate entries */
 } ATOM_Vega10_Voltage_Lookup_Table;
 
 typedef struct _ATOM_Vega10_Fan_Table {
@@ -327,7 +327,7 @@ typedef struct _ATOM_Vega10_VCE_State_Record {
 typedef struct _ATOM_Vega10_VCE_State_Table {
     UCHAR ucRevId;
     UCHAR ucNumEntries;
-    ATOM_Vega10_VCE_State_Record entries[1];
+    ATOM_Vega10_VCE_State_Record entries[];
 } ATOM_Vega10_VCE_State_Table;
 
 typedef struct _ATOM_Vega10_PowerTune_Table {
@@ -427,7 +427,7 @@ typedef struct _ATOM_Vega10_Hard_Limit_Record {
 typedef struct _ATOM_Vega10_Hard_Limit_Table {
     UCHAR ucRevId;
     UCHAR ucNumEntries;
-    ATOM_Vega10_Hard_Limit_Record entries[1];
+    ATOM_Vega10_Hard_Limit_Record entries[];
 } ATOM_Vega10_Hard_Limit_Table;
 
 typedef struct _Vega10_PPTable_Generic_SubTable_Header {
index 9f86c1fecbb133667c4c02af93a1854177d56c5e..1ead323f1c78138b0176ec55032fd990239e94fa 100644 (file)
@@ -733,7 +733,7 @@ static int smu_early_init(void *handle)
        smu->adev = adev;
        smu->pm_enabled = !!amdgpu_dpm;
        smu->is_apu = false;
-       smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+       smu->smu_baco.state = SMU_BACO_STATE_NONE;
        smu->smu_baco.platform_support = false;
        smu->user_dpm_profile.fan_mode = -1;
 
@@ -1711,6 +1711,7 @@ static int smu_disable_dpms(struct smu_context *smu)
        }
 
        if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
+           !((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
            !amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
                adev->gfx.rlc.funcs->stop(adev);
 
@@ -1742,10 +1743,31 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
        return 0;
 }
 
+static int smu_reset_mp1_state(struct smu_context *smu)
+{
+       struct amdgpu_device *adev = smu->adev;
+       int ret = 0;
+
+       if ((!adev->in_runpm) && (!adev->in_suspend) &&
+               (!amdgpu_in_reset(adev)))
+               switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
+               case IP_VERSION(13, 0, 0):
+               case IP_VERSION(13, 0, 7):
+               case IP_VERSION(13, 0, 10):
+                       ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
+                       break;
+               default:
+                       break;
+               }
+
+       return ret;
+}
+
 static int smu_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = adev->powerplay.pp_handle;
+       int ret;
 
        if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;
@@ -1763,7 +1785,15 @@ static int smu_hw_fini(void *handle)
 
        adev->pm.dpm_enabled = false;
 
-       return smu_smc_hw_cleanup(smu);
+       ret = smu_smc_hw_cleanup(smu);
+       if (ret)
+               return ret;
+
+       ret = smu_reset_mp1_state(smu);
+       if (ret)
+               return ret;
+
+       return 0;
 }
 
 static void smu_late_fini(void *handle)
@@ -2718,7 +2748,7 @@ unlock:
 
 static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
 {
-       int ret = -EINVAL;
+       int ret = -EOPNOTSUPP;
        struct smu_context *smu = handle;
 
        if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
@@ -2729,7 +2759,7 @@ static int smu_get_apu_thermal_limit(void *handle, uint32_t *limit)
 
 static int smu_set_apu_thermal_limit(void *handle, uint32_t limit)
 {
-       int ret = -EINVAL;
+       int ret = -EOPNOTSUPP;
        struct smu_context *smu = handle;
 
        if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
index 839553a86aa226064eba3041a185128bed1b6cfd..8def291b18bcdaa3db86ec4ef2545f154f70c098 100644 (file)
@@ -419,6 +419,7 @@ enum smu_reset_mode {
 enum smu_baco_state {
        SMU_BACO_STATE_ENTER = 0,
        SMU_BACO_STATE_EXIT,
+       SMU_BACO_STATE_NONE,
 };
 
 struct smu_baco_context {
index b483c8e096e761ab08f8f9b1ee7f71c1ba7c5ec1..22f88842a7fd21e9dd089c0896ff9b03b1876fd4 100644 (file)
@@ -150,97 +150,39 @@ typedef struct {
 } DpmClocks_t;
 
 typedef struct {
-  uint16_t CoreFrequency[16];           //Target core frequency [MHz]
-  uint16_t CorePower[16];               //CAC calculated core power [W] [Q8.8]
-  uint16_t CoreTemperature[16];         //TSEN measured core temperature [C] [Q8.8]
-  uint16_t GfxTemperature;              //TSEN measured GFX temperature [C] [Q8.8]
-  uint16_t SocTemperature;              //TSEN measured SOC temperature [C] [Q8.8]
-  uint16_t StapmOpnLimit;               //Maximum IRM defined STAPM power limit [W] [Q8.8]
-  uint16_t StapmCurrentLimit;           //Time filtered STAPM power limit [W] [Q8.8]
-  uint16_t InfrastructureCpuMaxFreq;    //CCLK frequency limit enforced on classic cores [MHz]
-  uint16_t InfrastructureGfxMaxFreq;    //GFXCLK frequency limit enforced on GFX [MHz]
-  uint16_t SkinTemp;                    //Maximum skin temperature reported by APU and HS2 chassis sensors [C] [Q8.8]
-  uint16_t AverageGfxclkFrequency;      //Time filtered target GFXCLK frequency [MHz]
-  uint16_t AverageFclkFrequency;        //Time filtered target FCLK frequency [MHz]
-  uint16_t AverageGfxActivity;          //Time filtered GFX busy % [0-100] [Q8.8]
-  uint16_t AverageSocclkFrequency;      //Time filtered target SOCCLK frequency [MHz]
-  uint16_t AverageVclkFrequency;        //Time filtered target VCLK frequency [MHz]
-  uint16_t AverageVcnActivity;          //Time filtered VCN busy % [0-100] [Q8.8]
-  uint16_t AverageVpeclkFrequency;      //Time filtered target VPECLK frequency [MHz]
-  uint16_t AverageIpuclkFrequency;      //Time filtered target IPUCLK frequency [MHz]
-  uint16_t AverageIpuBusy[8];           //Time filtered IPU per-column busy % [0-100] [Q8.8]
-  uint16_t AverageDRAMReads;            //Time filtered DRAM read bandwidth [GB/sec] [Q8.8]
-  uint16_t AverageDRAMWrites;           //Time filtered DRAM write bandwidth [GB/sec] [Q8.8]
-  uint16_t AverageCoreC0Residency[16];  //Time filtered per-core C0 residency % [0-100] [Q8.8]
-  uint16_t IpuPower;                    //Time filtered IPU power [W] [Q8.8]
-  uint32_t ApuPower;                    //Time filtered APU power [W] [Q24.8]
-  uint32_t dGpuPower;                   //Time filtered dGPU power [W] [Q24.8]
-  uint32_t AverageSocketPower;          //Time filtered power used for PPT/STAPM [APU+dGPU] [W] [Q24.8]
-  uint32_t AverageCorePower;            //Time filtered sum of core power across all cores in the socket [W] [Q24.8]
-  uint32_t FilterAlphaValue;            //Metrics table alpha filter time constant [us]
-  uint32_t MetricsCounter;              //Counter that is incremented on every metrics table update [PM_TIMER cycles]
+  uint16_t CoreFrequency[16];        //Target core frequency [MHz]
+  uint16_t CorePower[16];            //CAC calculated core power [mW]
+  uint16_t CoreTemperature[16];      //TSEN measured core temperature [centi-C]
+  uint16_t GfxTemperature;           //TSEN measured GFX temperature [centi-C]
+  uint16_t SocTemperature;           //TSEN measured SOC temperature [centi-C]
+  uint16_t StapmOpnLimit;            //Maximum IRM defined STAPM power limit [mW]
+  uint16_t StapmCurrentLimit;        //Time filtered STAPM power limit [mW]
+  uint16_t InfrastructureCpuMaxFreq; //CCLK frequency limit enforced on classic cores [MHz]
+  uint16_t InfrastructureGfxMaxFreq; //GFXCLK frequency limit enforced on GFX [MHz]
+  uint16_t SkinTemp;                 //Maximum skin temperature reported by APU and HS2 chassis sensors [centi-C]
+  uint16_t GfxclkFrequency;          //Time filtered target GFXCLK frequency [MHz]
+  uint16_t FclkFrequency;            //Time filtered target FCLK frequency [MHz]
+  uint16_t GfxActivity;              //Time filtered GFX busy % [0-100]
+  uint16_t SocclkFrequency;          //Time filtered target SOCCLK frequency [MHz]
+  uint16_t VclkFrequency;            //Time filtered target VCLK frequency [MHz]
+  uint16_t VcnActivity;              //Time filtered VCN busy % [0-100]
+  uint16_t VpeclkFrequency;          //Time filtered target VPECLK frequency [MHz]
+  uint16_t IpuclkFrequency;          //Time filtered target IPUCLK frequency [MHz]
+  uint16_t IpuBusy[8];               //Time filtered IPU per-column busy % [0-100]
+  uint16_t DRAMReads;                //Time filtered DRAM read bandwidth [MB/sec]
+  uint16_t DRAMWrites;               //Time filtered DRAM write bandwidth [MB/sec]
+  uint16_t CoreC0Residency[16];      //Time filtered per-core C0 residency % [0-100]
+  uint16_t IpuPower;                 //Time filtered IPU power [mW]
+  uint32_t ApuPower;                 //Time filtered APU power [mW]
+  uint32_t GfxPower;                 //Time filtered GFX power [mW]
+  uint32_t dGpuPower;                //Time filtered dGPU power [mW]
+  uint32_t SocketPower;              //Time filtered power used for PPT/STAPM [APU+dGPU] [mW]
+  uint32_t AllCorePower;             //Time filtered sum of core power across all cores in the socket [mW]
+  uint32_t FilterAlphaValue;         //Metrics table alpha filter time constant [us]
+  uint32_t MetricsCounter;           //Counter that is incremented on every metrics table update [PM_TIMER cycles]
+  uint32_t spare[16];
 } SmuMetrics_t;
 
-typedef struct {
-  uint16_t GfxclkFrequency;             //[MHz]
-  uint16_t SocclkFrequency;             //[MHz]
-  uint16_t VclkFrequency;               //[MHz]
-  uint16_t DclkFrequency;               //[MHz]
-  uint16_t MemclkFrequency;             //[MHz]
-  uint16_t spare;
-  uint16_t UvdActivity;                 //[centi]
-  uint16_t GfxActivity;                 //[centi]
-
-  uint16_t Voltage[2];                  //[mV] indices: VDDCR_VDD, VDDCR_SOC
-  uint16_t Current[2];                  //[mA] indices: VDDCR_VDD, VDDCR_SOC
-  uint16_t Power[2];                    //[mW] indices: VDDCR_VDD, VDDCR_SOC
-
-  uint16_t CoreFrequency[8];            //[MHz]
-  uint16_t CorePower[8];                //[mW]
-  uint16_t CoreTemperature[8];          //[centi-Celsius]
-  uint16_t L3Frequency[2];              //[MHz]
-  uint16_t L3Temperature[2];            //[centi-Celsius]
-
-  uint16_t spare2[24];
-
-  uint16_t GfxTemperature;              //[centi-Celsius]
-  uint16_t SocTemperature;              //[centi-Celsius]
-  uint16_t ThrottlerStatus;
-
-  uint16_t CurrentSocketPower;          //[mW]
-  uint16_t StapmOpnLimit;               //[W]
-  uint16_t StapmCurrentLimit;           //[W]
-  uint32_t ApuPower;                    //[mW]
-  uint32_t dGpuPower;                   //[mW]
-
-  uint16_t VddTdcValue;                 //[mA]
-  uint16_t SocTdcValue;                 //[mA]
-  uint16_t VddEdcValue;                 //[mA]
-  uint16_t SocEdcValue;                 //[mA]
-
-  uint16_t InfrastructureCpuMaxFreq;    //[MHz]
-  uint16_t InfrastructureGfxMaxFreq;    //[MHz]
-
-  uint16_t SkinTemp;
-  uint16_t DeviceState;
-  uint16_t CurTemp;                     //[centi-Celsius]
-  uint16_t FilterAlphaValue;            //[m]
-
-  uint16_t AverageGfxclkFrequency;
-  uint16_t AverageFclkFrequency;
-  uint16_t AverageGfxActivity;
-  uint16_t AverageSocclkFrequency;
-  uint16_t AverageVclkFrequency;
-  uint16_t AverageVcnActivity;
-  uint16_t AverageDRAMReads;          //Filtered DF Bandwidth::DRAM Reads
-  uint16_t AverageDRAMWrites;         //Filtered DF Bandwidth::DRAM Writes
-  uint16_t AverageSocketPower;        //Filtered value of CurrentSocketPower
-  uint16_t AverageCorePower[2];       //Filtered of [sum of CorePower[8] per ccx])
-  uint16_t AverageCoreC0Residency[16]; //Filtered of [average C0 residency % per core]
-  uint16_t spare1;
-  uint32_t MetricsCounter;            //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing.
-} SmuMetrics_legacy_t;
-
 //ISP tile definitions
 typedef enum {
   TILE_XTILE = 0,         //ONO0
index cc02f979e9e9843e10d2fd0ca3c6764cb2cdf91f..95cb919718aebe0ad57f73b3577d6beac4facaf9 100644 (file)
@@ -299,5 +299,7 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
                                     uint8_t pcie_gen_cap,
                                     uint8_t pcie_width_cap);
 
+int smu_v13_0_disable_pmfw_state(struct smu_context *smu);
+
 #endif
 #endif
index 3efc6aed28f1b52a1c5923016c50364facde31b2..762b31455a0b6c794ec85579ce7dfc28cdde4055 100644 (file)
@@ -234,24 +234,15 @@ static int vangogh_tables_init(struct smu_context *smu)
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+       SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)),
+                      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
 
-       if (smu->smc_fw_if_version < 0x3) {
-               SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
-                               PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-               smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
-       } else {
-               SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
-                               PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-               smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
-       }
+       smu_table->metrics_table = kzalloc(max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), GFP_KERNEL);
        if (!smu_table->metrics_table)
                goto err0_out;
        smu_table->metrics_time = 0;
 
-       if (smu->smc_fw_version >= 0x043F3E00)
-               smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_3);
-       else
-               smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
+       smu_table->gpu_metrics_table_size = max(sizeof(struct gpu_metrics_v2_3), sizeof(struct gpu_metrics_v2_2));
        smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
        if (!smu_table->gpu_metrics_table)
                goto err1_out;
index a49e5adf7cc3d7f2c7ed045a7cfc6947318a39d0..cf1b84060bc3da46c12ace3b878b03144288afd1 100644 (file)
@@ -2477,3 +2477,16 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
 
        return 0;
 }
+
+int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
+{
+       int ret;
+       struct amdgpu_device *adev = smu->adev;
+
+       WREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff), 0);
+
+       ret = RREG32_PCIE(MP1_Public |
+                                          (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+       return ret == 0 ? 0 : -EINVAL;
+}
index 34bd99b0e137af6b533a0f47c66f8a8b45f16a8d..82c4e1f1c6f075d09dbb9e407e69a8405a9e8c5b 100644 (file)
@@ -354,12 +354,12 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
        if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
                smu->dc_controlled_by_gpio = true;
 
-       if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO ||
-           powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+       if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_BACO) {
                smu_baco->platform_support = true;
 
-       if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
-               smu_baco->maco_support = true;
+               if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+                       smu_baco->maco_support = true;
+       }
 
        if (!overdrive_lowerlimits->FeatureCtrlMask ||
            !overdrive_upperlimits->FeatureCtrlMask)
@@ -2530,38 +2530,10 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
                }
        }
 
-       if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE &&
-               (((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xC8)) ||
-               ((smu->adev->pdev->device == 0x744C) && (smu->adev->pdev->revision == 0xCC)))) {
-               ret = smu_cmn_update_table(smu,
-                                          SMU_TABLE_ACTIVITY_MONITOR_COEFF,
-                                          WORKLOAD_PPLIB_COMPUTE_BIT,
-                                          (void *)(&activity_monitor_external),
-                                          false);
-               if (ret) {
-                       dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
-                       return ret;
-               }
-
-               ret = smu_cmn_update_table(smu,
-                                          SMU_TABLE_ACTIVITY_MONITOR_COEFF,
-                                          WORKLOAD_PPLIB_CUSTOM_BIT,
-                                          (void *)(&activity_monitor_external),
-                                          true);
-               if (ret) {
-                       dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
-                       return ret;
-               }
-
-               workload_type = smu_cmn_to_asic_specific_index(smu,
-                                                      CMN2ASIC_MAPPING_WORKLOAD,
-                                                      PP_SMC_POWER_PROFILE_CUSTOM);
-       } else {
-               /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-               workload_type = smu_cmn_to_asic_specific_index(smu,
+       /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+       workload_type = smu_cmn_to_asic_specific_index(smu,
                                                       CMN2ASIC_MAPPING_WORKLOAD,
                                                       smu->power_profile_mode);
-       }
 
        if (workload_type < 0)
                return -EINVAL;
@@ -2602,14 +2574,20 @@ static int smu_v13_0_0_baco_enter(struct smu_context *smu)
 static int smu_v13_0_0_baco_exit(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
+       int ret;
 
        if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
                /* Wait for PMFW handling for the Dstate change */
                usleep_range(10000, 11000);
-               return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+               ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
        } else {
-               return smu_v13_0_baco_exit(smu);
+               ret = smu_v13_0_baco_exit(smu);
        }
+
+       if (!ret)
+               adev->gfx.is_poweron = false;
+
+       return ret;
 }
 
 static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
@@ -2794,7 +2772,13 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
 
        switch (mp1_state) {
        case PP_MP1_STATE_UNLOAD:
-               ret = smu_cmn_set_mp1_state(smu, mp1_state);
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                                                         SMU_MSG_PrepareMp1ForUnload,
+                                                                                         0x55, NULL);
+
+               if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+                       ret = smu_v13_0_disable_pmfw_state(smu);
+
                break;
        default:
                /* Ignore others */
index f42b48b31927071d90f7af5eb7474d8d778b34b7..891605d4975f4e4460f83cb153f887a54cc16146 100644 (file)
@@ -48,6 +48,7 @@
 #include "smu_cmn.h"
 #include "mp/mp_13_0_6_offset.h"
 #include "mp/mp_13_0_6_sh_mask.h"
+#include "umc_v12_0.h"
 
 #undef MP1_Public
 #undef smnMP1_FIRMWARE_FLAGS
@@ -94,22 +95,11 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_6.bin");
 #define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
 #define LINK_SPEED_MAX                         4
 
-#define SMU_13_0_6_DSCLK_THRESHOLD 100
+#define SMU_13_0_6_DSCLK_THRESHOLD 140
 
 #define MCA_BANK_IPID(_ip, _hwid, _type) \
        [AMDGPU_MCA_IP_##_ip] = { .hwid = _hwid, .mcatype = _type, }
 
-enum mca_reg_idx {
-       MCA_REG_IDX_CONTROL             = 0,
-       MCA_REG_IDX_STATUS              = 1,
-       MCA_REG_IDX_ADDR                = 2,
-       MCA_REG_IDX_MISC0               = 3,
-       MCA_REG_IDX_CONFIG              = 4,
-       MCA_REG_IDX_IPID                = 5,
-       MCA_REG_IDX_SYND                = 6,
-       MCA_REG_IDX_COUNT               = 16,
-};
-
 struct mca_bank_ipid {
        enum amdgpu_mca_ip ip;
        uint16_t hwid;
@@ -122,7 +112,9 @@ struct mca_ras_info {
        int *err_code_array;
        int err_code_count;
        int (*get_err_count)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
-                            enum amdgpu_mca_error_type type, int idx, uint32_t *count);
+                            enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count);
+       bool (*bank_is_valid)(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+                             enum amdgpu_mca_error_type type, struct mca_bank_entry *entry);
 };
 
 #define P2S_TABLE_ID_A 0x50325341
@@ -270,7 +262,7 @@ static int smu_v13_0_6_init_microcode(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        uint32_t p2s_table_id = P2S_TABLE_ID_A;
        int ret = 0, i, p2stable_count;
-       char ucode_prefix[30];
+       char ucode_prefix[15];
        char fw_name[30];
 
        /* No need to load P2S tables in IOV mode */
@@ -2305,7 +2297,7 @@ static int smu_v13_0_6_post_init(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
 
        if (!amdgpu_sriov_vf(adev) && adev->ras_enabled)
-               return smu_v13_0_6_mca_set_debug_mode(smu, true);
+               return smu_v13_0_6_mca_set_debug_mode(smu, false);
 
        return 0;
 }
@@ -2387,6 +2379,7 @@ static const struct mca_bank_ipid smu_v13_0_6_mca_ipid_table[AMDGPU_MCA_IP_COUNT
        MCA_BANK_IPID(UMC, 0x96, 0x0),
        MCA_BANK_IPID(SMU, 0x01, 0x1),
        MCA_BANK_IPID(MP5, 0x01, 0x2),
+       MCA_BANK_IPID(PCS_XGMI, 0x50, 0x0),
 };
 
 static void mca_bank_entry_info_decode(struct mca_bank_entry *entry, struct mca_bank_info *info)
@@ -2448,53 +2441,60 @@ static int mca_get_mca_entry(struct amdgpu_device *adev, enum amdgpu_mca_error_t
        return 0;
 }
 
-static int mca_decode_mca_ipid(struct amdgpu_device *adev, enum amdgpu_mca_error_type type, int idx, int *ip)
+static int mca_decode_ipid_to_hwip(uint64_t val)
 {
        const struct mca_bank_ipid *ipid;
-       uint64_t val;
        uint16_t hwid, mcatype;
-       int i, ret;
-
-       ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_IPID, &val);
-       if (ret)
-               return ret;
+       int i;
 
        hwid = REG_GET_FIELD(val, MCMP1_IPIDT0, HardwareID);
        mcatype = REG_GET_FIELD(val, MCMP1_IPIDT0, McaType);
 
-       if (hwid) {
-               for (i = 0; i < ARRAY_SIZE(smu_v13_0_6_mca_ipid_table); i++) {
-                       ipid = &smu_v13_0_6_mca_ipid_table[i];
+       for (i = 0; i < ARRAY_SIZE(smu_v13_0_6_mca_ipid_table); i++) {
+               ipid = &smu_v13_0_6_mca_ipid_table[i];
 
-                       if (!ipid->hwid)
-                               continue;
+               if (!ipid->hwid)
+                       continue;
 
-                       if (ipid->hwid == hwid && ipid->mcatype == mcatype) {
-                               *ip = i;
-                               return 0;
-                       }
-               }
+               if (ipid->hwid == hwid && ipid->mcatype == mcatype)
+                       return i;
        }
 
-       *ip = AMDGPU_MCA_IP_UNKNOW;
+       return AMDGPU_MCA_IP_UNKNOW;
+}
+
+static int mca_umc_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+                                    enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
+{
+       uint64_t status0;
+
+       status0 = entry->regs[MCA_REG_IDX_STATUS];
+
+       if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
+               *count = 0;
+               return 0;
+       }
+
+       if (type == AMDGPU_MCA_ERROR_TYPE_UE && umc_v12_0_is_uncorrectable_error(status0))
+               *count = 1;
+       else if (type == AMDGPU_MCA_ERROR_TYPE_CE && umc_v12_0_is_correctable_error(status0))
+               *count = 1;
 
        return 0;
 }
 
-static int mca_normal_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
-                                       enum amdgpu_mca_error_type type, int idx, uint32_t *count)
+static int mca_pcs_xgmi_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+                                         enum amdgpu_mca_error_type type, struct mca_bank_entry *entry,
+                                         uint32_t *count)
 {
-       uint64_t status0;
-       int ret;
+       u32 ext_error_code;
 
-       ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_STATUS, &status0);
-       if (ret)
-               return ret;
+       ext_error_code = MCA_REG__STATUS__ERRORCODEEXT(entry->regs[MCA_REG_IDX_STATUS]);
 
-       if (REG_GET_FIELD(status0, MCMP1_STATUST0, Val))
+       if (type == AMDGPU_MCA_ERROR_TYPE_UE && ext_error_code == 0)
+               *count = 1;
+       else if (type == AMDGPU_MCA_ERROR_TYPE_CE && ext_error_code == 6)
                *count = 1;
-       else
-               *count = 0;
 
        return 0;
 }
@@ -2515,70 +2515,41 @@ static bool mca_smu_check_error_code(struct amdgpu_device *adev, const struct mc
        return false;
 }
 
-static int mca_mp5_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
-                                    enum amdgpu_mca_error_type type, int idx, uint32_t *count)
+static int mca_gfx_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+                                    enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
 {
-       uint64_t status0 = 0, misc0 = 0;
-       uint32_t errcode;
-       int ret;
-
-       if (mca_ras->ip != AMDGPU_MCA_IP_MP5)
-               return -EINVAL;
-
-       ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_STATUS, &status0);
-       if (ret)
-               return ret;
+       uint64_t status0, misc0;
 
+       status0 = entry->regs[MCA_REG_IDX_STATUS];
        if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
                *count = 0;
                return 0;
        }
 
-       errcode = REG_GET_FIELD(status0, MCMP1_STATUST0, ErrorCode);
-       if (!mca_smu_check_error_code(adev, mca_ras, errcode))
-               return 0;
-
        if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
            REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 &&
            REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) {
-               if (count)
-                       *count = 1;
+               *count = 1;
                return 0;
-       }
-
-       ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_MISC0, &misc0);
-       if (ret)
-               return ret;
-
-       if (count)
+       } else {
+               misc0 = entry->regs[MCA_REG_IDX_MISC0];
                *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt);
+       }
 
        return 0;
 }
 
 static int mca_smu_mca_get_err_count(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
-                                    enum amdgpu_mca_error_type type, int idx, uint32_t *count)
+                                    enum amdgpu_mca_error_type type, struct mca_bank_entry *entry, uint32_t *count)
 {
-       uint64_t status0 = 0, misc0 = 0;
-       uint32_t errcode;
-       int ret;
-
-       if (mca_ras->ip != AMDGPU_MCA_IP_SMU)
-               return -EINVAL;
-
-       ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_STATUS, &status0);
-       if (ret)
-               return ret;
+       uint64_t status0, misc0;
 
+       status0 = entry->regs[MCA_REG_IDX_STATUS];
        if (!REG_GET_FIELD(status0, MCMP1_STATUST0, Val)) {
                *count = 0;
                return 0;
        }
 
-       errcode = REG_GET_FIELD(status0, MCMP1_STATUST0, ErrorCode);
-       if (!mca_smu_check_error_code(adev, mca_ras, errcode))
-               return 0;
-
        if (type == AMDGPU_MCA_ERROR_TYPE_UE &&
            REG_GET_FIELD(status0, MCMP1_STATUST0, UC) == 1 &&
            REG_GET_FIELD(status0, MCMP1_STATUST0, PCC) == 1) {
@@ -2587,16 +2558,43 @@ static int mca_smu_mca_get_err_count(const struct mca_ras_info *mca_ras, struct
                return 0;
        }
 
-       ret = mca_bank_read_reg(adev, type, idx, MCA_REG_IDX_MISC0, &misc0);
-       if (ret)
-               return ret;
-
-       if (count)
-               *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt);
+       misc0 = entry->regs[MCA_REG_IDX_MISC0];
+       *count = REG_GET_FIELD(misc0, MCMP1_MISC0T0, ErrCnt);
 
        return 0;
 }
 
+static bool mca_gfx_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+                                     enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
+{
+       uint32_t instlo;
+
+       instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
+       switch (instlo) {
+       case 0x36430400: /* SMNAID XCD 0 */
+       case 0x38430400: /* SMNAID XCD 1 */
+       case 0x40430400: /* SMNXCD XCD 0, NOTE: FIXME: fix this error later */
+               return true;
+       default:
+               return false;
+       }
+
+       return false;
+};
+
+static bool mca_smu_bank_is_valid(const struct mca_ras_info *mca_ras, struct amdgpu_device *adev,
+                                 enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
+{
+       uint32_t errcode, instlo;
+
+       instlo = REG_GET_FIELD(entry->regs[MCA_REG_IDX_IPID], MCMP1_IPIDT0, InstanceIdLo);
+       if (instlo != 0x03b30400)
+               return false;
+
+       errcode = REG_GET_FIELD(entry->regs[MCA_REG_IDX_STATUS], MCMP1_STATUST0, ErrorCode);
+       return mca_smu_check_error_code(adev, mca_ras, errcode);
+}
+
 static int sdma_err_codes[] = { CODE_SDMA0, CODE_SDMA1, CODE_SDMA2, CODE_SDMA3 };
 static int mmhub_err_codes[] = {
        CODE_DAGB0, CODE_DAGB0 + 1, CODE_DAGB0 + 2, CODE_DAGB0 + 3, CODE_DAGB0 + 4, /* DAGB0-4 */
@@ -2608,23 +2606,30 @@ static const struct mca_ras_info mca_ras_table[] = {
        {
                .blkid = AMDGPU_RAS_BLOCK__UMC,
                .ip = AMDGPU_MCA_IP_UMC,
-               .get_err_count = mca_normal_mca_get_err_count,
+               .get_err_count = mca_umc_mca_get_err_count,
        }, {
                .blkid = AMDGPU_RAS_BLOCK__GFX,
-               .ip = AMDGPU_MCA_IP_MP5,
-               .get_err_count = mca_mp5_mca_get_err_count,
+               .ip = AMDGPU_MCA_IP_SMU,
+               .get_err_count = mca_gfx_mca_get_err_count,
+               .bank_is_valid = mca_gfx_smu_bank_is_valid,
        }, {
                .blkid = AMDGPU_RAS_BLOCK__SDMA,
                .ip = AMDGPU_MCA_IP_SMU,
                .err_code_array = sdma_err_codes,
                .err_code_count = ARRAY_SIZE(sdma_err_codes),
                .get_err_count = mca_smu_mca_get_err_count,
+               .bank_is_valid = mca_smu_bank_is_valid,
        }, {
                .blkid = AMDGPU_RAS_BLOCK__MMHUB,
                .ip = AMDGPU_MCA_IP_SMU,
                .err_code_array = mmhub_err_codes,
                .err_code_count = ARRAY_SIZE(mmhub_err_codes),
                .get_err_count = mca_smu_mca_get_err_count,
+               .bank_is_valid = mca_smu_bank_is_valid,
+       }, {
+               .blkid = AMDGPU_RAS_BLOCK__XGMI_WAFL,
+               .ip = AMDGPU_MCA_IP_PCS_XGMI,
+               .get_err_count = mca_pcs_xgmi_mca_get_err_count,
        },
 };
 
@@ -2659,130 +2664,84 @@ static int mca_get_valid_mca_count(struct amdgpu_device *adev, enum amdgpu_mca_e
 }
 
 static bool mca_bank_is_valid(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
-                             enum amdgpu_mca_error_type type, int idx)
+                             enum amdgpu_mca_error_type type, struct mca_bank_entry *entry)
 {
-       int ret, ip = AMDGPU_MCA_IP_UNKNOW;
-
-       ret = mca_decode_mca_ipid(adev, type, idx, &ip);
-       if (ret)
-               return false;
-
-       if (ip == AMDGPU_MCA_IP_UNKNOW)
+       if (mca_decode_ipid_to_hwip(entry->regs[MCA_REG_IDX_IPID]) != mca_ras->ip)
                return false;
 
-       return ip == mca_ras->ip;
-}
+       if (mca_ras->bank_is_valid)
+               return mca_ras->bank_is_valid(mca_ras, adev, type, entry);
 
-static int mca_get_valid_mca_idx(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
-                                enum amdgpu_mca_error_type type,
-                                uint32_t mca_cnt, int *idx_array, int idx_array_size)
-{
-       int i, idx_cnt = 0;
-
-       for (i = 0; i < mca_cnt; i++) {
-               if (!mca_bank_is_valid(adev, mca_ras, type, i))
-                       continue;
-
-               if (idx_array) {
-                       if (idx_cnt < idx_array_size)
-                               idx_array[idx_cnt] = i;
-                       else
-                               return -EINVAL;
-               }
-
-               idx_cnt++;
-       }
-
-       return idx_cnt;
+       return true;
 }
 
-static int __mca_smu_get_error_count(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras, enum amdgpu_mca_error_type type, uint32_t *count)
+static int __mca_smu_get_ras_mca_set(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
+                                    enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
 {
-       uint32_t result, mca_cnt, total = 0;
-       int idx_array[16];
-       int i, ret, idx_cnt = 0;
+       struct mca_bank_entry entry;
+       uint32_t mca_cnt;
+       int i, ret;
 
        ret = mca_get_valid_mca_count(adev, type, &mca_cnt);
        if (ret)
                return ret;
 
        /* if valid mca bank count is 0, the driver can return 0 directly */
-       if (!mca_cnt) {
-               *count = 0;
+       if (!mca_cnt)
                return 0;
-       }
 
-       if (!mca_ras->get_err_count)
-               return -EINVAL;
+       for (i = 0; i < mca_cnt; i++) {
+               memset(&entry, 0, sizeof(entry));
+               ret = mca_get_mca_entry(adev, type, i, &entry);
+               if (ret)
+                       return ret;
 
-       idx_cnt = mca_get_valid_mca_idx(adev, mca_ras, type, mca_cnt, idx_array, ARRAY_SIZE(idx_array));
-       if (idx_cnt < 0)
-               return -EINVAL;
+               if (mca_ras && !mca_bank_is_valid(adev, mca_ras, type, &entry))
+                       continue;
 
-       for (i = 0; i < idx_cnt; i++) {
-               result = 0;
-               ret = mca_ras->get_err_count(mca_ras, adev, type, idx_array[i], &result);
+               ret = amdgpu_mca_bank_set_add_entry(mca_set, &entry);
                if (ret)
                        return ret;
-
-               total += result;
        }
 
-       *count = total;
-
        return 0;
 }
 
-static int mca_smu_get_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
-                                  enum amdgpu_mca_error_type type, uint32_t *count)
+static int mca_smu_get_ras_mca_set(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
+                                  enum amdgpu_mca_error_type type, struct mca_bank_set *mca_set)
 {
-       const struct mca_ras_info *mca_ras;
+       const struct mca_ras_info *mca_ras = NULL;
 
-       if (!count)
+       if (!mca_set)
                return -EINVAL;
 
-       mca_ras = mca_get_mca_ras_info(adev, blk);
-       if (!mca_ras)
-               return -EOPNOTSUPP;
-
-       return __mca_smu_get_error_count(adev, mca_ras, type, count);
-}
-
-static int __mca_smu_get_ras_mca_idx_array(struct amdgpu_device *adev, const struct mca_ras_info *mca_ras,
-                                          enum amdgpu_mca_error_type type, int *idx_array, int *idx_array_size)
-{
-       uint32_t mca_cnt = 0;
-       int ret, idx_cnt = 0;
-
-       ret = mca_get_valid_mca_count(adev, type, &mca_cnt);
-       if (ret)
-               return ret;
-
-       /* if valid mca bank count is 0, the driver can return 0 directly */
-       if (!mca_cnt) {
-               *idx_array_size = 0;
-               return 0;
+       if (blk != AMDGPU_RAS_BLOCK_COUNT) {
+               mca_ras = mca_get_mca_ras_info(adev, blk);
+               if (!mca_ras)
+                       return -EOPNOTSUPP;
        }
 
-       idx_cnt = mca_get_valid_mca_idx(adev, mca_ras, type, mca_cnt, idx_array, *idx_array_size);
-       if (idx_cnt < 0)
-               return -EINVAL;
-
-       *idx_array_size = idx_cnt;
-
-       return 0;
+       return __mca_smu_get_ras_mca_set(adev, mca_ras, type, mca_set);
 }
 
-static int mca_smu_get_ras_mca_idx_array(struct amdgpu_device *adev, enum amdgpu_ras_block blk,
-                                        enum amdgpu_mca_error_type type, int *idx_array, int *idx_array_size)
+static int mca_smu_parse_mca_error_count(struct amdgpu_device *adev, enum amdgpu_ras_block blk, enum amdgpu_mca_error_type type,
+                                        struct mca_bank_entry *entry, uint32_t *count)
 {
        const struct mca_ras_info *mca_ras;
 
+       if (!entry || !count)
+               return -EINVAL;
+
        mca_ras = mca_get_mca_ras_info(adev, blk);
        if (!mca_ras)
                return -EOPNOTSUPP;
 
-       return __mca_smu_get_ras_mca_idx_array(adev, mca_ras, type, idx_array, idx_array_size);
+       if (!mca_bank_is_valid(adev, mca_ras, type, entry)) {
+               *count = 0;
+               return 0;
+       }
+
+       return mca_ras->get_err_count(mca_ras, adev, type, entry, count);
 }
 
 static int mca_smu_get_mca_entry(struct amdgpu_device *adev,
@@ -2801,10 +2760,10 @@ static const struct amdgpu_mca_smu_funcs smu_v13_0_6_mca_smu_funcs = {
        .max_ue_count = 12,
        .max_ce_count = 12,
        .mca_set_debug_mode = mca_smu_set_debug_mode,
-       .mca_get_error_count = mca_smu_get_error_count,
+       .mca_get_ras_mca_set = mca_smu_get_ras_mca_set,
+       .mca_parse_mca_error_count = mca_smu_parse_mca_error_count,
        .mca_get_mca_entry = mca_smu_get_mca_entry,
        .mca_get_valid_mca_count = mca_smu_get_valid_mca_count,
-       .mca_get_ras_mca_idx_array = mca_smu_get_ras_mca_idx_array,
 };
 
 static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
index ac0e1cc812bdeb43a40c869f1f51aadbf917b210..81eafed76045e97ac9f5dfdb92c7412f082ea496 100644 (file)
@@ -346,12 +346,13 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
        if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
                smu->dc_controlled_by_gpio = true;
 
-       if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO ||
-           powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
+       if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_BACO) {
                smu_baco->platform_support = true;
 
-       if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
-               smu_baco->maco_support = true;
+               if ((powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
+                                       && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
+                       smu_baco->maco_support = true;
+       }
 
        if (!overdrive_lowerlimits->FeatureCtrlMask ||
            !overdrive_upperlimits->FeatureCtrlMask)
@@ -2498,7 +2499,13 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
 
        switch (mp1_state) {
        case PP_MP1_STATE_UNLOAD:
-               ret = smu_cmn_set_mp1_state(smu, mp1_state);
+               ret = smu_cmn_send_smc_msg_with_param(smu,
+                                                                                         SMU_MSG_PrepareMp1ForUnload,
+                                                                                         0x55, NULL);
+
+               if (!ret && smu->smu_baco.state == SMU_BACO_STATE_EXIT)
+                       ret = smu_v13_0_disable_pmfw_state(smu);
+
                break;
        default:
                /* Ignore others */
@@ -2524,14 +2531,20 @@ static int smu_v13_0_7_baco_enter(struct smu_context *smu)
 static int smu_v13_0_7_baco_exit(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
+       int ret;
 
        if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
                /* Wait for PMFW handling for the Dstate change */
                usleep_range(10000, 11000);
-               return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
+               ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
        } else {
-               return smu_v13_0_baco_exit(smu);
+               ret = smu_v13_0_baco_exit(smu);
        }
+
+       if (!ret)
+               adev->gfx.is_poweron = false;
+
+       return ret;
 }
 
 static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
index 4ac22f44d160c615b90c2902eb3b9f5345730318..d8f8ad0e71375145ac230e247ba20142b3e2de52 100644 (file)
@@ -57,7 +57,7 @@ int smu_v14_0_init_microcode(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
        char fw_name[30];
-       char ucode_prefix[30];
+       char ucode_prefix[15];
        int err = 0;
        const struct smc_firmware_header_v1_0 *hdr;
        const struct common_firmware_header *header;
@@ -229,6 +229,8 @@ int smu_v14_0_check_fw_version(struct smu_context *smu)
                smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
                break;
        case IP_VERSION(14, 0, 0):
+               if ((smu->smc_fw_version < 0x5d3a00))
+                       dev_warn(smu->adev->dev, "The PMFW version(%x) is behind in this BIOS!\n", smu->smc_fw_version);
                smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
                break;
        default:
index c36fc10b63c809095bcc980a464f3910de507cd4..03b38c3a9968431914912131c8a82794dca4c283 100644 (file)
@@ -156,15 +156,10 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
                PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
                PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-       if (smu->smc_fw_version > 0x5d3500) {
-               SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
-                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-               smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
-       } else {
-               SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_legacy_t),
-                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-               smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_legacy_t), GFP_KERNEL);
-       }
+       SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+               PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+       smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
        if (!smu_table->metrics_table)
                goto err0_out;
        smu_table->metrics_time = 0;
@@ -177,10 +172,7 @@ static int smu_v14_0_0_init_smc_tables(struct smu_context *smu)
        if (!smu_table->watermarks_table)
                goto err2_out;
 
-       if (smu->smc_fw_version > 0x5d3500)
-               smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0);
-       else
-               smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
+       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v3_0);
        smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
        if (!smu_table->gpu_metrics_table)
                goto err3_out;
@@ -242,13 +234,13 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
 
        switch (member) {
        case METRICS_AVERAGE_GFXCLK:
-               *value = metrics->AverageGfxclkFrequency;
+               *value = metrics->GfxclkFrequency;
                break;
        case METRICS_AVERAGE_SOCCLK:
-               *value = metrics->AverageSocclkFrequency;
+               *value = metrics->SocclkFrequency;
                break;
        case METRICS_AVERAGE_VCLK:
-               *value = metrics->AverageVclkFrequency;
+               *value = metrics->VclkFrequency;
                break;
        case METRICS_AVERAGE_DCLK:
                *value = 0;
@@ -257,25 +249,25 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
                *value = 0;
                break;
        case METRICS_AVERAGE_FCLK:
-               *value = metrics->AverageFclkFrequency;
+               *value = metrics->FclkFrequency;
                break;
        case METRICS_AVERAGE_GFXACTIVITY:
-               *value = metrics->AverageGfxActivity >> 8;
+               *value = metrics->GfxActivity / 100;
                break;
        case METRICS_AVERAGE_VCNACTIVITY:
-               *value = metrics->AverageVcnActivity >> 8;
+               *value = metrics->VcnActivity / 100;
                break;
        case METRICS_AVERAGE_SOCKETPOWER:
        case METRICS_CURR_SOCKETPOWER:
-               *value = (metrics->AverageSocketPower & 0xff00) +
-               ((metrics->AverageSocketPower & 0xff) * 100 >> 8);
+               *value = (metrics->SocketPower / 1000 << 8) +
+               (metrics->SocketPower % 1000 / 10);
                break;
        case METRICS_TEMPERATURE_EDGE:
-               *value = (metrics->GfxTemperature >> 8) *
+               *value = metrics->GfxTemperature / 100 *
                SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
        case METRICS_TEMPERATURE_HOTSPOT:
-               *value = (metrics->SocTemperature >> 8) *
+               *value = metrics->SocTemperature / 100 *
                SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
                break;
        case METRICS_THROTTLER_STATUS:
@@ -317,107 +309,6 @@ static int smu_v14_0_0_get_smu_metrics_data(struct smu_context *smu,
        return ret;
 }
 
-static int smu_v14_0_0_legacy_get_smu_metrics_data(struct smu_context *smu,
-                                           MetricsMember_t member,
-                                           uint32_t *value)
-{
-       struct smu_table_context *smu_table = &smu->smu_table;
-
-       SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
-       int ret = 0;
-
-       ret = smu_cmn_get_metrics_table(smu, NULL, false);
-       if (ret)
-               return ret;
-
-       switch (member) {
-       case METRICS_AVERAGE_GFXCLK:
-               *value = metrics->GfxclkFrequency;
-               break;
-       case METRICS_AVERAGE_SOCCLK:
-               *value = metrics->SocclkFrequency;
-               break;
-       case METRICS_AVERAGE_VCLK:
-               *value = metrics->VclkFrequency;
-               break;
-       case METRICS_AVERAGE_DCLK:
-               *value = metrics->DclkFrequency;
-               break;
-       case METRICS_AVERAGE_UCLK:
-               *value = metrics->MemclkFrequency;
-               break;
-       case METRICS_AVERAGE_GFXACTIVITY:
-               *value = metrics->GfxActivity / 100;
-               break;
-       case METRICS_AVERAGE_FCLK:
-               *value = metrics->AverageFclkFrequency;
-               break;
-       case METRICS_AVERAGE_VCNACTIVITY:
-               *value = metrics->UvdActivity;
-               break;
-       case METRICS_AVERAGE_SOCKETPOWER:
-               *value = (metrics->AverageSocketPower << 8) / 1000;
-               break;
-       case METRICS_CURR_SOCKETPOWER:
-               *value = (metrics->CurrentSocketPower << 8) / 1000;
-               break;
-       case METRICS_TEMPERATURE_EDGE:
-               *value = metrics->GfxTemperature / 100 *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-               break;
-       case METRICS_TEMPERATURE_HOTSPOT:
-               *value = metrics->SocTemperature / 100 *
-               SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
-               break;
-       case METRICS_THROTTLER_STATUS:
-               *value = metrics->ThrottlerStatus;
-               break;
-       case METRICS_VOLTAGE_VDDGFX:
-               *value = metrics->Voltage[0];
-               break;
-       case METRICS_VOLTAGE_VDDSOC:
-               *value = metrics->Voltage[1];
-               break;
-       case METRICS_SS_APU_SHARE:
-               /* return the percentage of APU power with respect to APU's power limit.
-                * percentage is reported, this isn't boost value. Smartshift power
-                * boost/shift is only when the percentage is more than 100.
-                */
-               if (metrics->StapmOpnLimit > 0)
-                       *value =  (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
-               else
-                       *value = 0;
-               break;
-       case METRICS_SS_DGPU_SHARE:
-               /* return the percentage of dGPU power with respect to dGPU's power limit.
-                * percentage is reported, this isn't boost value. Smartshift power
-                * boost/shift is only when the percentage is more than 100.
-                */
-               if ((metrics->dGpuPower > 0) &&
-                   (metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
-                       *value = (metrics->dGpuPower * 100) /
-                                (metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
-               else
-                       *value = 0;
-               break;
-       default:
-               *value = UINT_MAX;
-               break;
-       }
-
-       return ret;
-}
-
-static int smu_v14_0_0_common_get_smu_metrics_data(struct smu_context *smu,
-                                           MetricsMember_t member,
-                                           uint32_t *value)
-{
-       if (smu->smc_fw_version > 0x5d3500)
-               return smu_v14_0_0_get_smu_metrics_data(smu, member, value);
-       else
-               return smu_v14_0_0_legacy_get_smu_metrics_data(smu, member, value);
-}
-
 static int smu_v14_0_0_read_sensor(struct smu_context *smu,
                                   enum amd_pp_sensors sensor,
                                   void *data, uint32_t *size)
@@ -429,69 +320,69 @@ static int smu_v14_0_0_read_sensor(struct smu_context *smu,
 
        switch (sensor) {
        case AMDGPU_PP_SENSOR_GPU_LOAD:
-               ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+               ret = smu_v14_0_0_get_smu_metrics_data(smu,
                                                       METRICS_AVERAGE_GFXACTIVITY,
                                                       (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
-               ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+               ret = smu_v14_0_0_get_smu_metrics_data(smu,
                                                       METRICS_AVERAGE_SOCKETPOWER,
                                                       (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
-               ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+               ret = smu_v14_0_0_get_smu_metrics_data(smu,
                                                       METRICS_CURR_SOCKETPOWER,
                                                       (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_EDGE_TEMP:
-               ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+               ret = smu_v14_0_0_get_smu_metrics_data(smu,
                                                       METRICS_TEMPERATURE_EDGE,
                                                       (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
-               ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+               ret = smu_v14_0_0_get_smu_metrics_data(smu,
                                                       METRICS_TEMPERATURE_HOTSPOT,
                                                       (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_MCLK:
-               ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+               ret = smu_v14_0_0_get_smu_metrics_data(smu,
                                                       METRICS_AVERAGE_UCLK,
                                                       (uint32_t *)data);
                *(uint32_t *)data *= 100;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_GFX_SCLK:
-               ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+               ret = smu_v14_0_0_get_smu_metrics_data(smu,
                                                       METRICS_AVERAGE_GFXCLK,
                                                       (uint32_t *)data);
                *(uint32_t *)data *= 100;
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VDDGFX:
-               ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+               ret = smu_v14_0_0_get_smu_metrics_data(smu,
                                                       METRICS_VOLTAGE_VDDGFX,
                                                       (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_VDDNB:
-               ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+               ret = smu_v14_0_0_get_smu_metrics_data(smu,
                                                       METRICS_VOLTAGE_VDDSOC,
                                                       (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_SS_APU_SHARE:
-               ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+               ret = smu_v14_0_0_get_smu_metrics_data(smu,
                                                       METRICS_SS_APU_SHARE,
                                                       (uint32_t *)data);
                *size = 4;
                break;
        case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
-               ret = smu_v14_0_0_common_get_smu_metrics_data(smu,
+               ret = smu_v14_0_0_get_smu_metrics_data(smu,
                                                       METRICS_SS_DGPU_SHARE,
                                                       (uint32_t *)data);
                *size = 4;
@@ -588,7 +479,7 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
        if (ret)
                return ret;
 
-       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 3, 0);
 
        gpu_metrics->temperature_gfx = metrics.GfxTemperature;
        gpu_metrics->temperature_soc = metrics.SocTemperature;
@@ -597,32 +488,33 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
                sizeof(uint16_t) * 16);
        gpu_metrics->temperature_skin = metrics.SkinTemp;
 
-       gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
-       gpu_metrics->average_vcn_activity = metrics.AverageVcnActivity;
+       gpu_metrics->average_gfx_activity = metrics.GfxActivity;
+       gpu_metrics->average_vcn_activity = metrics.VcnActivity;
        memcpy(&gpu_metrics->average_ipu_activity[0],
-               &metrics.AverageIpuBusy[0],
+               &metrics.IpuBusy[0],
                sizeof(uint16_t) * 8);
        memcpy(&gpu_metrics->average_core_c0_activity[0],
-               &metrics.AverageCoreC0Residency[0],
+               &metrics.CoreC0Residency[0],
                sizeof(uint16_t) * 16);
-       gpu_metrics->average_dram_reads = metrics.AverageDRAMReads;
-       gpu_metrics->average_dram_writes = metrics.AverageDRAMWrites;
+       gpu_metrics->average_dram_reads = metrics.DRAMReads;
+       gpu_metrics->average_dram_writes = metrics.DRAMWrites;
 
-       gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+       gpu_metrics->average_socket_power = metrics.SocketPower;
        gpu_metrics->average_ipu_power = metrics.IpuPower;
        gpu_metrics->average_apu_power = metrics.ApuPower;
+       gpu_metrics->average_gfx_power = metrics.GfxPower;
        gpu_metrics->average_dgpu_power = metrics.dGpuPower;
-       gpu_metrics->average_core_power = metrics.AverageCorePower;
-       memcpy(&gpu_metrics->core_power[0],
+       gpu_metrics->average_all_core_power = metrics.AllCorePower;
+       memcpy(&gpu_metrics->average_core_power[0],
                &metrics.CorePower[0],
                sizeof(uint16_t) * 16);
 
-       gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
-       gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
-       gpu_metrics->average_vpeclk_frequency = metrics.AverageVpeclkFrequency;
-       gpu_metrics->average_fclk_frequency = metrics.AverageFclkFrequency;
-       gpu_metrics->average_vclk_frequency = metrics.AverageVclkFrequency;
-       gpu_metrics->average_ipuclk_frequency = metrics.AverageIpuclkFrequency;
+       gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
+       gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+       gpu_metrics->average_vpeclk_frequency = metrics.VpeclkFrequency;
+       gpu_metrics->average_fclk_frequency = metrics.FclkFrequency;
+       gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+       gpu_metrics->average_ipuclk_frequency = metrics.IpuclkFrequency;
 
        memcpy(&gpu_metrics->current_coreclk[0],
                &metrics.CoreFrequency[0],
@@ -638,68 +530,6 @@ static ssize_t smu_v14_0_0_get_gpu_metrics(struct smu_context *smu,
        return sizeof(struct gpu_metrics_v3_0);
 }
 
-static ssize_t smu_v14_0_0_get_legacy_gpu_metrics(struct smu_context *smu,
-                                               void **table)
-{
-       struct smu_table_context *smu_table = &smu->smu_table;
-       struct gpu_metrics_v2_1 *gpu_metrics =
-               (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
-       SmuMetrics_legacy_t metrics;
-       int ret = 0;
-
-       ret = smu_cmn_get_metrics_table(smu, &metrics, true);
-       if (ret)
-               return ret;
-
-       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
-
-       gpu_metrics->temperature_gfx = metrics.GfxTemperature;
-       gpu_metrics->temperature_soc = metrics.SocTemperature;
-       memcpy(&gpu_metrics->temperature_core[0],
-               &metrics.CoreTemperature[0],
-               sizeof(uint16_t) * 8);
-       gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];
-       gpu_metrics->temperature_l3[1] = metrics.L3Temperature[1];
-
-       gpu_metrics->average_gfx_activity = metrics.GfxActivity;
-       gpu_metrics->average_mm_activity = metrics.UvdActivity;
-
-       gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
-       gpu_metrics->average_gfx_power = metrics.Power[0];
-       gpu_metrics->average_soc_power = metrics.Power[1];
-       memcpy(&gpu_metrics->average_core_power[0],
-               &metrics.CorePower[0],
-               sizeof(uint16_t) * 8);
-
-       gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
-       gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
-       gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
-       gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
-       gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
-       gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
-
-       memcpy(&gpu_metrics->current_coreclk[0],
-               &metrics.CoreFrequency[0],
-               sizeof(uint16_t) * 8);
-
-       gpu_metrics->throttle_status = metrics.ThrottlerStatus;
-       gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
-
-       *table = (void *)gpu_metrics;
-
-       return sizeof(struct gpu_metrics_v2_1);
-}
-
-static ssize_t smu_v14_0_0_common_get_gpu_metrics(struct smu_context *smu,
-                                     void **table)
-{
-
-       if (smu->smc_fw_version > 0x5d3500)
-               return smu_v14_0_0_get_gpu_metrics(smu, table);
-       else
-               return smu_v14_0_0_get_legacy_gpu_metrics(smu, table);
-}
-
 static int smu_v14_0_0_mode2_reset(struct smu_context *smu)
 {
        int ret;
@@ -928,7 +758,7 @@ static int smu_v14_0_0_get_current_clk_freq(struct smu_context *smu,
                return -EINVAL;
        }
 
-       return smu_v14_0_0_common_get_smu_metrics_data(smu, member_type, value);
+       return smu_v14_0_0_get_smu_metrics_data(smu, member_type, value);
 }
 
 static int smu_v14_0_0_get_dpm_level_count(struct smu_context *smu,
@@ -1230,7 +1060,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = {
        .read_sensor = smu_v14_0_0_read_sensor,
        .is_dpm_running = smu_v14_0_0_is_dpm_running,
        .set_watermarks_table = smu_v14_0_0_set_watermarks_table,
-       .get_gpu_metrics = smu_v14_0_0_common_get_gpu_metrics,
+       .get_gpu_metrics = smu_v14_0_0_get_gpu_metrics,
        .get_enabled_mask = smu_cmn_get_enabled_mask,
        .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
        .set_driver_table_location = smu_v14_0_set_driver_table_location,
index 6e57c94379a9d9cdb2b912e31af3c646f6ae9fe0..001a5cf096579cc27a97501d2b4e803188f73b14 100644 (file)
@@ -1004,6 +1004,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
        case METRICS_VERSION(2, 4):
                structure_size = sizeof(struct gpu_metrics_v2_4);
                break;
+       case METRICS_VERSION(3, 0):
+               structure_size = sizeof(struct gpu_metrics_v3_0);
+               break;
        default:
                return;
        }
index 84148a79414b7ff4cbc1c41348334cf995d99329..c45c07840f645a3216e0f0a8986920f1bd17d997 100644 (file)
@@ -1580,7 +1580,6 @@ static const struct pwm_ops ti_sn_pwm_ops = {
        .free = ti_sn_pwm_free,
        .apply = ti_sn_pwm_apply,
        .get_state = ti_sn_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static int ti_sn_pwm_probe(struct auxiliary_device *adev,
index f7003d1ec5ef1e080b8ea7e5c2dc22e16ff95d0d..01da6789d0440940c7e754d16e6866746a5614ff 100644 (file)
@@ -1069,7 +1069,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
                fence = drm_syncobj_fence_get(syncobjs[i]);
                if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
                        dma_fence_put(fence);
-                       if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+                       if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+                                    DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
                                continue;
                        } else {
                                timeout = -EINVAL;
index 6d7ba4d0f13068c31ec2d5cb6a1cb84e4945a9a9..c4839c67cb0f062731afd19f1692f7d2f8eda528 100644 (file)
@@ -2750,6 +2750,18 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
        for_each_pipe(dev_priv, pipe)
                min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
 
+       /*
+        * Avoid glk_force_audio_cdclk() causing excessive screen
+        * blinking when multiple pipes are active by making sure
+        * CDCLK frequency is always high enough for audio. With a
+        * single active pipe we can always change CDCLK frequency
+        * by changing the cd2x divider (see glk_cdclk_table[]) and
+        * thus a full modeset won't be needed then.
+        */
+       if (IS_GEMINILAKE(dev_priv) && cdclk_state->active_pipes &&
+           !is_power_of_2(cdclk_state->active_pipes))
+               min_cdclk = max(2 * 96000, min_cdclk);
+
        if (min_cdclk > dev_priv->display.cdclk.max_cdclk_freq) {
                drm_dbg_kms(&dev_priv->drm,
                            "required cdclk (%d kHz) exceeds max (%d kHz)\n",
index 1891c0cc187d11ba06067be512ba302dc0e87e43..2c103457898407701c25a9403a4238e5a6a93d9c 100644 (file)
@@ -430,7 +430,7 @@ static int mtl_max_source_rate(struct intel_dp *intel_dp)
        enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
 
        if (intel_is_c10phy(i915, phy))
-               return intel_dp_is_edp(intel_dp) ? 675000 : 810000;
+               return 810000;
 
        return 2000000;
 }
index 37b0f8529b4f9ae2b9c415c0a1d059f50696e2b8..f64d348a969efa93dff0bd254369f3b4a6a22eea 100644 (file)
@@ -58,7 +58,7 @@ struct intel_tc_port {
        struct delayed_work link_reset_work;
        int link_refcount;
        bool legacy_port:1;
-       char port_name[8];
+       const char *port_name;
        enum tc_port_mode mode;
        enum tc_port_mode init_mode;
        enum phy_fia phy_fia;
@@ -1875,8 +1875,12 @@ int intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
        else
                tc->phy_ops = &icl_tc_phy_ops;
 
-       snprintf(tc->port_name, sizeof(tc->port_name),
-                "%c/TC#%d", port_name(port), tc_port + 1);
+       tc->port_name = kasprintf(GFP_KERNEL, "%c/TC#%d", port_name(port),
+                                 tc_port + 1);
+       if (!tc->port_name) {
+               kfree(tc);
+               return -ENOMEM;
+       }
 
        mutex_init(&tc->lock);
        /* TODO: Combine the two works */
@@ -1897,6 +1901,7 @@ void intel_tc_port_cleanup(struct intel_digital_port *dig_port)
 {
        intel_tc_port_suspend(dig_port);
 
+       kfree(dig_port->tc->port_name);
        kfree(dig_port->tc);
        dig_port->tc = NULL;
 }
index 9a9ff84c90d7e69d9f6c3b8a5f892f639c960e98..e38f06a6e56ebcde338fd17d5ebf9b0fda51d65d 100644 (file)
@@ -844,6 +844,7 @@ static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
                if (idx >= pc->num_user_engines)
                        return -EINVAL;
 
+               idx = array_index_nospec(idx, pc->num_user_engines);
                pe = &pc->user_engines[idx];
 
                /* Only render engine supports RPCS configuration. */
index 1c93e84278a037d0b8512545edac9ec9747b423c..15fc8e4703f48308919165e1102b38221ddf6b7b 100644 (file)
@@ -195,6 +195,21 @@ void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
        spin_unlock_irq(&uncore->lock);
 }
 
+static bool needs_wc_ggtt_mapping(struct drm_i915_private *i915)
+{
+       /*
+        * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
+        * will be dropped. For WC mappings in general we have 64 byte burst
+        * writes when the WC buffer is flushed, so we can't use it, but have to
+        * resort to an uncached mapping. The WC issue is easily caught by the
+        * readback check when writing GTT PTE entries.
+        */
+       if (!IS_GEN9_LP(i915) && GRAPHICS_VER(i915) < 11)
+               return true;
+
+       return false;
+}
+
 static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
 {
        struct intel_uncore *uncore = ggtt->vm.gt->uncore;
@@ -202,8 +217,12 @@ static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
        /*
         * Note that as an uncached mmio write, this will flush the
         * WCB of the writes into the GGTT before it triggers the invalidate.
+        *
+        * Only perform this when GGTT is mapped as WC, see ggtt_probe_common().
         */
-       intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+       if (needs_wc_ggtt_mapping(ggtt->vm.i915))
+               intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6,
+                                     GFX_FLSH_CNTL_EN);
 }
 
 static void guc_ggtt_ct_invalidate(struct intel_gt *gt)
@@ -1140,17 +1159,11 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
        GEM_WARN_ON(pci_resource_len(pdev, GEN4_GTTMMADR_BAR) != gen6_gttmmadr_size(i915));
        phys_addr = pci_resource_start(pdev, GEN4_GTTMMADR_BAR) + gen6_gttadr_offset(i915);
 
-       /*
-        * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
-        * will be dropped. For WC mappings in general we have 64 byte burst
-        * writes when the WC buffer is flushed, so we can't use it, but have to
-        * resort to an uncached mapping. The WC issue is easily caught by the
-        * readback check when writing GTT PTE entries.
-        */
-       if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
-               ggtt->gsm = ioremap(phys_addr, size);
-       else
+       if (needs_wc_ggtt_mapping(i915))
                ggtt->gsm = ioremap_wc(phys_addr, size);
+       else
+               ggtt->gsm = ioremap(phys_addr, size);
+
        if (!ggtt->gsm) {
                drm_err(&i915->drm, "Failed to map the ggtt page table\n");
                return -ENOMEM;
index 8b67abd720be866282b1c8b3946786c31186fe0a..7090e4be29cb69bb6f2fdf459cf4062fc4a1ce3a 100644 (file)
@@ -581,19 +581,23 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
 
 static void rc6_res_reg_init(struct intel_rc6 *rc6)
 {
-       memset(rc6->res_reg, INVALID_MMIO_REG.reg, sizeof(rc6->res_reg));
+       i915_reg_t res_reg[INTEL_RC6_RES_MAX] = {
+               [0 ... INTEL_RC6_RES_MAX - 1] = INVALID_MMIO_REG,
+       };
 
        switch (rc6_to_gt(rc6)->type) {
        case GT_MEDIA:
-               rc6->res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
+               res_reg[INTEL_RC6_RES_RC6] = MTL_MEDIA_MC6;
                break;
        default:
-               rc6->res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
-               rc6->res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
-               rc6->res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
-               rc6->res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
+               res_reg[INTEL_RC6_RES_RC6_LOCKED] = GEN6_GT_GFX_RC6_LOCKED;
+               res_reg[INTEL_RC6_RES_RC6] = GEN6_GT_GFX_RC6;
+               res_reg[INTEL_RC6_RES_RC6p] = GEN6_GT_GFX_RC6p;
+               res_reg[INTEL_RC6_RES_RC6pp] = GEN6_GT_GFX_RC6pp;
                break;
        }
+
+       memcpy(rc6->res_reg, res_reg, sizeof(res_reg));
 }
 
 void intel_rc6_init(struct intel_rc6 *rc6)
index 614bde321589ddb7949ee904bd7bdb07bbf7ece0..8bca02025e0933300966097a37f1a3a77d1f97d8 100644 (file)
@@ -38,10 +38,13 @@ static int i915_param_int_open(struct inode *inode, struct file *file)
 
 static int notify_guc(struct drm_i915_private *i915)
 {
-       int ret = 0;
+       struct intel_gt *gt;
+       int i, ret = 0;
 
-       if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
-               ret = intel_guc_global_policies_update(&to_gt(i915)->uc.guc);
+       for_each_gt(gt, i915, i) {
+               if (intel_uc_uses_guc_submission(&gt->uc))
+                       ret = intel_guc_global_policies_update(&gt->uc.guc);
+       }
 
        return ret;
 }
index 2f3ecd7d4804a20d77cc6b5d612431a631814b3a..7b1c8de2f9cb3c76664ebca3cebe536e402bf982 100644 (file)
@@ -4227,11 +4227,8 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data,
        u32 known_open_flags;
        int ret;
 
-       if (!perf->i915) {
-               drm_dbg(&perf->i915->drm,
-                       "i915 perf interface not available for this system\n");
+       if (!perf->i915)
                return -ENOTSUPP;
-       }
 
        known_open_flags = I915_PERF_FLAG_FD_CLOEXEC |
                           I915_PERF_FLAG_FD_NONBLOCK |
@@ -4607,11 +4604,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
        struct i915_oa_reg *regs;
        int err, id;
 
-       if (!perf->i915) {
-               drm_dbg(&perf->i915->drm,
-                       "i915 perf interface not available for this system\n");
+       if (!perf->i915)
                return -ENOTSUPP;
-       }
 
        if (!perf->metrics_kobj) {
                drm_dbg(&perf->i915->drm,
@@ -4773,11 +4767,8 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
        struct i915_oa_config *oa_config;
        int ret;
 
-       if (!perf->i915) {
-               drm_dbg(&perf->i915->drm,
-                       "i915 perf interface not available for this system\n");
+       if (!perf->i915)
                return -ENOTSUPP;
-       }
 
        if (i915_perf_stream_paranoid && !perfmon_capable()) {
                drm_dbg(&perf->i915->drm,
index abefc2343443bef6895f95a35ac08cde64d273ca..f045515696cbbc8b86f45c3f3319a76690b25ad6 100644 (file)
@@ -42,6 +42,7 @@ nv50_core_new(struct nouveau_drm *drm, struct nv50_core **pcore)
                int version;
                int (*new)(struct nouveau_drm *, s32, struct nv50_core **);
        } cores[] = {
+               { AD102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
                { GA102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
                { TU102_DISP_CORE_CHANNEL_DMA, 0, corec57d_new },
                { GV100_DISP_CORE_CHANNEL_DMA, 0, corec37d_new },
index a0ac8c258d9ff118c19e6d4d65e3dc5602acfd2d..7840b6428afbe468b2ad51ac2f59c3fc2c77a3e1 100644 (file)
@@ -1592,6 +1592,148 @@ nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *st
        nv_encoder->crtc = NULL;
 }
 
+// common/inc/displayport/displayport.h
+#define DP_CONFIG_WATERMARK_ADJUST                   2
+#define DP_CONFIG_WATERMARK_LIMIT                   20
+#define DP_CONFIG_INCREASED_WATERMARK_ADJUST         8
+#define DP_CONFIG_INCREASED_WATERMARK_LIMIT         22
+
+static bool
+nv50_sor_dp_watermark_sst(struct nouveau_encoder *outp,
+                         struct nv50_head *head, struct nv50_head_atom *asyh)
+{
+       bool enhancedFraming = outp->dp.dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP;
+       u64 minRate = outp->dp.link_bw * 1000;
+       unsigned tuSize = 64;
+       unsigned waterMark;
+       unsigned hBlankSym;
+       unsigned vBlankSym;
+       unsigned watermarkAdjust = DP_CONFIG_WATERMARK_ADJUST;
+       unsigned watermarkMinimum = DP_CONFIG_WATERMARK_LIMIT;
+       // depth is multiplied by 16 in case of DSC enable
+       s32 hblank_symbols;
+       // number of link clocks per line.
+       int vblank_symbols        = 0;
+       bool bEnableDsc = false;
+       unsigned surfaceWidth = asyh->mode.h.blanks - asyh->mode.h.blanke;
+       unsigned rasterWidth = asyh->mode.h.active;
+       unsigned depth = asyh->or.bpc * 3;
+       unsigned DSC_FACTOR = bEnableDsc ? 16 : 1;
+       u64 pixelClockHz = asyh->mode.clock * 1000;
+       u64 PrecisionFactor = 100000, ratioF, watermarkF;
+       u32 numLanesPerLink = outp->dp.link_nr;
+       u32 numSymbolsPerLine;
+       u32 BlankingBits;
+       u32 surfaceWidthPerLink;
+       u32 PixelSteeringBits;
+       u64 NumBlankingLinkClocks;
+       u32 MinHBlank;
+
+       if (outp->outp.info.dp.increased_wm) {
+               watermarkAdjust = DP_CONFIG_INCREASED_WATERMARK_ADJUST;
+               watermarkMinimum = DP_CONFIG_INCREASED_WATERMARK_LIMIT;
+       }
+
+       if ((pixelClockHz * depth) >= (8 * minRate * outp->dp.link_nr * DSC_FACTOR))
+       {
+               return false;
+       }
+
+       //
+       // For DSC, if (pclk * bpp) < (1/64 * orclk * 8 * lanes) then some TU may end up with
+       // 0 active symbols. This may cause HW hang. Bug 200379426
+       //
+       if ((bEnableDsc) &&
+           ((pixelClockHz * depth) < div_u64(8 * minRate * outp->dp.link_nr * DSC_FACTOR, 64)))
+       {
+               return false;
+       }
+
+       //
+       //  Perform the SST calculation.
+       //      For auto mode the watermark calculation does not need to track accumulated error the
+       //      formulas for manual mode will not work.  So below calculation was extracted from the DTB.
+       //
+       ratioF = div_u64((u64)pixelClockHz * depth * PrecisionFactor, DSC_FACTOR);
+
+       ratioF = div_u64(ratioF, 8 * (u64) minRate * outp->dp.link_nr);
+
+       if (PrecisionFactor < ratioF) // Assert if we will end up with a negative number in below
+               return false;
+
+       watermarkF = div_u64(ratioF * tuSize * (PrecisionFactor - ratioF), PrecisionFactor);
+       waterMark = (unsigned)(watermarkAdjust + (div_u64(2 * div_u64(depth * PrecisionFactor, 8 * numLanesPerLink * DSC_FACTOR) + watermarkF, PrecisionFactor)));
+
+       //
+       //  Bounds check the watermark
+       //
+       numSymbolsPerLine = div_u64(surfaceWidth * depth, 8 * outp->dp.link_nr * DSC_FACTOR);
+
+       if (WARN_ON(waterMark > 39 || waterMark > numSymbolsPerLine))
+               return false;
+
+       //
+       //  Clamp the low side
+       //
+       if (waterMark < watermarkMinimum)
+               waterMark = watermarkMinimum;
+
+       //Bits to send BS/BE/Extra symbols due to pixel padding
+       //Also accounts for enhanced framing.
+       BlankingBits = 3*8*numLanesPerLink + (enhancedFraming ? 3*8*numLanesPerLink : 0);
+
+       //VBID/MVID/MAUD sent 4 times all the time
+       BlankingBits += 3*8*4;
+
+       surfaceWidthPerLink = surfaceWidth;
+
+       //Extra bits sent due to pixel steering
+       u32 remain;
+       div_u64_rem(surfaceWidthPerLink, numLanesPerLink, &remain);
+       PixelSteeringBits = remain ? div_u64((numLanesPerLink - remain) * depth, DSC_FACTOR) : 0;
+
+       BlankingBits += PixelSteeringBits;
+       NumBlankingLinkClocks = div_u64((u64)BlankingBits * PrecisionFactor, (8 * numLanesPerLink));
+       MinHBlank = (u32)(div_u64(div_u64(NumBlankingLinkClocks * pixelClockHz, minRate), PrecisionFactor));
+       MinHBlank += 12;
+
+       if (WARN_ON(MinHBlank > rasterWidth - surfaceWidth))
+               return false;
+
+       // Bug 702290 - Active Width should be greater than 60
+       if (WARN_ON(surfaceWidth <= 60))
+               return false;
+
+
+       hblank_symbols = (s32)(div_u64((u64)(rasterWidth - surfaceWidth - MinHBlank) * minRate, pixelClockHz));
+
+       //reduce HBlank Symbols to account for secondary data packet
+       hblank_symbols -= 1; //Stuffer latency to send BS
+       hblank_symbols -= 3; //SPKT latency to send data to stuffer
+
+       hblank_symbols -= numLanesPerLink == 1 ? 9  : numLanesPerLink == 2 ? 6 : 3;
+
+       hBlankSym = (hblank_symbols < 0) ? 0 : hblank_symbols;
+
+       // Refer to dev_disp.ref for more information.
+       // # symbols/vblank = ((SetRasterBlankEnd.X + SetRasterSize.Width - SetRasterBlankStart.X - 40) * link_clk / pclk) - Y - 1;
+       // where Y = (# lanes == 4) 12 : (# lanes == 2) ? 21 : 39
+       if (surfaceWidth < 40)
+       {
+               vblank_symbols = 0;
+       }
+       else
+       {
+               vblank_symbols = (s32)((div_u64((u64)(surfaceWidth - 40) * minRate, pixelClockHz))) - 1;
+
+               vblank_symbols -= numLanesPerLink == 1 ? 39  : numLanesPerLink == 2 ? 21 : 12;
+       }
+
+       vBlankSym = (vblank_symbols < 0) ? 0 : vblank_symbols;
+
+       return nvif_outp_dp_sst(&outp->outp, head->base.index, waterMark, hBlankSym, vBlankSym);
+}
+
 static void
 nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *state)
 {
@@ -1679,6 +1821,7 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
                break;
        case DCB_OUTPUT_DP:
                nouveau_dp_train(nv_encoder, false, mode->clock, asyh->or.bpc);
+               nv50_sor_dp_watermark_sst(nv_encoder, head, asyh);
                depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
 
                if (nv_encoder->outp.or.link & 1)
index 8b5a240d57e473d106e8b40630a635ea804dad1f..fa161b74d96774ff76b05707c77e662640b87849 100644 (file)
@@ -35,6 +35,7 @@ struct nv_device_info_v0 {
 #define NV_DEVICE_INFO_V0_VOLTA                                            0x0b
 #define NV_DEVICE_INFO_V0_TURING                                           0x0c
 #define NV_DEVICE_INFO_V0_AMPERE                                           0x0d
+#define NV_DEVICE_INFO_V0_ADA                                              0x0e
        __u8  family;
        __u8  pad06[2];
        __u64 ram_size;
@@ -90,6 +91,8 @@ struct nv_device_time_v0 {
 #define NV_DEVICE_HOST_RUNLIST_ENGINES_SEC2                          0x00004000
 #define NV_DEVICE_HOST_RUNLIST_ENGINES_NVDEC                         0x00008000
 #define NV_DEVICE_HOST_RUNLIST_ENGINES_NVENC                         0x00010000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_NVJPG                         0x00020000
+#define NV_DEVICE_HOST_RUNLIST_ENGINES_OFA                           0x00040000
 /* Returns the number of available channels on runlist(data). */
 #define NV_DEVICE_HOST_RUNLIST_CHANNELS               NV_DEVICE_HOST(0x00000101)
 #endif
index ad1e5de84e80f5081b0702ade33d6f9725b596f2..e668ab1664f0857d86bd142011d7427119d0adad 100644 (file)
 #define GV100_DISP                                    /* if0010.h */ 0x0000c370
 #define TU102_DISP                                    /* if0010.h */ 0x0000c570
 #define GA102_DISP                                    /* if0010.h */ 0x0000c670
+#define AD102_DISP                                    /* if0010.h */ 0x0000c770
 
 #define GV100_DISP_CAPS                                              0x0000c373
 
 #define GV100_DISP_CORE_CHANNEL_DMA                   /* if0014.h */ 0x0000c37d
 #define TU102_DISP_CORE_CHANNEL_DMA                   /* if0014.h */ 0x0000c57d
 #define GA102_DISP_CORE_CHANNEL_DMA                   /* if0014.h */ 0x0000c67d
+#define AD102_DISP_CORE_CHANNEL_DMA                   /* if0014.h */ 0x0000c77d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                 /* if0014.h */ 0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                  /* if0014.h */ 0x0000827e
 
 #define AMPERE_B                                      /* cl9097.h */ 0x0000c797
 
+#define ADA_A                                         /* cl9097.h */ 0x0000c997
+
 #define NV74_BSP                                                     0x000074b0
 
+#define NVC4B0_VIDEO_DECODER                                         0x0000c4b0
+#define NVC6B0_VIDEO_DECODER                                         0x0000c6b0
+#define NVC7B0_VIDEO_DECODER                                         0x0000c7b0
+#define NVC9B0_VIDEO_DECODER                                         0x0000c9b0
+
 #define GT212_MSVLD                                                  0x000085b1
 #define IGT21A_MSVLD                                                 0x000086b1
 #define G98_MSVLD                                                    0x000088b1
 #define AMPERE_DMA_COPY_A                                            0x0000c6b5
 #define AMPERE_DMA_COPY_B                                            0x0000c7b5
 
+#define NVC4B7_VIDEO_ENCODER                                         0x0000c4b7
+#define NVC7B7_VIDEO_ENCODER                                         0x0000c7b7
+#define NVC9B7_VIDEO_ENCODER                                         0x0000c9b7
+
 #define FERMI_DECOMPRESS                                             0x000090b8
 
 #define NV50_COMPUTE                                                 0x000050c0
 #define VOLTA_COMPUTE_A                                              0x0000c3c0
 #define TURING_COMPUTE_A                                             0x0000c5c0
 #define AMPERE_COMPUTE_B                                             0x0000c7c0
+#define ADA_COMPUTE_A                                                0x0000c9c0
 
 #define NV74_CIPHER                                                  0x000074c1
+
+#define NVC4D1_VIDEO_NVJPG                                           0x0000c4d1
+#define NVC9D1_VIDEO_NVJPG                                           0x0000c9d1
+
+#define NVC6FA_VIDEO_OFA                                             0x0000c6fa
+#define NVC7FA_VIDEO_OFA                                             0x0000c7fa
+#define NVC9FA_VIDEO_OFA                                             0x0000c9fa
 #endif
index f65b5009acf75a39d5dc43440c28e7bc917fe61f..f057d348221e5adf8d8686f371d6ce12a08d1b0c 100644 (file)
@@ -46,6 +46,7 @@ struct nvkm_device {
                GV100    = 0x140,
                TU100    = 0x160,
                GA100    = 0x170,
+               AD100    = 0x190,
        } card_type;
        u32 chipset;
        u8  chiprev;
index b857cf142c4a3fcc81999b4585316792d5c932cc..3d3f1063aaa7c7e3ed49f6b637682867db26f463 100644 (file)
@@ -48,6 +48,8 @@ int nvkm_falcon_pio_rd(struct nvkm_falcon *, u8 port, enum nvkm_falcon_mem type,
                       const u8 *img, u32 img_base, int len);
 int nvkm_falcon_dma_wr(struct nvkm_falcon *, const u8 *img, u64 dma_addr, u32 dma_base,
                       enum nvkm_falcon_mem mem_type, u32 mem_base, int len, bool sec);
+bool nvkm_falcon_riscv_active(struct nvkm_falcon *);
+void nvkm_falcon_intr_retrigger(struct nvkm_falcon *);
 
 int gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *);
 int gm200_flcn_disable(struct nvkm_falcon *);
@@ -61,10 +63,15 @@ void gm200_flcn_tracepc(struct nvkm_falcon *);
 int gp102_flcn_reset_eng(struct nvkm_falcon *);
 extern const struct nvkm_falcon_func_pio gp102_flcn_emem_pio;
 
+bool tu102_flcn_riscv_active(struct nvkm_falcon *);
+
+void ga100_flcn_intr_retrigger(struct nvkm_falcon *);
+
 int ga102_flcn_select(struct nvkm_falcon *);
 int ga102_flcn_reset_prep(struct nvkm_falcon *);
 int ga102_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *);
 extern const struct nvkm_falcon_func_dma ga102_flcn_dma;
+bool ga102_flcn_riscv_active(struct nvkm_falcon *);
 
 void nvkm_falcon_v1_load_imem(struct nvkm_falcon *,
                              void *, u32, u32, u16, u8, bool);
index d4e507e252b13d624a92293e13a4ee5d1b20a0aa..20839be72644be7f1e46093f9182e632e4cc4bd3 100644 (file)
@@ -10,6 +10,7 @@ struct nvkm_firmware {
                enum nvkm_firmware_type {
                        NVKM_FIRMWARE_IMG_RAM,
                        NVKM_FIRMWARE_IMG_DMA,
+                       NVKM_FIRMWARE_IMG_SGT,
                } type;
        } *func;
        const char *name;
@@ -21,7 +22,10 @@ struct nvkm_firmware {
 
        struct nvkm_firmware_mem {
                struct nvkm_memory memory;
-               struct scatterlist sgl;
+               union {
+                       struct scatterlist sgl; /* DMA */
+                       struct sg_table sgt;    /* SGT */
+               };
        } mem;
 };
 
index 58108dea5aeb69fd5fa9b3a5be0206c814b8992f..30c17db483cb53cd90cc121285f5dd2cc89a388f 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: MIT */
-NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP     , struct nvkm_top     ,      top)
 NVKM_LAYOUT_ONCE(NVKM_SUBDEV_GSP     , struct nvkm_gsp     ,      gsp)
+NVKM_LAYOUT_ONCE(NVKM_SUBDEV_TOP     , struct nvkm_top     ,      top)
 NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VFN     , struct nvkm_vfn     ,      vfn)
 NVKM_LAYOUT_ONCE(NVKM_SUBDEV_PCI     , struct nvkm_pci     ,      pci)
 NVKM_LAYOUT_ONCE(NVKM_SUBDEV_VBIOS   , struct nvkm_bios    ,     bios)
@@ -42,9 +42,9 @@ NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSENC   , struct nvkm_engine  ,    msenc)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPDEC  , struct nvkm_engine  ,   mspdec)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSPPP   , struct nvkm_engine  ,    msppp)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_MSVLD   , struct nvkm_engine  ,    msvld)
-NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC   , struct nvkm_nvdec   ,    nvdec, 5)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVDEC   , struct nvkm_nvdec   ,    nvdec, 8)
 NVKM_LAYOUT_INST(NVKM_ENGINE_NVENC   , struct nvkm_nvenc   ,    nvenc, 3)
-NVKM_LAYOUT_ONCE(NVKM_ENGINE_NVJPG   , struct nvkm_engine  ,    nvjpg)
+NVKM_LAYOUT_INST(NVKM_ENGINE_NVJPG   , struct nvkm_engine  ,    nvjpg, 8)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_OFA     , struct nvkm_engine  ,      ofa)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_PM      , struct nvkm_pm      ,       pm)
 NVKM_LAYOUT_ONCE(NVKM_ENGINE_SEC     , struct nvkm_engine  ,      sec)
index ad9aef2df48f798750c8d4687985b86652f872ef..3e8db8280e2a4a32077cb1c38df58eac2a75eba8 100644 (file)
@@ -5,11 +5,29 @@
 #include <core/engine.h>
 #include <core/object.h>
 #include <core/event.h>
+#include <subdev/gsp.h>
 
 struct nvkm_disp {
        const struct nvkm_disp_func *func;
        struct nvkm_engine engine;
 
+       struct {
+               struct nvkm_gsp_client client;
+               struct nvkm_gsp_device device;
+
+               struct nvkm_gsp_object objcom;
+               struct nvkm_gsp_object object;
+
+#define NVKM_DPYID_PLUG   BIT(0)
+#define NVKM_DPYID_UNPLUG BIT(1)
+#define NVKM_DPYID_IRQ    BIT(2)
+               struct nvkm_event event;
+               struct nvkm_gsp_event hpd;
+               struct nvkm_gsp_event irq;
+
+               u32 assigned_sors;
+       } rm;
+
        struct list_head heads;
        struct list_head iors;
        struct list_head outps;
@@ -69,4 +87,5 @@ int gp102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct
 int gv100_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
 int tu102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
 int ga102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
+int ad102_disp_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_disp **);
 #endif
index b7bb8a29a729784651725b142157875f4be4a57e..341f133dc38c22c3ebd1c7bf8d4b30cad715655b 100644 (file)
@@ -62,6 +62,7 @@ struct nvkm_falcon_func {
        int (*enable)(struct nvkm_falcon *);
        int (*select)(struct nvkm_falcon *);
        u32 addr2;
+       u32 riscv_irqmask;
        bool reset_pmc;
        int (*reset_eng)(struct nvkm_falcon *);
        int (*reset_prep)(struct nvkm_falcon *);
@@ -87,6 +88,9 @@ struct nvkm_falcon_func {
                u32 stride;
        } cmdq, msgq;
 
+       bool (*riscv_active)(struct nvkm_falcon *);
+       void (*intr_retrigger)(struct nvkm_falcon *);
+
        struct {
                u32 *data;
                u32  size;
index 221abd6c431037c22a53e4ef1404482dac8bbfc0..be508f65b2808f4576e36070458235c903e328ba 100644 (file)
@@ -4,6 +4,7 @@
 #include <core/engine.h>
 #include <core/object.h>
 #include <core/event.h>
+#include <subdev/gsp.h>
 struct nvkm_fault_data;
 
 #define NVKM_FIFO_ENGN_NR 16
@@ -35,6 +36,15 @@ struct nvkm_chan {
        atomic_t blocked;
        atomic_t errored;
 
+       struct {
+               struct nvkm_gsp_object object;
+               struct {
+                       dma_addr_t addr;
+                       void *ptr;
+               } mthdbuf;
+               struct nvkm_vctx *grctx;
+       } rm;
+
        struct list_head cctxs;
        struct list_head head;
 };
@@ -43,6 +53,8 @@ struct nvkm_chan *nvkm_chan_get_chid(struct nvkm_engine *, int id, unsigned long
 struct nvkm_chan *nvkm_chan_get_inst(struct nvkm_engine *, u64 inst, unsigned long *irqflags);
 void nvkm_chan_put(struct nvkm_chan **, unsigned long irqflags);
 
+struct nvkm_chan *nvkm_uchan_chan(struct nvkm_object *);
+
 struct nvkm_fifo {
        const struct nvkm_fifo_func *func;
        struct nvkm_engine engine;
@@ -66,8 +78,15 @@ struct nvkm_fifo {
        struct {
                struct nvkm_memory *mem;
                struct nvkm_vma *bar1;
+
+               struct mutex mutex;
+               struct list_head list;
        } userd;
 
+       struct {
+               u32 mthdbuf_size;
+       } rm;
+
        spinlock_t lock;
        struct mutex mutex;
 };
index a2333cfe6955c951c4e739fa014160da4b3c3e95..8145796ffc61bb8f220893415ff392b9dce45136 100644 (file)
@@ -55,4 +55,5 @@ int gp10b_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct n
 int gv100_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
 int tu102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
 int ga102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
+int ad102_gr_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_gr **);
 #endif
index 9baf197ac8336f3a0b6c5d25125ce33981c24f0e..8d2e170883e1f364b68c474abbca2fffc6acbca6 100644 (file)
@@ -12,5 +12,8 @@ struct nvkm_nvdec {
 };
 
 int gm107_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
+int tu102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
+int ga100_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
 int ga102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
+int ad102_nvdec_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvdec **);
 #endif
index 1a259c5c9a7140e12be1e5c130cc255fe9ea074e..018c58fc32ba61395233e4efa739f8a7c771ab9f 100644 (file)
@@ -12,4 +12,7 @@ struct nvkm_nvenc {
 };
 
 int gm107_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
+int tu102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
+int ga102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
+int ad102_nvenc_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_nvenc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvjpg.h
new file mode 100644 (file)
index 0000000..80b7933
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_NVJPG_H__
+#define __NVKM_NVJPG_H__
+#include <core/engine.h>
+
+int ga100_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int ad102_nvjpg_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ofa.h
new file mode 100644 (file)
index 0000000..e72e211
--- /dev/null
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_OFA_H__
+#define __NVKM_OFA_H__
+#include <core/engine.h>
+
+int ga100_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int ga102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+int ad102_ofa_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_engine **);
+#endif
index 4f07836ab984e74636d8943b3f9e1700f054e2fb..874a5080ba0664bb54ff69d16b43894315d56251 100644 (file)
@@ -11,6 +11,10 @@ struct nvkm_bar {
        spinlock_t lock;
        bool bar2;
 
+       void __iomem *flushBAR2PhysMode;
+       struct nvkm_memory *flushFBZero;
+       void __iomem *flushBAR2;
+
        /* whether the BAR supports to be ioremapped WC or should be uncached */
        bool iomap_uncached;
 };
index b61cfb077533ac5c77a0cb9a3136db89e4ee138f..b4b7841e3b13fdd48f3960a615c3a98bf343aff1 100644 (file)
@@ -29,6 +29,7 @@ int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
 u8  nvbios_rd08(struct nvkm_bios *, u32 addr);
 u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
 u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
+void *nvbios_pointer(struct nvkm_bios *, u32 addr);
 
 int nvkm_bios_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_bios **);
 #endif
index 1755b0df3cc1db62a8bb967bc88aebf52420b16c..5b798a1a313d8f80bc8d040088942e087e549695 100644 (file)
@@ -158,9 +158,9 @@ struct nvkm_ram {
        struct nvkm_ram_data target;
 };
 
-int
-nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
-            bool contig, bool back, struct nvkm_memory **);
+int nvkm_ram_wrap(struct nvkm_device *, u64 addr, u64 size, struct nvkm_memory **);
+int nvkm_ram_get(struct nvkm_device *, u8 heap, u8 type, u8 page, u64 size,
+                bool contig, bool back, struct nvkm_memory **);
 
 struct nvkm_ram_func {
        u64 upper;
index 72619d7df73e7047d2ac9764ce0301d53d0e8ced..2fa0445d89280c6677e58ccafa8f91b5962b5f76 100644 (file)
 #define nvkm_gsp(p) container_of((p), struct nvkm_gsp, subdev)
 #include <core/subdev.h>
 #include <core/falcon.h>
+#include <core/firmware.h>
+
+#define GSP_PAGE_SHIFT 12
+#define GSP_PAGE_SIZE  BIT(GSP_PAGE_SHIFT)
+
+struct nvkm_gsp_mem {
+       u32 size;
+       void *data;
+       dma_addr_t addr;
+};
+
+struct nvkm_gsp_radix3 {
+       struct nvkm_gsp_mem mem[3];
+};
+
+int nvkm_gsp_sg(struct nvkm_device *, u64 size, struct sg_table *);
+void nvkm_gsp_sg_free(struct nvkm_device *, struct sg_table *);
+
+typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc);
+
+struct nvkm_gsp_event;
+typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc);
 
 struct nvkm_gsp {
        const struct nvkm_gsp_func *func;
        struct nvkm_subdev subdev;
 
        struct nvkm_falcon falcon;
+
+       struct {
+               struct {
+                       const struct firmware *load;
+                       const struct firmware *unload;
+               } booter;
+               const struct firmware *bl;
+               const struct firmware *rm;
+       } fws;
+
+       struct nvkm_firmware fw;
+       struct nvkm_gsp_mem sig;
+       struct nvkm_gsp_radix3 radix3;
+
+       struct {
+               struct {
+                       struct {
+                               u64 addr;
+                               u64 size;
+                       } vga_workspace;
+                       u64 addr;
+                       u64 size;
+               } bios;
+               struct {
+                       struct {
+                               u64 addr;
+                               u64 size;
+                       } frts, boot, elf, heap;
+                       u64 addr;
+                       u64 size;
+               } wpr2;
+               struct {
+                       u64 addr;
+                       u64 size;
+               } heap;
+               u64 addr;
+               u64 size;
+
+               struct {
+                       u64 addr;
+                       u64 size;
+               } region[16];
+               int region_nr;
+               u32 rsvd_size;
+       } fb;
+
+       struct {
+               struct nvkm_falcon_fw load;
+               struct nvkm_falcon_fw unload;
+       } booter;
+
+       struct {
+               struct nvkm_gsp_mem fw;
+               u32 code_offset;
+               u32 data_offset;
+               u32 manifest_offset;
+               u32 app_version;
+       } boot;
+
+       struct nvkm_gsp_mem libos;
+       struct nvkm_gsp_mem loginit;
+       struct nvkm_gsp_mem logintr;
+       struct nvkm_gsp_mem logrm;
+       struct nvkm_gsp_mem rmargs;
+
+       struct nvkm_gsp_mem wpr_meta;
+
+       struct {
+               struct sg_table sgt;
+               struct nvkm_gsp_radix3 radix3;
+               struct nvkm_gsp_mem meta;
+       } sr;
+
+       struct {
+               struct nvkm_gsp_mem mem;
+
+               struct {
+                       int   nr;
+                       u32 size;
+                       u64 *ptr;
+               } ptes;
+
+               struct {
+                       u32  size;
+                       void *ptr;
+               } cmdq, msgq;
+       } shm;
+
+       struct nvkm_gsp_cmdq {
+               struct mutex mutex;
+               u32 cnt;
+               u32 seq;
+               u32 *wptr;
+               u32 *rptr;
+       } cmdq;
+
+       struct nvkm_gsp_msgq {
+               struct mutex mutex;
+               u32 cnt;
+               u32 *wptr;
+               u32 *rptr;
+               struct nvkm_gsp_msgq_ntfy {
+                       u32 fn;
+                       nvkm_gsp_msg_ntfy_func func;
+                       void *priv;
+               } ntfy[16];
+               int ntfy_nr;
+               struct work_struct work;
+       } msgq;
+
+       bool running;
+
+       /* Internal GSP-RM control handles. */
+       struct {
+               struct nvkm_gsp_client {
+                       struct nvkm_gsp_object {
+                               struct nvkm_gsp_client *client;
+                               struct nvkm_gsp_object *parent;
+                               u32 handle;
+                       } object;
+
+                       struct nvkm_gsp *gsp;
+
+                       struct list_head events;
+               } client;
+
+               struct nvkm_gsp_device {
+                       struct nvkm_gsp_object object;
+                       struct nvkm_gsp_object subdevice;
+               } device;
+       } internal;
+
+       struct {
+               enum nvkm_subdev_type type;
+               int inst;
+               u32 stall;
+               u32 nonstall;
+       } intr[32];
+       int intr_nr;
+
+       struct {
+               u64 rm_bar1_pdb;
+               u64 rm_bar2_pdb;
+       } bar;
+
+       struct {
+               u8 gpcs;
+               u8 tpcs;
+       } gr;
+
+       const struct nvkm_gsp_rm {
+               void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc);
+               void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc);
+               void (*rpc_done)(struct nvkm_gsp *gsp, void *repv);
+
+               void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc);
+               void *(*rm_ctrl_push)(struct nvkm_gsp_object *, void *argv, u32 repc);
+               void (*rm_ctrl_done)(struct nvkm_gsp_object *, void *repv);
+
+               void *(*rm_alloc_get)(struct nvkm_gsp_object *, u32 oclass, u32 argc);
+               void *(*rm_alloc_push)(struct nvkm_gsp_object *, void *argv, u32 repc);
+               void (*rm_alloc_done)(struct nvkm_gsp_object *, void *repv);
+
+               int (*rm_free)(struct nvkm_gsp_object *);
+
+               int (*client_ctor)(struct nvkm_gsp *, struct nvkm_gsp_client *);
+               void (*client_dtor)(struct nvkm_gsp_client *);
+
+               int (*device_ctor)(struct nvkm_gsp_client *, struct nvkm_gsp_device *);
+               void (*device_dtor)(struct nvkm_gsp_device *);
+
+               int (*event_ctor)(struct nvkm_gsp_device *, u32 handle, u32 id,
+                                 nvkm_gsp_event_func, struct nvkm_gsp_event *);
+               void (*event_dtor)(struct nvkm_gsp_event *);
+       } *rm;
+
+       struct {
+               struct mutex mutex;;
+               struct idr idr;
+       } client_id;
 };
 
+static inline bool
+nvkm_gsp_rm(struct nvkm_gsp *gsp)
+{
+       return gsp && (gsp->fws.rm || gsp->fw.img);
+}
+
+static inline void *
+nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
+{
+       return gsp->rm->rpc_get(gsp, fn, argc);
+}
+
+static inline void *
+nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+       return gsp->rm->rpc_push(gsp, argv, wait, repc);
+}
+
+static inline void *
+nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc)
+{
+       void *argv = nvkm_gsp_rpc_get(gsp, fn, argc);
+
+       if (IS_ERR_OR_NULL(argv))
+               return argv;
+
+       return nvkm_gsp_rpc_push(gsp, argv, true, argc);
+}
+
+static inline int
+nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait)
+{
+       void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0);
+
+       if (IS_ERR(repv))
+               return PTR_ERR(repv);
+
+       return 0;
+}
+
+static inline void
+nvkm_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
+{
+       gsp->rm->rpc_done(gsp, repv);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
+{
+       return object->client->gsp->rm->rm_ctrl_get(object, cmd, argc);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+{
+       return object->client->gsp->rm->rm_ctrl_push(object, argv, repc);
+}
+
+static inline void *
+nvkm_gsp_rm_ctrl_rd(struct nvkm_gsp_object *object, u32 cmd, u32 repc)
+{
+       void *argv = nvkm_gsp_rm_ctrl_get(object, cmd, repc);
+
+       if (IS_ERR(argv))
+               return argv;
+
+       return nvkm_gsp_rm_ctrl_push(object, argv, repc);
+}
+
+static inline int
+nvkm_gsp_rm_ctrl_wr(struct nvkm_gsp_object *object, void *argv)
+{
+       void *repv = nvkm_gsp_rm_ctrl_push(object, argv, 0);
+
+       if (IS_ERR(repv))
+               return PTR_ERR(repv);
+
+       return 0;
+}
+
+static inline void
+nvkm_gsp_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
+{
+       object->client->gsp->rm->rm_ctrl_done(object, repv);
+}
+
+static inline void *
+nvkm_gsp_rm_alloc_get(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 argc,
+                     struct nvkm_gsp_object *object)
+{
+       struct nvkm_gsp_client *client = parent->client;
+       struct nvkm_gsp *gsp = client->gsp;
+       void *argv;
+
+       object->client = parent->client;
+       object->parent = parent;
+       object->handle = handle;
+
+       argv = gsp->rm->rm_alloc_get(object, oclass, argc);
+       if (IS_ERR_OR_NULL(argv)) {
+               object->client = NULL;
+               return argv;
+       }
+
+       return argv;
+}
+
+static inline void *
+nvkm_gsp_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
+{
+       void *repv = object->client->gsp->rm->rm_alloc_push(object, argv, repc);
+
+       if (IS_ERR(repv))
+               object->client = NULL;
+
+       return repv;
+}
+
+static inline int
+nvkm_gsp_rm_alloc_wr(struct nvkm_gsp_object *object, void *argv)
+{
+       void *repv = nvkm_gsp_rm_alloc_push(object, argv, 0);
+
+       if (IS_ERR(repv))
+               return PTR_ERR(repv);
+
+       return 0;
+}
+
+static inline void
+nvkm_gsp_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
+{
+       object->client->gsp->rm->rm_alloc_done(object, repv);
+}
+
+static inline int
+nvkm_gsp_rm_alloc(struct nvkm_gsp_object *parent, u32 handle, u32 oclass, u32 argc,
+                 struct nvkm_gsp_object *object)
+{
+       void *argv = nvkm_gsp_rm_alloc_get(parent, handle, oclass, argc, object);
+
+       if (IS_ERR_OR_NULL(argv))
+               return argv ? PTR_ERR(argv) : -EIO;
+
+       return nvkm_gsp_rm_alloc_wr(object, argv);
+}
+
+static inline int
+nvkm_gsp_rm_free(struct nvkm_gsp_object *object)
+{
+       if (object->client)
+               return object->client->gsp->rm->rm_free(object);
+
+       return 0;
+}
+
+static inline int
+nvkm_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+       if (WARN_ON(!gsp->rm))
+               return -ENOSYS;
+
+       return gsp->rm->client_ctor(gsp, client);
+}
+
+static inline void
+nvkm_gsp_client_dtor(struct nvkm_gsp_client *client)
+{
+       if (client->gsp)
+               client->gsp->rm->client_dtor(client);
+}
+
+static inline int
+nvkm_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+       return client->gsp->rm->device_ctor(client, device);
+}
+
+static inline void
+nvkm_gsp_device_dtor(struct nvkm_gsp_device *device)
+{
+       if (device->object.client)
+               device->object.client->gsp->rm->device_dtor(device);
+}
+
+static inline int
+nvkm_gsp_client_device_ctor(struct nvkm_gsp *gsp,
+                           struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
+{
+       int ret = nvkm_gsp_client_ctor(gsp, client);
+
+       if (ret == 0) {
+               ret = nvkm_gsp_device_ctor(client, device);
+               if (ret)
+                       nvkm_gsp_client_dtor(client);
+       }
+
+       return ret;
+}
+
+struct nvkm_gsp_event {
+       struct nvkm_gsp_device *device;
+       u32 id;
+       nvkm_gsp_event_func func;
+
+       struct nvkm_gsp_object object;
+
+       struct list_head head;
+};
+
+static inline int
+nvkm_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
+                          nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
+{
+       return device->object.client->gsp->rm->event_ctor(device, handle, id, func, event);
+}
+
+static inline void
+nvkm_gsp_event_dtor(struct nvkm_gsp_event *event)
+{
+       struct nvkm_gsp_device *device = event->device;
+
+       if (device)
+               device->object.client->gsp->rm->event_dtor(event);
+}
+
+int nvkm_gsp_intr_stall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
+int nvkm_gsp_intr_nonstall(struct nvkm_gsp *, enum nvkm_subdev_type, int);
+
 int gv100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int tu102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int tu116_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int ga100_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
 int ga102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
+int ad102_gsp_new(struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_gsp **);
 #endif
index 92a36ddfc29ffe66a14a2648c9dc8a156440ecb2..e10cbd9203ec55ea9dff9fe993bb78e8645ddfd7 100644 (file)
@@ -8,6 +8,8 @@ struct nvkm_instmem {
        const struct nvkm_instmem_func *func;
        struct nvkm_subdev subdev;
 
+       bool suspend;
+
        spinlock_t lock;
        struct list_head list;
        struct list_head boot;
@@ -22,6 +24,11 @@ struct nvkm_instmem {
        struct nvkm_ramht  *ramht;
        struct nvkm_memory *ramro;
        struct nvkm_memory *ramfc;
+
+       struct {
+               struct sg_table fbsr;
+               bool fbsr_valid;
+       } rm;
 };
 
 u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
index 2fd2f2433fc7d40068873380321f353d277cae76..935b1cacd528e8416259803c533d4988004cc59f 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef __NVKM_MMU_H__
 #define __NVKM_MMU_H__
 #include <core/subdev.h>
+#include <subdev/gsp.h>
 
 struct nvkm_vma {
        struct list_head head;
@@ -63,6 +64,16 @@ struct nvkm_vmm {
        void *nullp;
 
        bool replay;
+
+       struct {
+               u64 bar2_pdb;
+
+               struct nvkm_gsp_client client;
+               struct nvkm_gsp_device device;
+               struct nvkm_gsp_object object;
+
+               struct nvkm_vma *rsvd;
+       } rm;
 };
 
 int nvkm_vmm_new(struct nvkm_device *, u64 addr, u64 size, void *argv, u32 argc,
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h
new file mode 100644 (file)
index 0000000..7157c77
--- /dev/null
@@ -0,0 +1,170 @@
+#ifndef __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
+#define __src_common_sdk_nvidia_inc_alloc_alloc_channel_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV_MEMORY_DESC_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32 addressSpace;
+    NvU32 cacheAttrib;
+} NV_MEMORY_DESC_PARAMS;
+
+#define NVOS04_FLAGS_CHANNEL_TYPE                                  1:0
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL                         0x00000000
+#define NVOS04_FLAGS_CHANNEL_TYPE_VIRTUAL                          0x00000001  // OBSOLETE
+#define NVOS04_FLAGS_CHANNEL_TYPE_PHYSICAL_FOR_VIRTUAL             0x00000002  // OBSOLETE
+
+#define NVOS04_FLAGS_VPR                                           2:2
+#define NVOS04_FLAGS_VPR_FALSE                                     0x00000000
+#define NVOS04_FLAGS_VPR_TRUE                                      0x00000001
+
+#define NVOS04_FLAGS_CC_SECURE                                     2:2
+#define NVOS04_FLAGS_CC_SECURE_FALSE                               0x00000000
+#define NVOS04_FLAGS_CC_SECURE_TRUE                                0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING                  3:3
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_FALSE            0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_MAP_REFCOUNTING_TRUE             0x00000001
+
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE                       4:4
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_DEFAULT               0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_RUNQUEUE_ONE                   0x00000001
+
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL                           5:5
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_FALSE                     0x00000000
+#define NVOS04_FLAGS_PRIVILEGED_CHANNEL_TRUE                      0x00000001
+
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING                     6:6
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_FALSE               0x00000000
+#define NVOS04_FLAGS_DELAY_CHANNEL_SCHEDULING_TRUE                0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE                7:7
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_FALSE          0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_PHYSICAL_MODE_CE_TRUE           0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_VALUE                    10:8
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED                    11:11
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_FALSE              0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_FIXED_TRUE               0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_VALUE               20:12
+
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED               21:21
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_FALSE         0x00000000
+#define NVOS04_FLAGS_CHANNEL_USERD_INDEX_PAGE_FIXED_TRUE          0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV                 22:22
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_FALSE           0x00000000
+#define NVOS04_FLAGS_CHANNEL_DENY_AUTH_LEVEL_PRIV_TRUE            0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER                        23:23
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_FALSE                  0x00000000
+#define NVOS04_FLAGS_CHANNEL_SKIP_SCRUBBER_TRUE                   0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO                      24:24
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_FALSE                0x00000000
+#define NVOS04_FLAGS_CHANNEL_CLIENT_MAP_FIFO_TRUE                 0x00000001
+
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL           25:25
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_FALSE     0x00000000
+#define NVOS04_FLAGS_SET_EVICT_LAST_CE_PREFETCH_CHANNEL_TRUE      0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT                  26:26
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_FALSE            0x00000000
+#define NVOS04_FLAGS_CHANNEL_VGPU_PLUGIN_CONTEXT_TRUE             0x00000001
+
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT                 27:27
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_FALSE           0x00000000
+#define NVOS04_FLAGS_CHANNEL_PBDMA_ACQUIRE_TIMEOUT_TRUE            0x00000001
+
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD                          29:28
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_DEFAULT                  0x00000000
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_ONE                      0x00000001
+#define NVOS04_FLAGS_GROUP_CHANNEL_THREAD_TWO                      0x00000002
+
+#define NVOS04_FLAGS_MAP_CHANNEL                                   30:30
+#define NVOS04_FLAGS_MAP_CHANNEL_FALSE                             0x00000000
+#define NVOS04_FLAGS_MAP_CHANNEL_TRUE                              0x00000001
+
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC                          31:31
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_FALSE                    0x00000000
+#define NVOS04_FLAGS_SKIP_CTXBUFFER_ALLOC_TRUE                     0x00000001
+
+#define CC_CHAN_ALLOC_IV_SIZE_DWORD    3U
+#define CC_CHAN_ALLOC_NONCE_SIZE_DWORD 8U
+
+typedef struct NV_CHANNEL_ALLOC_PARAMS {
+
+    NvHandle hObjectError; // error context DMA
+    NvHandle hObjectBuffer; // no longer used
+    NV_DECLARE_ALIGNED(NvU64 gpFifoOffset, 8);    // offset to beginning of GP FIFO
+    NvU32    gpFifoEntries;    // number of GP FIFO entries
+
+    NvU32    flags;
+
+
+    NvHandle hContextShare; // context share handle
+    NvHandle hVASpace; // VASpace for the channel
+
+    // handle to UserD memory object for channel, ignored if hUserdMemory[0]=0
+    NvHandle hUserdMemory[NV_MAX_SUBDEVICES];
+
+    // offset to beginning of UserD within hUserdMemory[x]
+    NV_DECLARE_ALIGNED(NvU64 userdOffset[NV_MAX_SUBDEVICES], 8);
+
+    // engine type(NV2080_ENGINE_TYPE_*) with which this channel is associated
+    NvU32    engineType;
+    // Channel identifier that is unique for the duration of a RM session
+    NvU32    cid;
+    // One-hot encoded bitmask to match SET_SUBDEVICE_MASK methods
+    NvU32    subDeviceId;
+    NvHandle hObjectEccError; // ECC error context DMA
+
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS instanceMem, 8);
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS userdMem, 8);
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS ramfcMem, 8);
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS mthdbufMem, 8);
+
+    NvHandle hPhysChannelGroup;              // reserved
+    NvU32    internalFlags;                 // reserved
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS errorNotifierMem, 8); // reserved
+    NV_DECLARE_ALIGNED(NV_MEMORY_DESC_PARAMS eccErrorNotifierMem, 8); // reserved
+    NvU32    ProcessID;                 // reserved
+    NvU32    SubProcessID;                 // reserved
+    // IV used for CPU-side encryption / GPU-side decryption.
+    NvU32    encryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD];          // reserved
+    // IV used for CPU-side decryption / GPU-side encryption.
+    NvU32    decryptIv[CC_CHAN_ALLOC_IV_SIZE_DWORD];          // reserved
+    // Nonce used CPU-side signing / GPU-side signature verification.
+    NvU32    hmacNonce[CC_CHAN_ALLOC_NONCE_SIZE_DWORD];       // reserved
+} NV_CHANNEL_ALLOC_PARAMS;
+
+typedef NV_CHANNEL_ALLOC_PARAMS NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h
new file mode 100644 (file)
index 0000000..7a3fc02
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl0000_h__
+#define __src_common_sdk_nvidia_inc_class_cl0000_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_ROOT        (0x0U) /* finn: Evaluated from "NV0000_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV0000_ALLOC_PARAMETERS {
+    NvHandle hClient; /* CORERM-2934: hClient must remain the first member until all allocations use these params */
+    NvU32    processID;
+    char     processName[NV_PROC_NAME_MAX_LENGTH];
+} NV0000_ALLOC_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h
new file mode 100644 (file)
index 0000000..e4de36d
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl0005_h__
+#define __src_common_sdk_nvidia_inc_class_cl0005_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV0005_ALLOC_PARAMETERS {
+    NvHandle hParentClient;
+    NvHandle hSrcResource;
+
+    NvV32    hClass;
+    NvV32    notifyIndex;
+    NV_DECLARE_ALIGNED(NvP64 data, 8);
+} NV0005_ALLOC_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h
new file mode 100644 (file)
index 0000000..8868118
--- /dev/null
@@ -0,0 +1,43 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl0080_h__
+#define __src_common_sdk_nvidia_inc_class_cl0080_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_DEVICE_0      (0x80U) /* finn: Evaluated from "NV0080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV0080_ALLOC_PARAMETERS {
+    NvU32    deviceId;
+    NvHandle hClientShare;
+    NvHandle hTargetClient;
+    NvHandle hTargetDevice;
+    NvV32    flags;
+    NV_DECLARE_ALIGNED(NvU64 vaSpaceSize, 8);
+    NV_DECLARE_ALIGNED(NvU64 vaStartInternal, 8);
+    NV_DECLARE_ALIGNED(NvU64 vaLimitInternal, 8);
+    NvV32    vaMode;
+} NV0080_ALLOC_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h
new file mode 100644 (file)
index 0000000..9040ea5
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl2080_h__
+#define __src_common_sdk_nvidia_inc_class_cl2080_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2002-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV20_SUBDEVICE_0      (0x2080U) /* finn: Evaluated from "NV2080_ALLOC_PARAMETERS_MESSAGE_ID" */
+
+typedef struct NV2080_ALLOC_PARAMETERS {
+    NvU32 subDeviceId;
+} NV2080_ALLOC_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h
new file mode 100644 (file)
index 0000000..ba659d6
--- /dev/null
@@ -0,0 +1,62 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
+#define __src_common_sdk_nvidia_inc_class_cl2080_notification_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_NOTIFIERS_HOTPLUG                                   (1)
+
+#define NV2080_NOTIFIERS_DP_IRQ                                    (7)
+
+#define NV2080_ENGINE_TYPE_GRAPHICS                   (0x00000001)
+#define NV2080_ENGINE_TYPE_GR0                        NV2080_ENGINE_TYPE_GRAPHICS
+
+#define NV2080_ENGINE_TYPE_COPY0                      (0x00000009)
+
+#define NV2080_ENGINE_TYPE_BSP                        (0x00000013)
+#define NV2080_ENGINE_TYPE_NVDEC0                     NV2080_ENGINE_TYPE_BSP
+
+#define NV2080_ENGINE_TYPE_MSENC                      (0x0000001b)
+#define NV2080_ENGINE_TYPE_NVENC0                      NV2080_ENGINE_TYPE_MSENC  /* Mutually exclusive alias */
+
+#define NV2080_ENGINE_TYPE_SW                         (0x00000022)
+
+#define NV2080_ENGINE_TYPE_SEC2                       (0x00000026)
+
+#define NV2080_ENGINE_TYPE_NVJPG                      (0x0000002b)
+#define NV2080_ENGINE_TYPE_NVJPEG0                     NV2080_ENGINE_TYPE_NVJPG
+
+#define NV2080_ENGINE_TYPE_OFA                        (0x00000033)
+
+typedef struct {
+    NvU32 plugDisplayMask;
+    NvU32 unplugDisplayMask;
+} Nv2080HotplugNotification;
+
+typedef struct Nv2080DpIrqNotificationRec {
+    NvU32 displayId;
+} Nv2080DpIrqNotification;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h
new file mode 100644 (file)
index 0000000..9eb780a
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl84a0_h__
+#define __src_common_sdk_nvidia_inc_class_cl84a0_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2001-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_MEMORY_LIST_SYSTEM (0x00000081)
+
+#define NV01_MEMORY_LIST_FBMEM  (0x00000082)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h
new file mode 100644 (file)
index 0000000..f1d2177
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __src_common_sdk_nvidia_inc_class_cl90f1_h__
+#define __src_common_sdk_nvidia_inc_class_cl90f1_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2011 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define FERMI_VASPACE_A                                     (0x000090f1)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h
new file mode 100644 (file)
index 0000000..b8f3257
--- /dev/null
@@ -0,0 +1,34 @@
+#ifndef __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
+#define __src_common_sdk_nvidia_inc_class_clc0b5sw_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NVC0B5_ALLOCATION_PARAMETERS {
+    NvU32 version;
+    NvU32 engineType;
+} NVC0B5_ALLOCATION_PARAMETERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h
new file mode 100644 (file)
index 0000000..58b3ba7
--- /dev/null
@@ -0,0 +1,39 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073common_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV0073_CTRL_CMD_DSC_CAP_PARAMS {
+    NvBool bDscSupported;
+    NvU32  encoderColorFormatMask;
+    NvU32  lineBufferSizeKB;
+    NvU32  rateBufferSizeKB;
+    NvU32  bitsPerPixelPrecision;
+    NvU32  maxNumHztSlices;
+    NvU32  lineBufferBitDepth;
+} NV0073_CTRL_CMD_DSC_CAP_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h
new file mode 100644 (file)
index 0000000..596f2ea
--- /dev/null
@@ -0,0 +1,166 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dfp_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_DFP_GET_INFO (0x731140U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_GET_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DFP_GET_INFO_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+    NvU32 flags2;
+} NV0073_CTRL_DFP_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL                                       2:0
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_TMDS                       (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_LVDS                       (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_SDI                        (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DISPLAYPORT                (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_DSI                        (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_SIGNAL_WRBK                       (0x00000005U)
+#define NV0073_CTRL_DFP_FLAGS_LANE                                         5:3
+#define NV0073_CTRL_DFP_FLAGS_LANE_NONE                         (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_SINGLE                       (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_DUAL                         (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_QUAD                         (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_LANE_OCT                          (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT                                        6:6
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_DISABLE                     (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LIMIT_60HZ_RR                     (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER                                   7:7
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_NORMAL                 (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_SLI_SCALER_DISABLE                (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE                                 8:8
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_FALSE                (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_CAPABLE_TRUE                 (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE                        9:9
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_FALSE       (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_LIMITED_CAPABLE_TRUE        (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE                         10:10
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_FALSE          (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_RANGE_AUTO_CAPABLE_TRUE           (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE                    11:11
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_FALSE     (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR422_CAPABLE_TRUE      (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE                    12:12
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_FALSE     (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_FORMAT_YCBCR444_CAPABLE_TRUE      (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED                               14:14
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_FALSE                (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_HDMI_ALLOWED_TRUE                 (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT                       15:15
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_FALSE        (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_EMBEDDED_DISPLAYPORT_TRUE         (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT                         16:16
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_NONE           (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_CONSTRAINT_PREFER_RBR     (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW                                 19:17
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_1_62GBPS               (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_2_70GBPS               (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_5_40GBPS               (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_LINK_BW_8_10GBPS               (0x00000004U)
+#define NV0073_CTRL_DFP_FLAGS_LINK                                       21:20
+#define NV0073_CTRL_DFP_FLAGS_LINK_NONE                         (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_SINGLE                       (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_LINK_DUAL                         (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID                           22:22
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_FALSE            (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_FORCE_RM_EDID_TRUE             (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID                              24:23
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_NONE            (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_A               (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_B               (0x00000002U)
+#define NV0073_CTRL_DFP_FLAGS_DSI_DEVICE_ID_DSI_GANGED          (0x00000003U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED                   25:25
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_FALSE    (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DP_POST_CURSOR2_DISABLED_TRUE     (0x00000001U)
+#define NV0073_CTRL_DFP_FLAGS_DP_PHY_REPEATER_COUNT                      29:26
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE                        30:30
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_FALSE         (0x00000000U)
+#define NV0073_CTRL_DFP_FLAGS_DYNAMIC_MUX_CAPABLE_TRUE          (0x00000001U)
+
+#define NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS                         (0x731144U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER                      96U
+
+typedef struct NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 numELDSize;
+    NvU8  bufferELD[NV0073_CTRL_DFP_ELD_AUDIO_CAPS_ELD_BUFFER];
+    NvU32 maxFreqSupported;
+    NvU32 ctrl;
+    NvU32 deviceEntry;
+} NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS;
+
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD                                     0:0
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_FALSE              (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_PD_TRUE               (0x00000001U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV                                   1:1
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_FALSE            (0x00000000U)
+#define NV0073_CTRL_DFP_ELD_AUDIO_CAPS_CTRL_ELDV_TRUE             (0x00000001U)
+
+#define NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE                (0x731150U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool enable;
+} NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS;
+
+typedef NvU32 NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG;
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_INFO {
+    NvU32 displayMask;
+    NvU32 sorType;
+} NV0073_CTRL_DFP_ASSIGN_SOR_INFO;
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR           (0x731152U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DFP_INTERFACE_ID << 8) | NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS  4U
+
+typedef struct NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS {
+    NvU32                                 subDeviceInstance;
+    NvU32                                 displayId;
+    NvU8                                  sorExcludeMask;
+    NvU32                                 slaveDisplayId;
+    NV0073_CTRL_DFP_ASSIGN_SOR_LINKCONFIG forceSublinkConfig;
+    NvBool                                bIs2Head1Or;
+    NvU32                                 sorAssignList[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+    NV0073_CTRL_DFP_ASSIGN_SOR_INFO       sorAssignListWithTag[NV0073_CTRL_CMD_DFP_ASSIGN_SOR_MAX_SORS];
+    NvU8                                  reservedSorMask;
+    NvU32                                 flags;
+} NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS;
+
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO                                      0:0
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_OPTIMAL                    (0x00000001U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_AUDIO_DEFAULT                    (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE               1:1
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_NO  (0x00000000U)
+#define NV0073_CTRL_DFP_ASSIGN_SOR_FLAGS_ACTIVE_SOR_NOT_AUDIO_CAPABLE_YES (0x00000001U)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h
new file mode 100644 (file)
index 0000000..bae4b19
--- /dev/null
@@ -0,0 +1,335 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073dp_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073common.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_DP_AUXCH_CTRL      (0x731341U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_AUXCH_CTRL_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE 16U
+
+typedef struct NV0073_CTRL_DP_AUXCH_CTRL_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvBool bAddrOnly;
+    NvU32  cmd;
+    NvU32  addr;
+    NvU8   data[NV0073_CTRL_DP_AUXCH_MAX_DATA_SIZE];
+    NvU32  size;
+    NvU32  replyType;
+    NvU32  retryTimeMs;
+} NV0073_CTRL_DP_AUXCH_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE                          3:3
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_I2C               (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_TYPE_AUX               (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT                       2:2
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_FALSE          (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_I2C_MOT_TRUE           (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE                      1:0
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE         (0x00000000U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_READ          (0x00000001U)
+#define NV0073_CTRL_DP_AUXCH_CMD_REQ_TYPE_WRITE_STATUS  (0x00000002U)
+
+#define NV0073_CTRL_CMD_DP_CTRL                     (0x731343U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_CTRL_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DP_CTRL_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 cmd;
+    NvU32 data;
+    NvU32 err;
+    NvU32 retryTimeMs;
+    NvU32 eightLaneDpcdBaseAddr;
+} NV0073_CTRL_DP_CTRL_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT                           0:0
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_FALSE                         (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LANE_COUNT_TRUE                          (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW                              1:1
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_FALSE                            (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_LINK_BW_TRUE                             (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD                       2:2
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_FALSE                     (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_DOWNSPREAD_TRUE                      (0x00000001U)
+#define NV0073_CTRL_DP_CMD_UNUSED                                   3:3
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE                          4:4
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_SINGLE_STREAM                (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_FORMAT_MODE_MULTI_STREAM                 (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING                       5:5
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_NO                        (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAST_LINK_TRAINING_YES                       (0x00000001U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING                         6:6
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_NO                          (0x00000000U)
+#define NV0073_CTRL_DP_CMD_NO_LINK_TRAINING_YES                         (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING                     7:7
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_FALSE                   (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SET_ENHANCED_FRAMING_TRUE                    (0x00000001U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING                   8:8
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_DEFAULT               (0x00000000U)
+#define NV0073_CTRL_DP_CMD_USE_DOWNSPREAD_SETTING_FORCE                 (0x00000001U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING                      9:9
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_NO                       (0x00000000U)
+#define NV0073_CTRL_DP_CMD_SKIP_HW_PROGRAMMING_YES                      (0x00000001U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED                10:10
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_NO                   (0x00000000U)
+#define NV0073_CTRL_DP_CMD_POST_LT_ADJ_REQ_GRANTED_YES                  (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING                     12:11
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_NO                        (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_DONOT_TOGGLE_TRANSMISSION (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FAKE_LINK_TRAINING_TOGGLE_TRANSMISSION_ON    (0x00000002U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER                     13:13
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_NO                        (0x00000000U)
+#define NV0073_CTRL_DP_CMD_TRAIN_PHY_REPEATER_YES                       (0x00000001U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG                        14:14
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_FALSE                        (0x00000000U)
+#define NV0073_CTRL_DP_CMD_FALLBACK_CONFIG_TRUE                         (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC                             15:15
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_FALSE                             (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_FEC_TRUE                              (0x00000001U)
+
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST                         29:29
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_NO                            (0x00000000U)
+#define NV0073_CTRL_DP_CMD_BANDWIDTH_TEST_YES                           (0x00000001U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE              30:30
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_FALSE              (0x00000000U)
+#define NV0073_CTRL_DP_CMD_LINK_CONFIG_CHECK_DISABLE_TRUE               (0x00000001U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG                    31:31
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_FALSE                    (0x00000000U)
+#define NV0073_CTRL_DP_CMD_DISABLE_LINK_CONFIG_TRUE                     (0x00000001U)
+
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT                          4:0
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_0                            (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_1                            (0x00000001U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_2                            (0x00000002U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_4                            (0x00000004U)
+#define NV0073_CTRL_DP_DATA_SET_LANE_COUNT_8                            (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW                            15:8
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_1_62GBPS                        (0x00000006U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_16GBPS                        (0x00000008U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_43GBPS                        (0x00000009U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_2_70GBPS                        (0x0000000AU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_3_24GBPS                        (0x0000000CU)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_4_32GBPS                        (0x00000010U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_5_40GBPS                        (0x00000014U)
+#define NV0073_CTRL_DP_DATA_SET_LINK_BW_8_10GBPS                        (0x0000001EU)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING                  18:18
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_NO                     (0x00000000U)
+#define NV0073_CTRL_DP_DATA_SET_ENHANCED_FRAMING_YES                    (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET                                22:19
+#define NV0073_CTRL_DP_DATA_TARGET_SINK                                 (0x00000000U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_0                       (0x00000001U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_1                       (0x00000002U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_2                       (0x00000003U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_3                       (0x00000004U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_4                       (0x00000005U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_5                       (0x00000006U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_6                       (0x00000007U)
+#define NV0073_CTRL_DP_DATA_TARGET_PHY_REPEATER_7                       (0x00000008U)
+
+#define NV0073_CTRL_MAX_LANES                                           8U
+
+typedef struct NV0073_CTRL_DP_LANE_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 numLanes;
+    NvU32 data[NV0073_CTRL_MAX_LANES];
+} NV0073_CTRL_DP_LANE_DATA_PARAMS;
+
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS                   1:0
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_NONE    (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL1  (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL2  (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_PREEMPHASIS_LEVEL3  (0x00000003U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT                  3:2
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL0 (0x00000000U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL1 (0x00000001U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL2 (0x00000002U)
+#define NV0073_CTRL_DP_LANE_DATA_DRIVECURRENT_LEVEL3 (0x00000003U)
+
+#define NV0073_CTRL_CMD_DP_SET_LANE_DATA (0x731346U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_LANE_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM      (0x731359U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 mute;
+} NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID  (0x73135bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  preferredDisplayId;
+
+    NvBool force;
+    NvBool useBFM;
+
+    NvU32  displayIdAssigned;
+    NvU32  allDisplayMask;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID (0x73135cU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+} NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_CONFIG_STREAM                   (0x731362U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  head;
+    NvU32  sorIndex;
+    NvU32  dpLink;
+
+    NvBool bEnableOverride;
+    NvBool bMST;
+    NvU32  singleHeadMultistreamMode;
+    NvU32  hBlankSym;
+    NvU32  vBlankSym;
+    NvU32  colorFormat;
+    NvBool bEnableTwoHeadOneOr;
+
+    struct {
+        NvU32  slotStart;
+        NvU32  slotEnd;
+        NvU32  PBN;
+        NvU32  Timeslice;
+        NvBool sendACT;          // deprecated -Use NV0073_CTRL_CMD_DP_SEND_ACT
+        NvU32  singleHeadMSTPipeline;
+        NvBool bEnableAudioOverRightPanel;
+    } MST;
+
+    struct {
+        NvBool bEnhancedFraming;
+        NvU32  tuSize;
+        NvU32  waterMark;
+        NvU32  actualPclkHz;     // deprecated  -Use MvidWarParams
+        NvU32  linkClkFreqHz;    // deprecated  -Use MvidWarParams
+        NvBool bEnableAudioOverRightPanel;
+        struct {
+            NvU32  activeCnt;
+            NvU32  activeFrac;
+            NvU32  activePolarity;
+            NvBool mvidWarEnabled;
+            struct {
+                NvU32 actualPclkHz;
+                NvU32 linkClkFreqHz;
+            } MvidWarParams;
+        } Legacy;
+    } SST;
+} NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT                    (0x731365U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS {
+    NvU32 subDeviceInstance;
+} NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS   (0x731369U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS_MESSAGE_ID (0x69U)
+
+typedef struct NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS {
+    NvU32                          subDeviceInstance;
+    NvU32                          sorIndex;
+    NvU32                          maxLinkRate;
+    NvU32                          dpVersionsSupported;
+    NvU32                          UHBRSupported;
+    NvBool                         bIsMultistreamSupported;
+    NvBool                         bIsSCEnabled;
+    NvBool                         bHasIncreasedWatermarkLimits;
+    NvBool                         bIsPC2Disabled;
+    NvBool                         isSingleHeadMSTSupported;
+    NvBool                         bFECSupported;
+    NvBool                         bIsTrainPhyRepeater;
+    NvBool                         bOverrideLinkBw;
+    NV0073_CTRL_CMD_DSC_CAP_PARAMS DSC;
+} NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2                0:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_NO              (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_2_YES             (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4                1:1
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_NO              (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DP_VERSIONS_SUPPORTED_DP1_4_YES             (0x00000001U)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE                           2:0
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_NONE                          (0x00000000U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62                          (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70                          (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40                          (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10                          (0x00000004U)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_RGB                (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_444        (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_422 (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_ENCODER_COLOR_FORMAT_Y_CB_CR_NATIVE_420 (0x00000008U)
+
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_16           (0x00000001U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_8            (0x00000002U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_4            (0x00000003U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1_2            (0x00000004U)
+#define NV0073_CTRL_CMD_DP_GET_CAPS_DSC_BITS_PER_PIXEL_PRECISION_1              (0x00000005U)
+
+#define NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES (0x731377U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_DP_INTERFACE_ID << 8) | NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES        8U
+
+typedef struct NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS {
+    // In
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU16 linkRateTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+
+    // Out
+    NvU8  linkBwTbl[NV0073_CTRL_DP_MAX_INDEXED_LINK_RATES];
+    NvU8  linkBwCount;
+} NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS;
+
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE                                   3:0
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_BEGIN     (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHALLENGE (0x00000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_MONITOR_ENABLE_CHECK     (0x00000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_BEGIN      (0x00000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHALLENGE  (0x00000004U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_DRIVER_ENABLE_CHECK      (0x00000005U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_RESET_MONITOR            (0x00000006U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_INIT_PUBLIC_INFO         (0x00000007U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_GET_PUBLIC_INFO          (0x00000008U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_CMD_STAGE_STATUS_CHECK             (0x00000009U)
+
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_OK                          (0x00000000U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_PENDING                     (0x80000001U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_READ_ERROR                  (0x80000002U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_WRITE_ERROR                 (0x80000003U)
+#define NV0073_CTRL_DP_CMD_ENABLE_VRR_STATUS_DEVICE_ERROR                (0x80000004U)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h
new file mode 100644 (file)
index 0000000..954958d
--- /dev/null
@@ -0,0 +1,216 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073specific_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2         (0x730245U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES 2048U
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 bufferSize;
+    NvU32 flags;
+    NvU8  edidBuffer[NV0073_CTRL_SPECIFIC_GET_EDID_MAX_EDID_BYTES];
+} NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA   (0x730250U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_MAX_CONNECTORS                    4U
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 flags;
+    NvU32 DDCPartners;
+    NvU32 count;
+    struct {
+        NvU32 index;
+        NvU32 type;
+        NvU32 location;
+    } data[NV0073_CTRL_MAX_CONNECTORS];
+    NvU32 platform;
+} NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE (0x730273U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS {
+    NvU8  subDeviceInstance;
+    NvU32 displayId;
+    NvU8  enable;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM (0x730275U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS {
+    NvU8  subDeviceInstance;
+    NvU32 displayId;
+    NvU8  mute;
+} NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK (0x730287U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 headMask;
+} NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET (0x730288U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_SET_OD_MAX_PACKET_SIZE     36U
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  transmitControl;
+    NvU32  packetSize;
+    NvU32  targetHead;
+    NvBool bUsePsrHeadforSdp;
+    NvU8   aPacket[NV0073_CTRL_SET_OD_MAX_PACKET_SIZE];
+} NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE                                     0:0
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_NO                      (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ENABLE_YES                     (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME                                1:1
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_DISABLE            (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_OTHER_FRAME_ENABLE             (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME                               2:2
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_DISABLE           (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SINGLE_FRAME_ENABLE            (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK                                 3:3
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_DISABLE              (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_ON_HBLANK_ENABLE               (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE                                  4:4
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_DISABLE              (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_IMMEDIATE_ENABLE               (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT                                  5:5
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_SW_CONTROLLED        (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_VIDEO_FMT_HW_CONTROLLED        (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY                        6:6
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_FALSE      (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_STEREO_POLARITY_TRUE       (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING                   7:7
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_FALSE (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_SET_SELF_REFRESH_SETTING_TRUE  (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE                         9:8
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME0  (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_GEN_INFOFRAME_MODE_INFOFRAME1  (0x0000001U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE                     31:31
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_NO        (0x0000000U)
+#define NV0073_CTRL_SPECIFIC_SET_OD_PACKET_TRANSMIT_CONTROL_RESERVED_LEGACY_MODE_YES       (0x0000001U)
+
+#define NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO (0x73028bU) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  index;
+    NvU32  type;
+    NvU32  protocol;
+    NvU32  ditherType;
+    NvU32  ditherAlgo;
+    NvU32  location;
+    NvU32  rootPortId;
+    NvU32  dcbIndex;
+    NV_DECLARE_ALIGNED(NvU64 vbiosAddress, 8);
+    NvBool bIsLitByVbios;
+    NvBool bIsDispDynamic;
+} NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS;
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_NONE                   (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DAC                    (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_SOR                    (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_PIOR                   (0x00000003U)
+
+#define NV0073_CTRL_SPECIFIC_OR_TYPE_DSI                    (0x00000005U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DAC_RGB_CRT        (0x00000000U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_LVDS_CUSTOM    (0x00000000U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A  (0x00000001U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B  (0x00000002U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS      (0x00000005U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A           (0x00000008U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B           (0x00000009U)
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DSI            (0x00000010U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_DSI                (0x00000011U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_PIOR_EXT_TMDS_ENC  (0x00000000U)
+
+#define NV0073_CTRL_SPECIFIC_OR_PROTOCOL_UNKNOWN            (0xFFFFFFFFU)
+
+#define NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS (0x730291U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS {
+    NvU32  subDeviceInstance;
+    NvU32  displayId;
+    NvU32  brightness;
+    NvBool bUncalibrated;
+} NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS (0x730292U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS_PARAMS_MESSAGE_ID" */
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS (0x730293U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SPECIFIC_INTERFACE_ID << 8) | NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayId;
+    NvU32 caps;
+} NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS;
+
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED                           0:0
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_FALSE       (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_GT_340MHZ_CLOCK_SUPPORTED_TRUE        (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED                     1:1
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_FALSE (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_LTE_340MHZ_SCRAMBLING_SUPPORTED_TRUE  (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED                                      2:2
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_FALSE                  (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_SCDC_SUPPORTED_TRUE                   (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED                              5:3
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_NONE           (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_3G      (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_3LANES_6G      (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_6G      (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_8G      (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_10G     (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_MAX_FRL_RATE_SUPPORTED_4LANES_12G     (0x00000006U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED                                    6:6
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_FALSE                (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_12_SUPPORTED_TRUE                 (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED                          9:7
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_NONE       (0x00000000U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_3G  (0x00000001U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_3LANES_6G  (0x00000002U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_6G  (0x00000003U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_8G  (0x00000004U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_10G (0x00000005U)
+#define NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS_DSC_MAX_FRL_RATE_SUPPORTED_4LANES_12G (0x00000006U)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h
new file mode 100644 (file)
index 0000000..d69cef3
--- /dev/null
@@ -0,0 +1,65 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0073_ctrl0073system_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS (0x730102U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 numHeads;
+} NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED (0x730120U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 displayMask;
+    NvU32 displayMaskDDC;
+} NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE (0x730122U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 flags;
+    NvU32 displayMask;
+    NvU32 retryTimeMs;
+} NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS;
+
+#define NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE                (0x730126U) /* finn: Evaluated from "(FINN_NV04_DISPLAY_COMMON_SYSTEM_INTERFACE_ID << 8) | NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS_MESSAGE_ID" */
+
+typedef struct NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS {
+    NvU32 subDeviceInstance;
+    NvU32 head;
+    NvU32 flags;
+    NvU32 displayId;
+} NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS;
+
+#define NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS             (16U)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h
new file mode 100644 (file)
index 0000000..6acb3f7
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080fifo_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID                          4:0
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS                 (0x00000000)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VLD                      (0x00000001)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_VIDEO                    (0x00000002)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_MPEG                     (0x00000003)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_CAPTURE                  (0x00000004)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_DISPLAY                  (0x00000005)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_ENCRYPTION               (0x00000006)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_POSTPROCESS              (0x00000007)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ZCULL           (0x00000008)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PM              (0x00000009)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COMPUTE_PREEMPT          (0x0000000a)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PREEMPT         (0x0000000b)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_SPILL           (0x0000000c)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL        (0x0000000d)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BETACB          (0x0000000e)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV             (0x0000000f)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PATCH           (0x00000010)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_BUNDLE_CB       (0x00000011)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PAGEPOOL_GLOBAL (0x00000012)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_ATTRIBUTE_CB    (0x00000013)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_RTV_CB_GLOBAL   (0x00000014)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_POOL       (0x00000015)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_GFXP_CTRL_BLK   (0x00000016)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_FECS_EVENT      (0x00000017)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_GRAPHICS_PRIV_ACCESS_MAP (0x00000018)
+#define NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT                    (0x00000019)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h
new file mode 100644 (file)
index 0000000..3db099e
--- /dev/null
@@ -0,0 +1,48 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gpu_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS {
+    NvU32  totalVFs;
+    NvU32  firstVfOffset;
+    NvU32  vfFeatureMask;
+    NV_DECLARE_ALIGNED(NvU64 FirstVFBar0Address, 8);
+    NV_DECLARE_ALIGNED(NvU64 FirstVFBar1Address, 8);
+    NV_DECLARE_ALIGNED(NvU64 FirstVFBar2Address, 8);
+    NV_DECLARE_ALIGNED(NvU64 bar0Size, 8);
+    NV_DECLARE_ALIGNED(NvU64 bar1Size, 8);
+    NV_DECLARE_ALIGNED(NvU64 bar2Size, 8);
+    NvBool b64bitBar0;
+    NvBool b64bitBar1;
+    NvBool b64bitBar2;
+    NvBool bSriovEnabled;
+    NvBool bSriovHeavyEnabled;
+    NvBool bEmulateVFBar0TlbInvalidationRegister;
+    NvBool bClientRmAllocatedCtxBuffer;
+} NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h
new file mode 100644 (file)
index 0000000..ed01df9
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl0080_ctrl0080gr_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV0080_CTRL_GR_CAPS_TBL_SIZE            23
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h
new file mode 100644 (file)
index 0000000..b5b7631
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080bios_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2005-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS {
+    NvU32 BoardID;
+    char  chipSKU[4];
+    char  chipSKUMod[2];
+    char  project[5];
+    char  projectSKU[5];
+    char  CDP[6];
+    char  projectSKUMod[2];
+    NvU32 businessCycle;
+} NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h
new file mode 100644 (file)
index 0000000..fe912d2
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080ce_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS {
+    NvU32 size;
+} NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS;
+
+#define NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE (0x20802a08) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_CE_INTERFACE_ID << 8) | NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS_MESSAGE_ID" */
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h
new file mode 100644 (file)
index 0000000..87bc4ff
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080event_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION (0x20800301) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_EVENT_INTERFACE_ID << 8) | NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS {
+    NvU32  event;
+    NvU32  action;
+    NvBool bNotifyState;
+    NvU32  info32;
+    NvU16  info16;
+} NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS;
+
+#define NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT  (0x00000002)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h
new file mode 100644 (file)
index 0000000..68c81f9
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fb_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES   17U
+
+typedef NvBool NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MEM_TYPES];
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO {
+    NV_DECLARE_ALIGNED(NvU64 base, 8);
+    NV_DECLARE_ALIGNED(NvU64 limit, 8);
+    NV_DECLARE_ALIGNED(NvU64 reserved, 8);
+    NvU32                                                  performance;
+    NvBool                                                 supportCompressed;
+    NvBool                                                 supportISO;
+    NvBool                                                 bProtected;
+    NV2080_CTRL_CMD_FB_GET_FB_REGION_SURFACE_MEM_TYPE_FLAG blackList;
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO;
+
+#define NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES 16U
+
+typedef struct NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS {
+    NvU32 numFBRegions;
+    NV_DECLARE_ALIGNED(NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO fbRegion[NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_MAX_ENTRIES], 8);
+} NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h
new file mode 100644 (file)
index 0000000..bc0f636
--- /dev/null
@@ -0,0 +1,52 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080fifo_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE                 (0x20801112) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_FIFO_INTERFACE_ID << 8) | NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES         32
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES   16
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA    2
+#define NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN 16
+
+typedef struct NV2080_CTRL_FIFO_DEVICE_ENTRY {
+    NvU32 engineData[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_DATA_TYPES];
+    NvU32 pbdmaIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+    NvU32 pbdmaFaultIds[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_PBDMA];
+    NvU32 numPbdmas;
+    char  engineName[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_ENGINE_MAX_NAME_LEN];
+} NV2080_CTRL_FIFO_DEVICE_ENTRY;
+
+typedef struct NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS {
+    NvU32                         baseIndex;
+    NvU32                         numEntries;
+    NvBool                        bMore;
+    // C form: NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+    NV2080_CTRL_FIFO_DEVICE_ENTRY entries[NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_MAX_ENTRIES];
+} NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h
new file mode 100644 (file)
index 0000000..29d7a10
--- /dev/null
@@ -0,0 +1,100 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gpu_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_GPU_MAX_NAME_STRING_LENGTH                  (0x0000040U)
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0            (0x00000000U)
+
+#define NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3            (0x00000003U)
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY {
+    NV_DECLARE_ALIGNED(NvU64 gpuPhysAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 gpuVirtAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32 physAttr;
+    NvU16 bufferId;
+    NvU8  bInitialize;
+    NvU8  bNonmapped;
+} NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY;
+
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN                         0U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PM                           1U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PATCH                        2U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_BUFFER_BUNDLE_CB             3U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PAGEPOOL                     4U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB                 5U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_RTV_CB_GLOBAL                6U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_POOL                    7U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GFXP_CTRL_BLK                8U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_FECS_EVENT                   9U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP              10U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP 11U
+#define NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_GLOBAL_PRIV_ACCESS_MAP       12U
+
+#define NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES                        16U
+
+#define NV2080_CTRL_CMD_GPU_PROMOTE_CTX                                    (0x2080012bU) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_GPU_INTERFACE_ID << 8) | NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS {
+    NvU32    engineType;
+    NvHandle hClient;
+    NvU32    ChID;
+    NvHandle hChanClient;
+    NvHandle hObject;
+    NvHandle hVirtMemory;
+    NV_DECLARE_ALIGNED(NvU64 virtAddress, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+    NvU32    entryCount;
+    // C form: NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES];
+    NV_DECLARE_ALIGNED(NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY promoteEntry[NV2080_CTRL_GPU_PROMOTE_CONTEXT_MAX_ENTRIES], 8);
+} NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS {
+    NvU32 gpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS {
+    NvU32 gpcId;
+    NvU32 tpcMask;
+} NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS;
+
+typedef struct NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS {
+    NvU32 gpcId;
+    NvU32 zcullMask;
+} NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS;
+
+#define NV2080_GPU_MAX_GID_LENGTH             (0x000000100ULL)
+
+typedef struct NV2080_CTRL_GPU_GET_GID_INFO_PARAMS {
+    NvU32 index;
+    NvU32 flags;
+    NvU32 length;
+    NvU8  data[NV2080_GPU_MAX_GID_LENGTH];
+} NV2080_CTRL_GPU_GET_GID_INFO_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h
new file mode 100644 (file)
index 0000000..59f8895
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080gr_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2006-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS {
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_MAIN = 0,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_SPILL = 1,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_PAGEPOOL = 2,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_BETACB = 3,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_RTV = 4,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL = 5,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL = 6,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL_CONTROL_CPU = 7,
+    NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_END = 8,
+} NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h
new file mode 100644 (file)
index 0000000..e11b2db
--- /dev/null
@@ -0,0 +1,162 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl2080_ctrl2080internal_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO (0x20800a01) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS {
+    NvU32  feHwSysCap;
+    NvU32  windowPresentMask;
+    NvBool bFbRemapperEnabled;
+    NvU32  numHeads;
+    NvBool bPrimaryVga;
+    NvU32  i2cPort;
+    NvU32  internalDispActiveMask;
+} NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS;
+
+#define NV2080_CTRL_INTERNAL_GR_MAX_ENGINES         8
+
+#define NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT 0x19
+
+typedef struct NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO {
+    NvU32 size;
+    NvU32 alignment;
+} NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO {
+    NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_BUFFER_INFO engine[NV2080_CTRL_INTERNAL_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_COUNT];
+} NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO;
+
+typedef struct NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS {
+    NV2080_CTRL_INTERNAL_STATIC_GR_CONTEXT_BUFFERS_INFO engineContextBuffersInfo[NV2080_CTRL_INTERNAL_GR_MAX_ENGINES];
+} NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO (0x20800a32) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO {
+    NvU32 engDesc;
+    NvU32 ctxAttr;
+    NvU32 ctxBufferSize;
+    NvU32 addrSpaceList;
+    NvU32 registerBase;
+} NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO;
+#define NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS     0x40
+
+#define NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO (0x20800a42) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS {
+    NvU32                                        numConstructedFalcons;
+    NV2080_CTRL_INTERNAL_CONSTRUCTED_FALCON_INFO constructedFalconsTable[NV2080_CTRL_CMD_INTERNAL_MAX_CONSTRUCTED_FALCONS];
+} NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM (0x20800a49) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS {
+    NV_DECLARE_ALIGNED(NvU64 instMemPhysAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 instMemSize, 8);
+    NvU32 instMemAddrSpace;
+    NvU32 instMemCpuCacheAttr;
+} NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER (0x20800a58) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS {
+    NvU32  addressSpace;
+    NV_DECLARE_ALIGNED(NvU64 physicalAddr, 8);
+    NV_DECLARE_ALIGNED(NvU64 limit, 8);
+    NvU32  cacheSnoop;
+    NvU32  hclass;
+    NvU32  channelInstance;
+    NvBool valid;
+} NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE (0x20800a5c) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE       128
+
+typedef enum NV2080_INTR_CATEGORY {
+    NV2080_INTR_CATEGORY_DEFAULT = 0,
+    NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE = 1,
+    NV2080_INTR_CATEGORY_ESCHED_DRIVEN_ENGINE_NOTIFICATION = 2,
+    NV2080_INTR_CATEGORY_RUNLIST = 3,
+    NV2080_INTR_CATEGORY_RUNLIST_NOTIFICATION = 4,
+    NV2080_INTR_CATEGORY_UVM_OWNED = 5,
+    NV2080_INTR_CATEGORY_UVM_SHARED = 6,
+    NV2080_INTR_CATEGORY_ENUM_COUNT = 7,
+} NV2080_INTR_CATEGORY;
+
+typedef struct NV2080_INTR_CATEGORY_SUBTREE_MAP {
+    NvU8 subtreeStart;
+    NvU8 subtreeEnd;
+} NV2080_INTR_CATEGORY_SUBTREE_MAP;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY {
+    NvU16 engineIdx;
+    NvU32 pmcIntrMask;
+    NvU32 vectorStall;
+    NvU32 vectorNonStall;
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY;
+
+typedef struct NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS {
+    NvU32                                            tableLen;
+    NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_ENTRY table[NV2080_CTRL_INTERNAL_INTR_MAX_TABLE_SIZE];
+    NV2080_INTR_CATEGORY_SUBTREE_MAP                 subtreeMap[NV2080_INTR_CATEGORY_ENUM_COUNT];
+} NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_INIT (0x20800ac2) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS {
+    NvU32    fbsrType;
+    NvU32    numRegions;
+    NvHandle hClient;
+    NvHandle hSysMem;
+    NV_DECLARE_ALIGNED(NvU64 gspFbAllocsSysOffset, 8);
+    NvBool   bEnteringGcoffState;
+} NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO (0x20800ac3) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS_MESSAGE_ID" */
+
+typedef struct NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS {
+    NvU32    fbsrType;
+    NvHandle hClient;
+    NvHandle hVidMem;
+    NV_DECLARE_ALIGNED(NvU64 vidOffset, 8);
+    NV_DECLARE_ALIGNED(NvU64 sysOffset, 8);
+    NV_DECLARE_ALIGNED(NvU64 size, 8);
+} NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS;
+
+#define NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD (0x20800ac6) /* finn: Evaluated from "(FINN_NV20_SUBDEVICE_0_INTERNAL_INTERFACE_ID << 8) | NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS_MESSAGE_ID" */
+
+#define NV2080_CTRL_ACPI_DSM_READ_SIZE                   (0x1000) /* finn: Evaluated from "(4 * 1024)" */
+
+typedef struct NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS {
+    NvU32 status;
+    NvU16 backLightDataSize;
+    NvU8  backLightData[NV2080_CTRL_ACPI_DSM_READ_SIZE];
+} NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h
new file mode 100644 (file)
index 0000000..977e598
--- /dev/null
@@ -0,0 +1,95 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrl90f1_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2014-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define GMMU_FMT_MAX_LEVELS  6U
+
+#define NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES (0x90f10106U) /* finn: Evaluated from "(FINN_FERMI_VASPACE_A_VASPACE_INTERFACE_ID << 8) | NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS_MESSAGE_ID" */
+
+typedef struct NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS {
+    /*!
+     * [in] GPU sub-device handle - this API only supports unicast.
+     *      Pass 0 to use subDeviceId instead.
+     */
+    NvHandle hSubDevice;
+
+    /*!
+     * [in] GPU sub-device ID. Ignored if hSubDevice is non-zero.
+     */
+    NvU32    subDeviceId;
+
+    /*!
+     * [in] Page size (VA coverage) of the level to reserve.
+     *      This need not be a leaf (page table) page size - it can be
+     *      the coverage of an arbitrary level (including root page directory).
+     */
+    NV_DECLARE_ALIGNED(NvU64 pageSize, 8);
+
+    /*!
+     * [in] First GPU virtual address of the range to reserve.
+     *      This must be aligned to pageSize.
+     */
+    NV_DECLARE_ALIGNED(NvU64 virtAddrLo, 8);
+
+    /*!
+     * [in] Last GPU virtual address of the range to reserve.
+     *      This (+1) must be aligned to pageSize.
+     */
+    NV_DECLARE_ALIGNED(NvU64 virtAddrHi, 8);
+
+    /*! 
+     * [in] Number of PDE levels to copy.
+     */
+    NvU32    numLevelsToCopy;
+
+   /*!
+     * [in] Per-level information.
+     */
+    struct {
+        /*!
+         * Physical address of this page level instance.
+         */
+        NV_DECLARE_ALIGNED(NvU64 physAddress, 8);
+
+        /*!
+         * Size in bytes allocated for this level instance.
+         */
+        NV_DECLARE_ALIGNED(NvU64 size, 8);
+
+        /*!
+         * Aperture in which this page level instance resides.
+         */
+        NvU32 aperture;
+
+        /*!
+         * Page shift corresponding to the level
+         */
+        NvU8  pageShift;
+    } levels[GMMU_FMT_MAX_LEVELS];
+} NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h
new file mode 100644 (file)
index 0000000..6840457
--- /dev/null
@@ -0,0 +1,42 @@
+#ifndef __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
+#define __src_common_sdk_nvidia_inc_ctrl_ctrla06f_ctrla06fgpfifo_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2007-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ * 
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ * 
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * 
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NVA06F_CTRL_CMD_GPFIFO_SCHEDULE (0xa06f0103) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS_MESSAGE_ID" */
+
+typedef struct NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS {
+    NvBool bEnable;
+    NvBool bSkipSubmit;
+} NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS;
+
+#define NVA06F_CTRL_CMD_BIND (0xa06f0104) /* finn: Evaluated from "(FINN_KEPLER_CHANNEL_GPFIFO_A_GPFIFO_INTERFACE_ID << 8) | NVA06F_CTRL_BIND_PARAMS_MESSAGE_ID" */
+
+typedef struct NVA06F_CTRL_BIND_PARAMS {
+    NvU32 engineType;
+} NVA06F_CTRL_BIND_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvlimits.h
new file mode 100644 (file)
index 0000000..5c5a004
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef __src_common_sdk_nvidia_inc_nvlimits_h__
+#define __src_common_sdk_nvidia_inc_nvlimits_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV_MAX_SUBDEVICES       8
+
+#define NV_PROC_NAME_MAX_LENGTH 100U
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h
new file mode 100644 (file)
index 0000000..51b5591
--- /dev/null
@@ -0,0 +1,148 @@
+#ifndef __src_common_sdk_nvidia_inc_nvos_h__
+#define __src_common_sdk_nvidia_inc_nvos_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NVOS02_FLAGS_PHYSICALITY                                   7:4
+#define NVOS02_FLAGS_PHYSICALITY_CONTIGUOUS                        (0x00000000)
+#define NVOS02_FLAGS_PHYSICALITY_NONCONTIGUOUS                     (0x00000001)
+#define NVOS02_FLAGS_LOCATION                                      11:8
+#define NVOS02_FLAGS_LOCATION_PCI                                  (0x00000000)
+#define NVOS02_FLAGS_LOCATION_AGP                                  (0x00000001)
+#define NVOS02_FLAGS_LOCATION_VIDMEM                               (0x00000002)
+#define NVOS02_FLAGS_COHERENCY                                     15:12
+#define NVOS02_FLAGS_COHERENCY_UNCACHED                            (0x00000000)
+#define NVOS02_FLAGS_COHERENCY_CACHED                              (0x00000001)
+#define NVOS02_FLAGS_COHERENCY_WRITE_COMBINE                       (0x00000002)
+#define NVOS02_FLAGS_COHERENCY_WRITE_THROUGH                       (0x00000003)
+#define NVOS02_FLAGS_COHERENCY_WRITE_PROTECT                       (0x00000004)
+#define NVOS02_FLAGS_COHERENCY_WRITE_BACK                          (0x00000005)
+#define NVOS02_FLAGS_ALLOC                                         17:16
+#define NVOS02_FLAGS_ALLOC_NONE                                    (0x00000001)
+#define NVOS02_FLAGS_GPU_CACHEABLE                                 18:18
+#define NVOS02_FLAGS_GPU_CACHEABLE_NO                              (0x00000000)
+#define NVOS02_FLAGS_GPU_CACHEABLE_YES                             (0x00000001)
+
+#define NVOS02_FLAGS_KERNEL_MAPPING                                19:19
+#define NVOS02_FLAGS_KERNEL_MAPPING_NO_MAP                         (0x00000000)
+#define NVOS02_FLAGS_KERNEL_MAPPING_MAP                            (0x00000001)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY                            20:20
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_NO                         (0x00000000)
+#define NVOS02_FLAGS_ALLOC_NISO_DISPLAY_YES                        (0x00000001)
+
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY                          21:21
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_NO                       (0x00000000)
+#define NVOS02_FLAGS_ALLOC_USER_READ_ONLY_YES                      (0x00000001)
+
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY                        22:22
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_NO                     (0x00000000)
+#define NVOS02_FLAGS_ALLOC_DEVICE_READ_ONLY_YES                    (0x00000001)
+
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE                             23:23
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_DEFAULT                     (0x00000000)
+#define NVOS02_FLAGS_PEER_MAP_OVERRIDE_REQUIRED                    (0x00000001)
+
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT                          24:24
+#define NVOS02_FLAGS_ALLOC_TYPE_SYNCPOINT_APERTURE                 (0x00000001)
+
+#define NVOS02_FLAGS_MEMORY_PROTECTION                             26:25
+#define NVOS02_FLAGS_MEMORY_PROTECTION_DEFAULT                     (0x00000000)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_PROTECTED                   (0x00000001)
+#define NVOS02_FLAGS_MEMORY_PROTECTION_UNPROTECTED                 (0x00000002)
+
+#define NVOS02_FLAGS_MAPPING                                       31:30
+#define NVOS02_FLAGS_MAPPING_DEFAULT                               (0x00000000)
+#define NVOS02_FLAGS_MAPPING_NO_MAP                                (0x00000001)
+#define NVOS02_FLAGS_MAPPING_NEVER_MAP                             (0x00000002)
+
+#define NV01_EVENT_CLIENT_RM                                       (0x04000000)
+
+typedef struct
+{
+    NvV32    channelInstance;            // One of the n channel instances of a given channel type.
+                                         // Note that core channel has only one instance
+                                         // while all others have two (one per head).
+    NvHandle hObjectBuffer;              // ctx dma handle for DMA push buffer
+    NvHandle hObjectNotify;              // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors/notifications
+    NvU32    offset;                     // Initial offset for put/get, usually zero.
+    NvP64    pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of UDISP GET/PUT regs
+
+    NvU32    flags;
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB                1:1
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_YES            0x00000000
+#define NV50VAIO_CHANNELDMA_ALLOCATION_FLAGS_CONNECT_PB_AT_GRAB_NO             0x00000001
+
+} NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvV32    channelInstance;            // One of the n channel instances of a given channel type.
+                                         // All PIO channels have two instances (one per head).
+    NvHandle hObjectNotify;              // ctx dma handle for an area (of type NvNotification defined in sdk/nvidia/inc/nvtypes.h) where RM can write errors.
+    NvP64    pControl NV_ALIGN_BYTES(8); // pControl gives virt addr of control region for PIO channel
+} NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;
+    NvU32 engineInstance;               // Select NVDEC0 or NVDEC1 or NVDEC2
+} NV_BSP_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of MSENC?
+    NvU32 engineInstance;             // Select MSENC/NVENC0 or NVENC1 or NVENC2
+} NV_MSENC_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of NVJPG?
+    NvU32 engineInstance;
+} NV_NVJPG_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32 size;
+    NvU32 prohibitMultipleInstances;  // Prohibit multiple allocations of OFA?
+} NV_OFA_ALLOCATION_PARAMETERS;
+
+typedef struct
+{
+    NvU32   index;
+    NvV32   flags;
+    NvU64   vaSize NV_ALIGN_BYTES(8);
+    NvU64   vaStartInternal NV_ALIGN_BYTES(8);
+    NvU64   vaLimitInternal NV_ALIGN_BYTES(8);
+    NvU32   bigPageSize;
+    NvU64   vaBase NV_ALIGN_BYTES(8);
+} NV_VASPACE_ALLOCATION_PARAMETERS;
+
+#define NV_VASPACE_ALLOCATION_INDEX_GPU_NEW                                 0x00 //<! Create new VASpace, by default
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h
new file mode 100644 (file)
index 0000000..5a2f273
--- /dev/null
@@ -0,0 +1,46 @@
+#ifndef __src_common_shared_msgq_inc_msgq_msgq_priv_h__
+#define __src_common_shared_msgq_inc_msgq_msgq_priv_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct
+{
+    NvU32 version;   // queue version
+    NvU32 size;      // bytes, page aligned
+    NvU32 msgSize;   // entry size, bytes, must be power-of-2, 16 is minimum
+    NvU32 msgCount;  // number of entries in queue
+    NvU32 writePtr;  // message id of next slot
+    NvU32 flags;     // if set it means "i want to swap RX"
+    NvU32 rxHdrOff;  // Offset of msgqRxHeader from start of backing store.
+    NvU32 entryOff;  // Offset of entries from start of backing store.
+} msgqTxHeader;
+
+typedef struct
+{
+    NvU32 readPtr; // message id of last message read
+} msgqRxHeader;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h
new file mode 100644 (file)
index 0000000..83cf1b2
--- /dev/null
@@ -0,0 +1,52 @@
+#ifndef __src_common_uproc_os_common_include_libos_init_args_h__
+#define __src_common_uproc_os_common_include_libos_init_args_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef NvU64 LibosAddress;
+
+typedef enum {
+    LIBOS_MEMORY_REGION_NONE,
+    LIBOS_MEMORY_REGION_CONTIGUOUS,
+    LIBOS_MEMORY_REGION_RADIX3
+} LibosMemoryRegionKind;
+
+typedef enum {
+    LIBOS_MEMORY_REGION_LOC_NONE,
+    LIBOS_MEMORY_REGION_LOC_SYSMEM,
+    LIBOS_MEMORY_REGION_LOC_FB
+} LibosMemoryRegionLoc;
+
+typedef struct
+{
+    LibosAddress          id8;  // Id tag.
+    LibosAddress          pa;   // Physical address.
+    LibosAddress          size; // Size of memory area.
+    NvU8                  kind; // See LibosMemoryRegionKind above.
+    NvU8                  loc;  // See LibosMemoryRegionLoc above.
+} LibosMemoryRegionInitArgument;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h
new file mode 100644 (file)
index 0000000..73213bd
--- /dev/null
@@ -0,0 +1,79 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
+#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_sr_meta_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define GSP_FW_SR_META_MAGIC     0x8a3bb9e6c6c39d93ULL
+#define GSP_FW_SR_META_REVISION  2
+
+typedef struct
+{
+    //
+    // Magic
+    // Use for verification by Booter
+    //
+    NvU64 magic;  // = GSP_FW_SR_META_MAGIC;
+
+    //
+    // Revision number
+    // Bumped up when we change this interface so it is not backward compatible.
+    // Bumped up when we revoke GSP-RM ucode
+    //
+    NvU64 revision;  // = GSP_FW_SR_META_REVISION;
+
+    //
+    // ---- Members regarding data in SYSMEM ----------------------------
+    // Consumed by Booter for DMA
+    //
+    NvU64 sysmemAddrOfSuspendResumeData;
+    NvU64 sizeOfSuspendResumeData;
+
+    // ---- Members for crypto ops across S/R ---------------------------
+
+    //
+    // HMAC over the entire GspFwSRMeta structure (including padding)
+    // with the hmac field itself zeroed.
+    //
+    NvU8 hmac[32];
+
+    // Hash over GspFwWprMeta structure
+    NvU8 wprMetaHash[32];
+
+    // Hash over GspFwHeapFreeList structure. All zeros signifies no free list.
+    NvU8 heapFreeListHash[32];
+
+    // Hash over data in WPR2 (skipping over free heap chunks; see Booter for details)
+    NvU8 dataHash[32];
+
+    //
+    // Pad structure to exactly 256 bytes (1 DMA chunk).
+    // Padding initialized to zero.
+    //
+    NvU32 padding[24];
+
+} GspFwSRMeta;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h
new file mode 100644 (file)
index 0000000..a2e141e
--- /dev/null
@@ -0,0 +1,170 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
+#define __src_nvidia_arch_nvalloc_common_inc_gsp_gsp_fw_wpr_meta_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct
+{
+    // Magic
+    // BL to use for verification (i.e. Booter locked it in WPR2)
+    NvU64 magic; // = 0xdc3aae21371a60b3;
+
+    // Revision number of Booter-BL-Sequencer handoff interface
+    // Bumped up when we change this interface so it is not backward compatible.
+    // Bumped up when we revoke GSP-RM ucode
+    NvU64 revision; // = 1;
+
+    // ---- Members regarding data in SYSMEM ----------------------------
+    // Consumed by Booter for DMA
+
+    NvU64 sysmemAddrOfRadix3Elf;
+    NvU64 sizeOfRadix3Elf;
+
+    NvU64 sysmemAddrOfBootloader;
+    NvU64 sizeOfBootloader;
+
+    // Offsets inside bootloader image needed by Booter
+    NvU64 bootloaderCodeOffset;
+    NvU64 bootloaderDataOffset;
+    NvU64 bootloaderManifestOffset;
+
+    union
+    {
+        // Used only at initial boot
+        struct
+        {
+            NvU64 sysmemAddrOfSignature;
+            NvU64 sizeOfSignature;
+        };
+
+        //
+        // Used at suspend/resume to read GspFwHeapFreeList
+        // Offset relative to GspFwWprMeta FBMEM PA (gspFwWprStart)
+        //
+        struct
+        {
+            NvU32 gspFwHeapFreeListWprOffset;
+            NvU32 unused0;
+            NvU64 unused1;
+        };
+    };
+
+    // ---- Members describing FB layout --------------------------------
+    NvU64 gspFwRsvdStart;
+
+    NvU64 nonWprHeapOffset;
+    NvU64 nonWprHeapSize;
+
+    NvU64 gspFwWprStart;
+
+    // GSP-RM to use to setup heap.
+    NvU64 gspFwHeapOffset;
+    NvU64 gspFwHeapSize;
+
+    // BL to use to find ELF for jump
+    NvU64 gspFwOffset;
+    // Size is sizeOfRadix3Elf above.
+
+    NvU64 bootBinOffset;
+    // Size is sizeOfBootloader above.
+
+    NvU64 frtsOffset;
+    NvU64 frtsSize;
+
+    NvU64 gspFwWprEnd;
+
+    // GSP-RM to use for fbRegionInfo?
+    NvU64 fbSize;
+
+    // ---- Other members -----------------------------------------------
+
+    // GSP-RM to use for fbRegionInfo?
+    NvU64 vgaWorkspaceOffset;
+    NvU64 vgaWorkspaceSize;
+
+    // Boot count.  Used to determine whether to load the firmware image.
+    NvU64 bootCount;
+
+    // TODO: the partitionRpc* fields below do not really belong in this
+    //       structure. The values are patched in by the partition bootstrapper
+    //       when GSP-RM is booted in a partition, and this structure was a
+    //       convenient place for the bootstrapper to access them. These should
+    //       be moved to a different comm. mechanism between the bootstrapper
+    //       and the GSP-RM tasks.
+
+    union
+    {
+       struct
+       {
+           // Shared partition RPC memory (physical address)
+           NvU64 partitionRpcAddr;
+
+           // Offsets relative to partitionRpcAddr
+           NvU16 partitionRpcRequestOffset;
+           NvU16 partitionRpcReplyOffset;
+
+           // Code section and dataSection offset and size.
+           NvU32 elfCodeOffset;
+           NvU32 elfDataOffset;
+           NvU32 elfCodeSize;
+           NvU32 elfDataSize;
+
+           // Used during GSP-RM resume to check for revocation
+           NvU32 lsUcodeVersion;
+       };
+
+        struct
+       {
+           // Pad for the partitionRpc* fields, plus 4 bytes
+           NvU32 partitionRpcPadding[4];
+
+            // CrashCat (contiguous) buffer size/location - occupies same bytes as the
+            // elf(Code|Data)(Offset|Size) fields above.
+            // TODO: move to GSP_FMC_INIT_PARAMS
+            NvU64 sysmemAddrOfCrashReportQueue;
+            NvU32 sizeOfCrashReportQueue;
+
+            // Pad for the lsUcodeVersion field
+            NvU32 lsUcodeVersionPadding[1];
+        };
+    };
+
+    // Number of VF partitions allocating sub-heaps from the WPR heap
+    // Used during boot to ensure the heap is adequately sized
+    NvU8 gspFwHeapVfPartitionCount;
+
+    // Pad structure to exactly 256 bytes.  Can replace padding with additional
+    // fields without incrementing revision.  Padding initialized to 0.
+    NvU8 padding[7];
+
+    // BL to use for verification (i.e. Booter says OK to boot)
+    NvU64 verified;  // 0x0 -> unverified, 0xa0a0a0a0a0a0a0a0 -> verified
+} GspFwWprMeta;
+
+#define GSP_FW_WPR_META_REVISION  1
+#define GSP_FW_WPR_META_MAGIC     0xdc3aae21371a60b3ULL
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h
new file mode 100644 (file)
index 0000000..4eff473
--- /dev/null
@@ -0,0 +1,82 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
+#define __src_nvidia_arch_nvalloc_common_inc_rmRiscvUcode_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2018-2019 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct {
+    //
+    // Version 1
+    // Version 2
+    // Version 3 = for Partition boot
+    // Version 4 = for eb riscv boot
+    // Version 5 = Support signing entire RISC-V image as "code" in code section for hopper and later.
+    //
+    NvU32  version;                         // structure version
+    NvU32  bootloaderOffset;
+    NvU32  bootloaderSize;
+    NvU32  bootloaderParamOffset;
+    NvU32  bootloaderParamSize;
+    NvU32  riscvElfOffset;
+    NvU32  riscvElfSize;
+    NvU32  appVersion;                      // Changelist number associated with the image
+    //
+    // Manifest contains information about Monitor and it is
+    // input to BR
+    //
+    NvU32  manifestOffset;
+    NvU32  manifestSize;
+    //
+    // Monitor Data offset within RISCV image and size
+    //
+    NvU32  monitorDataOffset;
+    NvU32  monitorDataSize;
+    //
+    // Monitor Code offset within RISCV image and size
+    //
+    NvU32  monitorCodeOffset;
+    NvU32  monitorCodeSize;
+    NvU32  bIsMonitorEnabled;
+    //
+    // Swbrom Code offset within RISCV image and size
+    //
+    NvU32  swbromCodeOffset;
+    NvU32  swbromCodeSize;
+    //
+    // Swbrom Data offset within RISCV image and size
+    //
+    NvU32  swbromDataOffset;
+    NvU32  swbromDataSize;
+    //
+    // Total size of FB carveout (image and reserved space).
+    //
+    NvU32  fbReservedSize;
+    //
+    // Indicates whether the entire RISC-V image is signed as "code" in code section.
+    //
+    NvU32  bSignedAsCode;
+} RM_RISCV_UCODE_DESC;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h
new file mode 100644 (file)
index 0000000..341ab0d
--- /dev/null
@@ -0,0 +1,100 @@
+#ifndef __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
+#define __src_nvidia_arch_nvalloc_common_inc_rmgspseq_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum GSP_SEQ_BUF_OPCODE
+{
+    GSP_SEQ_BUF_OPCODE_REG_WRITE = 0,
+    GSP_SEQ_BUF_OPCODE_REG_MODIFY,
+    GSP_SEQ_BUF_OPCODE_REG_POLL,
+    GSP_SEQ_BUF_OPCODE_DELAY_US,
+    GSP_SEQ_BUF_OPCODE_REG_STORE,
+    GSP_SEQ_BUF_OPCODE_CORE_RESET,
+    GSP_SEQ_BUF_OPCODE_CORE_START,
+    GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT,
+    GSP_SEQ_BUF_OPCODE_CORE_RESUME,
+} GSP_SEQ_BUF_OPCODE;
+
+#define GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(opcode)                       \
+    ((opcode == GSP_SEQ_BUF_OPCODE_REG_WRITE)  ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_WRITE)  / sizeof(NvU32)) : \
+     (opcode == GSP_SEQ_BUF_OPCODE_REG_MODIFY) ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_MODIFY) / sizeof(NvU32)) : \
+     (opcode == GSP_SEQ_BUF_OPCODE_REG_POLL)   ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_POLL)   / sizeof(NvU32)) : \
+     (opcode == GSP_SEQ_BUF_OPCODE_DELAY_US)   ? (sizeof(GSP_SEQ_BUF_PAYLOAD_DELAY_US)   / sizeof(NvU32)) : \
+     (opcode == GSP_SEQ_BUF_OPCODE_REG_STORE)  ? (sizeof(GSP_SEQ_BUF_PAYLOAD_REG_STORE)  / sizeof(NvU32)) : \
+    /* GSP_SEQ_BUF_OPCODE_CORE_RESET */                                 \
+    /* GSP_SEQ_BUF_OPCODE_CORE_START */                                 \
+    /* GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT */                         \
+    /* GSP_SEQ_BUF_OPCODE_CORE_RESUME */                                \
+    0)
+
+typedef struct
+{
+    NvU32 addr;
+    NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_WRITE;
+
+typedef struct
+{
+    NvU32 addr;
+    NvU32 mask;
+    NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_REG_MODIFY;
+
+typedef struct
+{
+    NvU32 addr;
+    NvU32 mask;
+    NvU32 val;
+    NvU32 timeout;
+    NvU32 error;
+} GSP_SEQ_BUF_PAYLOAD_REG_POLL;
+
+typedef struct
+{
+    NvU32 val;
+} GSP_SEQ_BUF_PAYLOAD_DELAY_US;
+
+typedef struct
+{
+    NvU32 addr;
+    NvU32 index;
+} GSP_SEQ_BUF_PAYLOAD_REG_STORE;
+
+typedef struct GSP_SEQUENCER_BUFFER_CMD
+{
+    GSP_SEQ_BUF_OPCODE opCode;
+    union
+    {
+        GSP_SEQ_BUF_PAYLOAD_REG_WRITE regWrite;
+        GSP_SEQ_BUF_PAYLOAD_REG_MODIFY regModify;
+        GSP_SEQ_BUF_PAYLOAD_REG_POLL regPoll;
+        GSP_SEQ_BUF_PAYLOAD_DELAY_US delayUs;
+        GSP_SEQ_BUF_PAYLOAD_REG_STORE regStore;
+    } payload;
+} GSP_SEQUENCER_BUFFER_CMD;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_allclasses.h
new file mode 100644 (file)
index 0000000..3144e9b
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef __src_nvidia_generated_g_allclasses_h__
+#define __src_nvidia_generated_g_allclasses_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV01_EVENT_KERNEL_CALLBACK_EX            (0x0000007e)
+
+#define NV04_DISPLAY_COMMON                      (0x00000073)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h
new file mode 100644 (file)
index 0000000..6b89211
--- /dev/null
@@ -0,0 +1,38 @@
+#ifndef __src_nvidia_generated_g_chipset_nvoc_h__
+#define __src_nvidia_generated_g_chipset_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct
+{
+    NvU16               deviceID;           // deviceID
+    NvU16               vendorID;           // vendorID
+    NvU16               subdeviceID;        // subsystem deviceID
+    NvU16               subvendorID;        // subsystem vendorID
+    NvU8                revisionID;         // revision ID
+} BUSINFO;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h
new file mode 100644 (file)
index 0000000..a5128f0
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __src_nvidia_generated_g_fbsr_nvoc_h__
+#define __src_nvidia_generated_g_fbsr_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2009-2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define FBSR_TYPE_DMA                                 4   // Copy using DMA. Fastest.
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h
new file mode 100644 (file)
index 0000000..5641a21
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef __src_nvidia_generated_g_gpu_nvoc_h__
+#define __src_nvidia_generated_g_gpu_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2004-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum
+{
+    COMPUTE_BRANDING_TYPE_NONE,
+    COMPUTE_BRANDING_TYPE_TESLA,
+} COMPUTE_BRANDING_TYPE;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h
new file mode 100644 (file)
index 0000000..b5ad55f
--- /dev/null
@@ -0,0 +1,62 @@
+#ifndef __src_nvidia_generated_g_kernel_channel_nvoc_h__
+#define __src_nvidia_generated_g_kernel_channel_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum {
+    /*!
+     * Initial state as passed in NV_CHANNEL_ALLOC_PARAMS by
+     * kernel CPU-RM clients.
+     */
+    ERROR_NOTIFIER_TYPE_UNKNOWN = 0,
+    /*! @brief Error notifier is explicitly not set.
+     *
+     * The corresponding hErrorContext or hEccErrorContext must be
+     * NV01_NULL_OBJECT.
+     */
+    ERROR_NOTIFIER_TYPE_NONE,
+    /*! @brief Error notifier is a ContextDma */
+    ERROR_NOTIFIER_TYPE_CTXDMA,
+    /*! @brief Error notifier is a NvNotification array in sysmem/vidmem */
+    ERROR_NOTIFIER_TYPE_MEMORY
+} ErrorNotifierType;
+
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE                       1:0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_USER                  0x0
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_ADMIN                 0x1
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_PRIVILEGE_KERNEL                0x2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE             3:2
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_UNKNOWN     ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_NONE        ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_CTXDMA      ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ERROR_NOTIFIER_TYPE_MEMORY      ERROR_NOTIFIER_TYPE_MEMORY
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE         5:4
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_UNKNOWN ERROR_NOTIFIER_TYPE_UNKNOWN
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_NONE    ERROR_NOTIFIER_TYPE_NONE
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_CTXDMA  ERROR_NOTIFIER_TYPE_CTXDMA
+#define NV_KERNELCHANNEL_ALLOC_INTERNALFLAGS_ECC_ERROR_NOTIFIER_TYPE_MEMORY  ERROR_NOTIFIER_TYPE_MEMORY
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h
new file mode 100644 (file)
index 0000000..946954a
--- /dev/null
@@ -0,0 +1,119 @@
+#ifndef __src_nvidia_generated_g_kernel_fifo_nvoc_h__
+#define __src_nvidia_generated_g_kernel_fifo_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum
+{
+    /* *************************************************************************
+     * Bug 3820969
+     * THINK BEFORE CHANGING ENUM ORDER HERE.
+     * VGPU-guest uses this same ordering. Because this enum is not versioned,
+     * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+     * ************************************************************************/
+
+    // *ENG_XYZ, e.g.: ENG_GR, ENG_CE etc.,
+    ENGINE_INFO_TYPE_ENG_DESC = 0,
+
+    // HW engine ID
+    ENGINE_INFO_TYPE_FIFO_TAG,
+
+    // RM_ENGINE_TYPE_*
+    ENGINE_INFO_TYPE_RM_ENGINE_TYPE,
+
+    //
+    // runlist id (meaning varies by GPU)
+    // Valid only for Esched-driven engines
+    //
+    ENGINE_INFO_TYPE_RUNLIST,
+
+    // NV_PFIFO_INTR_MMU_FAULT_ENG_ID_*
+    ENGINE_INFO_TYPE_MMU_FAULT_ID,
+
+    // ROBUST_CHANNEL_*
+    ENGINE_INFO_TYPE_RC_MASK,
+
+    // Reset Bit Position. On Ampere, only valid if not _INVALID
+    ENGINE_INFO_TYPE_RESET,
+
+    // Interrupt Bit Position
+    ENGINE_INFO_TYPE_INTR,
+
+    // log2(MC_ENGINE_*)
+    ENGINE_INFO_TYPE_MC,
+
+    // The DEV_TYPE_ENUM for this engine
+    ENGINE_INFO_TYPE_DEV_TYPE_ENUM,
+
+    // The particular instance of this engine type
+    ENGINE_INFO_TYPE_INSTANCE_ID,
+
+    //
+    // The base address for this engine's NV_RUNLIST. Valid only on Ampere+
+    // Valid only for Esched-driven engines
+    //
+    ENGINE_INFO_TYPE_RUNLIST_PRI_BASE,
+
+    //
+    // If this entry is a host-driven engine.
+    // Update _isEngineInfoTypeValidForOnlyHostDriven when adding any new entry.
+    //
+    ENGINE_INFO_TYPE_IS_HOST_DRIVEN_ENGINE,
+
+    //
+    // The index into the per-engine NV_RUNLIST registers. Valid only on Ampere+
+    // Valid only for Esched-driven engines
+    //
+    ENGINE_INFO_TYPE_RUNLIST_ENGINE_ID,
+
+    //
+    // The base address for this engine's NV_CHRAM registers. Valid only on
+    // Ampere+
+    //
+    // Valid only for Esched-driven engines
+    //
+    ENGINE_INFO_TYPE_CHRAM_PRI_BASE,
+
+    // This entry added to copy data at RMCTRL_EXPORT() call for Kernel RM
+    ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+    // Used for iterating the engine info table by the index passed.
+    ENGINE_INFO_TYPE_INVALID = ENGINE_INFO_TYPE_KERNEL_RM_MAX,
+
+    // Size of FIFO_ENGINE_LIST.engineData
+    ENGINE_INFO_TYPE_ENGINE_DATA_ARRAY_SIZE = ENGINE_INFO_TYPE_INVALID,
+
+    // Input-only parameter for kfifoEngineInfoXlate.
+    ENGINE_INFO_TYPE_PBDMA_ID
+
+    /* *************************************************************************
+     * Bug 3820969
+     * THINK BEFORE CHANGING ENUM ORDER HERE.
+     * VGPU-guest uses this same ordering. Because this enum is not versioned,
+     * changing the order here WILL BREAK old-guest-on-newer-host compatibility.
+     * ************************************************************************/
+} ENGINE_INFO_TYPE;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h
new file mode 100644 (file)
index 0000000..daabaee
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef __src_nvidia_generated_g_mem_desc_nvoc_h__
+#define __src_nvidia_generated_g_mem_desc_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define ADDR_SYSMEM     1         // System memory (PCI)
+#define ADDR_FBMEM      2         // Frame buffer memory space
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_os_nvoc.h
new file mode 100644 (file)
index 0000000..754c6af
--- /dev/null
@@ -0,0 +1,44 @@
+#ifndef __src_nvidia_generated_g_os_nvoc_h__
+#define __src_nvidia_generated_g_os_nvoc_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct PACKED_REGISTRY_ENTRY
+{
+    NvU32                   nameOffset;
+    NvU8                    type;
+    NvU32                   data;
+    NvU32                   length;
+} PACKED_REGISTRY_ENTRY;
+
+typedef struct PACKED_REGISTRY_TABLE
+{
+    NvU32                   size;
+    NvU32                   numEntries;
+    PACKED_REGISTRY_ENTRY   entries[0];
+} PACKED_REGISTRY_TABLE;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_rpc-structures.h
new file mode 100644 (file)
index 0000000..8d925e2
--- /dev/null
@@ -0,0 +1,124 @@
+#ifndef __src_nvidia_generated_g_rpc_structures_h__
+#define __src_nvidia_generated_g_rpc_structures_h__
+#include <nvrm/535.113.01/nvidia/generated/g_sdk-structures.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct rpc_alloc_memory_v13_01
+{
+    NvHandle   hClient;
+    NvHandle   hDevice;
+    NvHandle   hMemory;
+    NvU32      hClass;
+    NvU32      flags;
+    NvU32      pteAdjust;
+    NvU32      format;
+    NvU64      length NV_ALIGN_BYTES(8);
+    NvU32      pageCount;
+    struct pte_desc pteDesc;
+} rpc_alloc_memory_v13_01;
+
+typedef struct rpc_free_v03_00
+{
+    NVOS00_PARAMETERS_v03_00 params;
+} rpc_free_v03_00;
+
+typedef struct rpc_unloading_guest_driver_v1F_07
+{
+    NvBool     bInPMTransition;
+    NvBool     bGc6Entering;
+    NvU32      newLevel;
+} rpc_unloading_guest_driver_v1F_07;
+
+typedef struct rpc_update_bar_pde_v15_00
+{
+    UpdateBarPde_v15_00 info;
+} rpc_update_bar_pde_v15_00;
+
+typedef struct rpc_gsp_rm_alloc_v03_00
+{
+    NvHandle   hClient;
+    NvHandle   hParent;
+    NvHandle   hObject;
+    NvU32      hClass;
+    NvU32      status;
+    NvU32      paramsSize;
+    NvU32      flags;
+    NvU8       reserved[4];
+    NvU8       params[];
+} rpc_gsp_rm_alloc_v03_00;
+
+typedef struct rpc_gsp_rm_control_v03_00
+{
+    NvHandle   hClient;
+    NvHandle   hObject;
+    NvU32      cmd;
+    NvU32      status;
+    NvU32      paramsSize;
+    NvU32      flags;
+    NvU8       params[];
+} rpc_gsp_rm_control_v03_00;
+
+typedef struct rpc_run_cpu_sequencer_v17_00
+{
+    NvU32      bufferSizeDWord;
+    NvU32      cmdIndex;
+    NvU32      regSaveArea[8];
+    NvU32      commandBuffer[];
+} rpc_run_cpu_sequencer_v17_00;
+
+typedef struct rpc_post_event_v17_00
+{
+    NvHandle   hClient;
+    NvHandle   hEvent;
+    NvU32      notifyIndex;
+    NvU32      data;
+    NvU16      info16;
+    NvU32      status;
+    NvU32      eventDataSize;
+    NvBool     bNotifyList;
+    NvU8       eventData[];
+} rpc_post_event_v17_00;
+
+typedef struct rpc_rc_triggered_v17_02
+{
+    NvU32      nv2080EngineType;
+    NvU32      chid;
+    NvU32      exceptType;
+    NvU32      scope;
+    NvU16      partitionAttributionId;
+} rpc_rc_triggered_v17_02;
+
+typedef struct rpc_os_error_log_v17_00
+{
+    NvU32      exceptType;
+    NvU32      runlistId;
+    NvU32      chid;
+    char       errString[0x100];
+} rpc_os_error_log_v17_00;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/generated/g_sdk-structures.h
new file mode 100644 (file)
index 0000000..e9fed41
--- /dev/null
@@ -0,0 +1,45 @@
+#ifndef __src_nvidia_generated_g_sdk_structures_h__
+#define __src_nvidia_generated_g_sdk_structures_h__
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2008-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct NVOS00_PARAMETERS_v03_00
+{
+    NvHandle   hRoot;
+    NvHandle   hObjectParent;
+    NvHandle   hObjectOld;
+    NvV32      status;
+} NVOS00_PARAMETERS_v03_00;
+
+typedef struct UpdateBarPde_v15_00
+{
+    NV_RPC_UPDATE_PDE_BAR_TYPE barType;
+    NvU64      entryValue NV_ALIGN_BYTES(8);
+    NvU64      entryLevelShift NV_ALIGN_BYTES(8);
+} UpdateBarPde_v15_00;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h
new file mode 100644 (file)
index 0000000..af50b11
--- /dev/null
@@ -0,0 +1,74 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
+#define __src_nvidia_inc_kernel_gpu_gpu_acpi_data_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct DOD_METHOD_DATA
+{
+    NV_STATUS status;
+    NvU32     acpiIdListLen;
+    NvU32     acpiIdList[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} DOD_METHOD_DATA;
+
+typedef struct JT_METHOD_DATA
+{
+    NV_STATUS status;
+    NvU32     jtCaps;
+    NvU16     jtRevId;
+    NvBool    bSBIOSCaps;
+} JT_METHOD_DATA;
+
+typedef struct MUX_METHOD_DATA_ELEMENT
+{
+    NvU32       acpiId;
+    NvU32       mode;
+    NV_STATUS   status;
+} MUX_METHOD_DATA_ELEMENT;
+
+typedef struct MUX_METHOD_DATA
+{
+    NvU32                       tableLen;
+    MUX_METHOD_DATA_ELEMENT     acpiIdMuxModeTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+    MUX_METHOD_DATA_ELEMENT     acpiIdMuxPartTable[NV0073_CTRL_SYSTEM_ACPI_ID_MAP_MAX_DISPLAYS];
+} MUX_METHOD_DATA;
+
+typedef struct CAPS_METHOD_DATA
+{
+    NV_STATUS status;
+    NvU32     optimusCaps;
+} CAPS_METHOD_DATA;
+
+typedef struct ACPI_METHOD_DATA
+{
+    NvBool                                               bValid;
+    DOD_METHOD_DATA                                      dodMethodData;
+    JT_METHOD_DATA                                       jtMethodData;
+    MUX_METHOD_DATA                                      muxMethodData;
+    CAPS_METHOD_DATA                                     capsMethodData;
+} ACPI_METHOD_DATA;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h
new file mode 100644 (file)
index 0000000..e3160c6
--- /dev/null
@@ -0,0 +1,86 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
+#define __src_nvidia_inc_kernel_gpu_gpu_engine_type_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef enum
+{
+    RM_ENGINE_TYPE_NULL                 =       (0x00000000),
+    RM_ENGINE_TYPE_GR0                  =       (0x00000001),
+    RM_ENGINE_TYPE_GR1                  =       (0x00000002),
+    RM_ENGINE_TYPE_GR2                  =       (0x00000003),
+    RM_ENGINE_TYPE_GR3                  =       (0x00000004),
+    RM_ENGINE_TYPE_GR4                  =       (0x00000005),
+    RM_ENGINE_TYPE_GR5                  =       (0x00000006),
+    RM_ENGINE_TYPE_GR6                  =       (0x00000007),
+    RM_ENGINE_TYPE_GR7                  =       (0x00000008),
+    RM_ENGINE_TYPE_COPY0                =       (0x00000009),
+    RM_ENGINE_TYPE_COPY1                =       (0x0000000a),
+    RM_ENGINE_TYPE_COPY2                =       (0x0000000b),
+    RM_ENGINE_TYPE_COPY3                =       (0x0000000c),
+    RM_ENGINE_TYPE_COPY4                =       (0x0000000d),
+    RM_ENGINE_TYPE_COPY5                =       (0x0000000e),
+    RM_ENGINE_TYPE_COPY6                =       (0x0000000f),
+    RM_ENGINE_TYPE_COPY7                =       (0x00000010),
+    RM_ENGINE_TYPE_COPY8                =       (0x00000011),
+    RM_ENGINE_TYPE_COPY9                =       (0x00000012),
+    RM_ENGINE_TYPE_NVDEC0               =       (0x0000001d),
+    RM_ENGINE_TYPE_NVDEC1               =       (0x0000001e),
+    RM_ENGINE_TYPE_NVDEC2               =       (0x0000001f),
+    RM_ENGINE_TYPE_NVDEC3               =       (0x00000020),
+    RM_ENGINE_TYPE_NVDEC4               =       (0x00000021),
+    RM_ENGINE_TYPE_NVDEC5               =       (0x00000022),
+    RM_ENGINE_TYPE_NVDEC6               =       (0x00000023),
+    RM_ENGINE_TYPE_NVDEC7               =       (0x00000024),
+    RM_ENGINE_TYPE_NVENC0               =       (0x00000025),
+    RM_ENGINE_TYPE_NVENC1               =       (0x00000026),
+    RM_ENGINE_TYPE_NVENC2               =       (0x00000027),
+    RM_ENGINE_TYPE_VP                   =       (0x00000028),
+    RM_ENGINE_TYPE_ME                   =       (0x00000029),
+    RM_ENGINE_TYPE_PPP                  =       (0x0000002a),
+    RM_ENGINE_TYPE_MPEG                 =       (0x0000002b),
+    RM_ENGINE_TYPE_SW                   =       (0x0000002c),
+    RM_ENGINE_TYPE_TSEC                 =       (0x0000002d),
+    RM_ENGINE_TYPE_VIC                  =       (0x0000002e),
+    RM_ENGINE_TYPE_MP                   =       (0x0000002f),
+    RM_ENGINE_TYPE_SEC2                 =       (0x00000030),
+    RM_ENGINE_TYPE_HOST                 =       (0x00000031),
+    RM_ENGINE_TYPE_DPU                  =       (0x00000032),
+    RM_ENGINE_TYPE_PMU                  =       (0x00000033),
+    RM_ENGINE_TYPE_FBFLCN               =       (0x00000034),
+    RM_ENGINE_TYPE_NVJPEG0              =       (0x00000035),
+    RM_ENGINE_TYPE_NVJPEG1              =       (0x00000036),
+    RM_ENGINE_TYPE_NVJPEG2              =       (0x00000037),
+    RM_ENGINE_TYPE_NVJPEG3              =       (0x00000038),
+    RM_ENGINE_TYPE_NVJPEG4              =       (0x00000039),
+    RM_ENGINE_TYPE_NVJPEG5              =       (0x0000003a),
+    RM_ENGINE_TYPE_NVJPEG6              =       (0x0000003b),
+    RM_ENGINE_TYPE_NVJPEG7              =       (0x0000003c),
+    RM_ENGINE_TYPE_OFA                  =       (0x0000003d),
+    RM_ENGINE_TYPE_LAST                 =       (0x0000003e),
+} RM_ENGINE_TYPE;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h
new file mode 100644 (file)
index 0000000..3abec59
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
+#define __src_nvidia_inc_kernel_gpu_gsp_gsp_fw_heap_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2022-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB                  (96 << 10)   // All architectures
+
+#define GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE      ((48 << 10) * 2048)   // Support 2048 channels
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h
new file mode 100644 (file)
index 0000000..4033a6f
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
+#define __src_nvidia_inc_kernel_gpu_gsp_gsp_init_args_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct {
+    RmPhysAddr sharedMemPhysAddr;
+    NvU32 pageTableEntryCount;
+    NvLength cmdQueueOffset;
+    NvLength statQueueOffset;
+    NvLength locklessCmdQueueOffset;
+    NvLength locklessStatQueueOffset;
+} MESSAGE_QUEUE_INIT_ARGUMENTS;
+
+typedef struct {
+    NvU32 oldLevel;
+    NvU32 flags;
+    NvBool bInPMTransition;
+} GSP_SR_INIT_ARGUMENTS;
+
+typedef struct
+{
+    MESSAGE_QUEUE_INIT_ARGUMENTS      messageQueueInitArguments;
+    GSP_SR_INIT_ARGUMENTS             srInitArguments;
+    NvU32                             gpuInstance;
+
+    struct
+    {
+        NvU64                         pa;
+        NvU64                         size;
+    } profilerArgs;
+} GSP_ARGUMENTS_CACHED;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h
new file mode 100644 (file)
index 0000000..eeab25a
--- /dev/null
@@ -0,0 +1,174 @@
+#ifndef __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
+#define __src_nvidia_inc_kernel_gpu_gsp_gsp_static_config_h__
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080gr.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080bios.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fb.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gr.h>
+#include <nvrm/535.113.01/nvidia/generated/g_chipset_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_gpu_nvoc.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_acpi_data.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2019-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+typedef struct GSP_VF_INFO
+{
+    NvU32  totalVFs;
+    NvU32  firstVFOffset;
+    NvU64  FirstVFBar0Address;
+    NvU64  FirstVFBar1Address;
+    NvU64  FirstVFBar2Address;
+    NvBool b64bitBar0;
+    NvBool b64bitBar1;
+    NvBool b64bitBar2;
+} GSP_VF_INFO;
+
+typedef struct GspSMInfo_t
+{
+    NvU32 version;
+    NvU32 regBankCount;
+    NvU32 regBankRegCount;
+    NvU32 maxWarpsPerSM;
+    NvU32 maxThreadsPerWarp;
+    NvU32 geomGsObufEntries;
+    NvU32 geomXbufEntries;
+    NvU32 maxSPPerSM;
+    NvU32 rtCoreCount;
+} GspSMInfo;
+
+typedef struct GspStaticConfigInfo_t
+{
+    NvU8 grCapsBits[NV0080_CTRL_GR_CAPS_TBL_SIZE];
+    NV2080_CTRL_GPU_GET_GID_INFO_PARAMS gidInfo;
+    NV2080_CTRL_GPU_GET_FERMI_GPC_INFO_PARAMS gpcInfo;
+    NV2080_CTRL_GPU_GET_FERMI_TPC_INFO_PARAMS tpcInfo[MAX_GPC_COUNT];
+    NV2080_CTRL_GPU_GET_FERMI_ZCULL_INFO_PARAMS zcullInfo[MAX_GPC_COUNT];
+    NV2080_CTRL_BIOS_GET_SKU_INFO_PARAMS SKUInfo;
+    NV2080_CTRL_CMD_FB_GET_FB_REGION_INFO_PARAMS fbRegionInfoParams;
+    COMPUTE_BRANDING_TYPE computeBranding;
+
+    NV0080_CTRL_GPU_GET_SRIOV_CAPS_PARAMS sriovCaps;
+    NvU32 sriovMaxGfid;
+
+    NvU32 engineCaps[NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX];
+
+    GspSMInfo SM_info;
+
+    NvBool poisonFuseEnabled;
+
+    NvU64 fb_length;
+    NvU32 fbio_mask;
+    NvU32 fb_bus_width;
+    NvU32 fb_ram_type;
+    NvU32 fbp_mask;
+    NvU32 l2_cache_size;
+
+    NvU32 gfxpBufferSize[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+    NvU32 gfxpBufferAlignment[NV2080_CTRL_CMD_GR_CTXSW_PREEMPTION_BIND_BUFFERS_CONTEXT_POOL];
+
+    NvU8 gpuNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+    NvU8 gpuShortNameString[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+    NvU16 gpuNameString_Unicode[NV2080_GPU_MAX_NAME_STRING_LENGTH];
+    NvBool bGpuInternalSku;
+    NvBool bIsQuadroGeneric;
+    NvBool bIsQuadroAd;
+    NvBool bIsNvidiaNvs;
+    NvBool bIsVgx;
+    NvBool bGeforceSmb;
+    NvBool bIsTitan;
+    NvBool bIsTesla;
+    NvBool bIsMobile;
+    NvBool bIsGc6Rtd3Allowed;
+    NvBool bIsGcOffRtd3Allowed;
+    NvBool bIsGcoffLegacyAllowed;
+
+    NvU64 bar1PdeBase;
+    NvU64 bar2PdeBase;
+
+    NvBool bVbiosValid;
+    NvU32 vbiosSubVendor;
+    NvU32 vbiosSubDevice;
+
+    NvBool bPageRetirementSupported;
+
+    NvBool bSplitVasBetweenServerClientRm;
+
+    NvBool bClRootportNeedsNosnoopWAR;
+
+    VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS displaylessMaxHeads;
+    VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS displaylessMaxResolution;
+    NvU64 displaylessMaxPixels;
+
+    // Client handle for internal RMAPI control.
+    NvHandle hInternalClient;
+
+    // Device handle for internal RMAPI control.
+    NvHandle hInternalDevice;
+
+    // Subdevice handle for internal RMAPI control.
+    NvHandle hInternalSubdevice;
+
+    NvBool bSelfHostedMode;
+    NvBool bAtsSupported;
+
+    NvBool bIsGpuUefi;
+} GspStaticConfigInfo;
+
+typedef struct GspSystemInfo
+{
+    NvU64 gpuPhysAddr;
+    NvU64 gpuPhysFbAddr;
+    NvU64 gpuPhysInstAddr;
+    NvU64 nvDomainBusDeviceFunc;
+    NvU64 simAccessBufPhysAddr;
+    NvU64 pcieAtomicsOpMask;
+    NvU64 consoleMemSize;
+    NvU64 maxUserVa;
+    NvU32 pciConfigMirrorBase;
+    NvU32 pciConfigMirrorSize;
+    NvU8 oorArch;
+    NvU64 clPdbProperties;
+    NvU32 Chipset;
+    NvBool bGpuBehindBridge;
+    NvBool bMnocAvailable;
+    NvBool bUpstreamL0sUnsupported;
+    NvBool bUpstreamL1Unsupported;
+    NvBool bUpstreamL1PorSupported;
+    NvBool bUpstreamL1PorMobileOnly;
+    NvU8   upstreamAddressValid;
+    BUSINFO FHBBusInfo;
+    BUSINFO chipsetIDInfo;
+    ACPI_METHOD_DATA acpiMethodData;
+    NvU32 hypervisorType;
+    NvBool bIsPassthru;
+    NvU64 sysTimerOffsetNs;
+    GSP_VF_INFO gspVFInfo;
+} GspSystemInfo;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h
new file mode 100644 (file)
index 0000000..bd5e01f
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
+#define __src_nvidia_inc_kernel_gpu_intr_engine_idx_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define MC_ENGINE_IDX_DISP                          2
+
+#define MC_ENGINE_IDX_CE0                           15
+
+#define MC_ENGINE_IDX_CE9                           24
+
+#define MC_ENGINE_IDX_MSENC                         38
+
+#define MC_ENGINE_IDX_MSENC2                        40
+
+#define MC_ENGINE_IDX_GSP                           49
+#define MC_ENGINE_IDX_NVJPG                         50
+#define MC_ENGINE_IDX_NVJPEG                        MC_ENGINE_IDX_NVJPG
+#define MC_ENGINE_IDX_NVJPEG0                       MC_ENGINE_IDX_NVJPEG
+
+#define MC_ENGINE_IDX_NVJPEG7                       57
+
+#define MC_ENGINE_IDX_BSP                           64
+#define MC_ENGINE_IDX_NVDEC                         MC_ENGINE_IDX_BSP
+#define MC_ENGINE_IDX_NVDEC0                        MC_ENGINE_IDX_NVDEC
+
+#define MC_ENGINE_IDX_NVDEC7                        71
+
+#define MC_ENGINE_IDX_OFA0                          80
+
+#define MC_ENGINE_IDX_GR                            82
+#define MC_ENGINE_IDX_GR0                           MC_ENGINE_IDX_GR
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/gpu/nvbitmask.h
new file mode 100644 (file)
index 0000000..366447a
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef __src_nvidia_inc_kernel_gpu_nvbitmask_h__
+#define __src_nvidia_inc_kernel_gpu_nvbitmask_h__
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NVGPU_ENGINE_CAPS_MASK_BITS                32
+#define NVGPU_ENGINE_CAPS_MASK_ARRAY_MAX           ((RM_ENGINE_TYPE_LAST-1)/NVGPU_ENGINE_CAPS_MASK_BITS + 1)
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h
new file mode 100644 (file)
index 0000000..4a850da
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __src_nvidia_inc_kernel_os_nv_memory_type_h__
+#define __src_nvidia_inc_kernel_os_nv_memory_type_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define NV_MEMORY_WRITECOMBINED      2
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h
new file mode 100644 (file)
index 0000000..73c57f2
--- /dev/null
@@ -0,0 +1,262 @@
+#ifndef __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
+#define __src_nvidia_kernel_inc_vgpu_rpc_global_enums_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+#ifndef X
+#    define X(UNIT, RPC) NV_VGPU_MSG_FUNCTION_##RPC,
+#    define DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+    X(RM, NOP)                             // 0
+    X(RM, SET_GUEST_SYSTEM_INFO)           // 1
+    X(RM, ALLOC_ROOT)                      // 2
+    X(RM, ALLOC_DEVICE)                    // 3 deprecated
+    X(RM, ALLOC_MEMORY)                    // 4
+    X(RM, ALLOC_CTX_DMA)                   // 5
+    X(RM, ALLOC_CHANNEL_DMA)               // 6
+    X(RM, MAP_MEMORY)                      // 7
+    X(RM, BIND_CTX_DMA)                    // 8 deprecated
+    X(RM, ALLOC_OBJECT)                    // 9
+    X(RM, FREE)                            //10
+    X(RM, LOG)                             //11
+    X(RM, ALLOC_VIDMEM)                    //12
+    X(RM, UNMAP_MEMORY)                    //13
+    X(RM, MAP_MEMORY_DMA)                  //14
+    X(RM, UNMAP_MEMORY_DMA)                //15
+    X(RM, GET_EDID)                        //16
+    X(RM, ALLOC_DISP_CHANNEL)              //17
+    X(RM, ALLOC_DISP_OBJECT)               //18
+    X(RM, ALLOC_SUBDEVICE)                 //19
+    X(RM, ALLOC_DYNAMIC_MEMORY)            //20
+    X(RM, DUP_OBJECT)                      //21
+    X(RM, IDLE_CHANNELS)                   //22
+    X(RM, ALLOC_EVENT)                     //23
+    X(RM, SEND_EVENT)                      //24
+    X(RM, REMAPPER_CONTROL)                //25 deprecated
+    X(RM, DMA_CONTROL)                     //26
+    X(RM, DMA_FILL_PTE_MEM)                //27
+    X(RM, MANAGE_HW_RESOURCE)              //28
+    X(RM, BIND_ARBITRARY_CTX_DMA)          //29 deprecated
+    X(RM, CREATE_FB_SEGMENT)               //30
+    X(RM, DESTROY_FB_SEGMENT)              //31
+    X(RM, ALLOC_SHARE_DEVICE)              //32
+    X(RM, DEFERRED_API_CONTROL)            //33
+    X(RM, REMOVE_DEFERRED_API)             //34
+    X(RM, SIM_ESCAPE_READ)                 //35
+    X(RM, SIM_ESCAPE_WRITE)                //36
+    X(RM, SIM_MANAGE_DISPLAY_CONTEXT_DMA)  //37
+    X(RM, FREE_VIDMEM_VIRT)                //38
+    X(RM, PERF_GET_PSTATE_INFO)            //39  deprecated for vGPU, used by GSP
+    X(RM, PERF_GET_PERFMON_SAMPLE)         //40
+    X(RM, PERF_GET_VIRTUAL_PSTATE_INFO)    //41  deprecated
+    X(RM, PERF_GET_LEVEL_INFO)             //42
+    X(RM, MAP_SEMA_MEMORY)                 //43
+    X(RM, UNMAP_SEMA_MEMORY)               //44
+    X(RM, SET_SURFACE_PROPERTIES)          //45
+    X(RM, CLEANUP_SURFACE)                 //46
+    X(RM, UNLOADING_GUEST_DRIVER)          //47
+    X(RM, TDR_SET_TIMEOUT_STATE)           //48
+    X(RM, SWITCH_TO_VGA)                   //49
+    X(RM, GPU_EXEC_REG_OPS)                //50
+    X(RM, GET_STATIC_INFO)                 //51
+    X(RM, ALLOC_VIRTMEM)                   //52
+    X(RM, UPDATE_PDE_2)                    //53
+    X(RM, SET_PAGE_DIRECTORY)              //54
+    X(RM, GET_STATIC_PSTATE_INFO)          //55
+    X(RM, TRANSLATE_GUEST_GPU_PTES)        //56
+    X(RM, RESERVED_57)                     //57
+    X(RM, RESET_CURRENT_GR_CONTEXT)        //58
+    X(RM, SET_SEMA_MEM_VALIDATION_STATE)   //59
+    X(RM, GET_ENGINE_UTILIZATION)          //60
+    X(RM, UPDATE_GPU_PDES)                 //61
+    X(RM, GET_ENCODER_CAPACITY)            //62
+    X(RM, VGPU_PF_REG_READ32)              //63
+    X(RM, SET_GUEST_SYSTEM_INFO_EXT)       //64
+    X(GSP, GET_GSP_STATIC_INFO)            //65
+    X(RM, RMFS_INIT)                       //66
+    X(RM, RMFS_CLOSE_QUEUE)                //67
+    X(RM, RMFS_CLEANUP)                    //68
+    X(RM, RMFS_TEST)                       //69
+    X(RM, UPDATE_BAR_PDE)                  //70
+    X(RM, CONTINUATION_RECORD)             //71
+    X(RM, GSP_SET_SYSTEM_INFO)             //72
+    X(RM, SET_REGISTRY)                    //73
+    X(GSP, GSP_INIT_POST_OBJGPU)           //74 deprecated
+    X(RM, SUBDEV_EVENT_SET_NOTIFICATION)   //75 deprecated
+    X(GSP, GSP_RM_CONTROL)                 //76
+    X(RM, GET_STATIC_INFO2)                //77
+    X(RM, DUMP_PROTOBUF_COMPONENT)         //78
+    X(RM, UNSET_PAGE_DIRECTORY)            //79
+    X(RM, GET_CONSOLIDATED_STATIC_INFO)    //80
+    X(RM, GMMU_REGISTER_FAULT_BUFFER)      //81 deprecated
+    X(RM, GMMU_UNREGISTER_FAULT_BUFFER)    //82 deprecated
+    X(RM, GMMU_REGISTER_CLIENT_SHADOW_FAULT_BUFFER)   //83 deprecated
+    X(RM, GMMU_UNREGISTER_CLIENT_SHADOW_FAULT_BUFFER) //84 deprecated
+    X(RM, CTRL_SET_VGPU_FB_USAGE)          //85
+    X(RM, CTRL_NVFBC_SW_SESSION_UPDATE_INFO)    //86
+    X(RM, CTRL_NVENC_SW_SESSION_UPDATE_INFO)    //87
+    X(RM, CTRL_RESET_CHANNEL)                   //88
+    X(RM, CTRL_RESET_ISOLATED_CHANNEL)          //89
+    X(RM, CTRL_GPU_HANDLE_VF_PRI_FAULT)         //90
+    X(RM, CTRL_CLK_GET_EXTENDED_INFO)           //91
+    X(RM, CTRL_PERF_BOOST)                      //92
+    X(RM, CTRL_PERF_VPSTATES_GET_CONTROL)       //93
+    X(RM, CTRL_GET_ZBC_CLEAR_TABLE)             //94
+    X(RM, CTRL_SET_ZBC_COLOR_CLEAR)             //95
+    X(RM, CTRL_SET_ZBC_DEPTH_CLEAR)             //96
+    X(RM, CTRL_GPFIFO_SCHEDULE)                 //97
+    X(RM, CTRL_SET_TIMESLICE)                   //98
+    X(RM, CTRL_PREEMPT)                         //99
+    X(RM, CTRL_FIFO_DISABLE_CHANNELS)           //100
+    X(RM, CTRL_SET_TSG_INTERLEAVE_LEVEL)        //101
+    X(RM, CTRL_SET_CHANNEL_INTERLEAVE_LEVEL)    //102
+    X(GSP, GSP_RM_ALLOC)                        //103
+    X(RM, CTRL_GET_P2P_CAPS_V2)                 //104
+    X(RM, CTRL_CIPHER_AES_ENCRYPT)              //105
+    X(RM, CTRL_CIPHER_SESSION_KEY)              //106
+    X(RM, CTRL_CIPHER_SESSION_KEY_STATUS)       //107
+    X(RM, CTRL_DBG_CLEAR_ALL_SM_ERROR_STATES)   //108
+    X(RM, CTRL_DBG_READ_ALL_SM_ERROR_STATES)    //109
+    X(RM, CTRL_DBG_SET_EXCEPTION_MASK)          //110
+    X(RM, CTRL_GPU_PROMOTE_CTX)                 //111
+    X(RM, CTRL_GR_CTXSW_PREEMPTION_BIND)        //112
+    X(RM, CTRL_GR_SET_CTXSW_PREEMPTION_MODE)    //113
+    X(RM, CTRL_GR_CTXSW_ZCULL_BIND)             //114
+    X(RM, CTRL_GPU_INITIALIZE_CTX)              //115
+    X(RM, CTRL_VASPACE_COPY_SERVER_RESERVED_PDES)    //116
+    X(RM, CTRL_FIFO_CLEAR_FAULTED_BIT)          //117
+    X(RM, CTRL_GET_LATEST_ECC_ADDRESSES)        //118
+    X(RM, CTRL_MC_SERVICE_INTERRUPTS)           //119
+    X(RM, CTRL_DMA_SET_DEFAULT_VASPACE)         //120
+    X(RM, CTRL_GET_CE_PCE_MASK)                 //121
+    X(RM, CTRL_GET_ZBC_CLEAR_TABLE_ENTRY)       //122
+    X(RM, CTRL_GET_NVLINK_PEER_ID_MASK)         //123
+    X(RM, CTRL_GET_NVLINK_STATUS)               //124
+    X(RM, CTRL_GET_P2P_CAPS)                    //125
+    X(RM, CTRL_GET_P2P_CAPS_MATRIX)             //126
+    X(RM, RESERVED_0)                           //127
+    X(RM, CTRL_RESERVE_PM_AREA_SMPC)            //128
+    X(RM, CTRL_RESERVE_HWPM_LEGACY)             //129
+    X(RM, CTRL_B0CC_EXEC_REG_OPS)               //130
+    X(RM, CTRL_BIND_PM_RESOURCES)               //131
+    X(RM, CTRL_DBG_SUSPEND_CONTEXT)             //132
+    X(RM, CTRL_DBG_RESUME_CONTEXT)              //133
+    X(RM, CTRL_DBG_EXEC_REG_OPS)                //134
+    X(RM, CTRL_DBG_SET_MODE_MMU_DEBUG)          //135
+    X(RM, CTRL_DBG_READ_SINGLE_SM_ERROR_STATE)  //136
+    X(RM, CTRL_DBG_CLEAR_SINGLE_SM_ERROR_STATE) //137
+    X(RM, CTRL_DBG_SET_MODE_ERRBAR_DEBUG)       //138
+    X(RM, CTRL_DBG_SET_NEXT_STOP_TRIGGER_TYPE)  //139
+    X(RM, CTRL_ALLOC_PMA_STREAM)                //140
+    X(RM, CTRL_PMA_STREAM_UPDATE_GET_PUT)       //141
+    X(RM, CTRL_FB_GET_INFO_V2)                  //142
+    X(RM, CTRL_FIFO_SET_CHANNEL_PROPERTIES)     //143
+    X(RM, CTRL_GR_GET_CTX_BUFFER_INFO)          //144
+    X(RM, CTRL_KGR_GET_CTX_BUFFER_PTES)         //145
+    X(RM, CTRL_GPU_EVICT_CTX)                   //146
+    X(RM, CTRL_FB_GET_FS_INFO)                  //147
+    X(RM, CTRL_GRMGR_GET_GR_FS_INFO)            //148
+    X(RM, CTRL_STOP_CHANNEL)                    //149
+    X(RM, CTRL_GR_PC_SAMPLING_MODE)             //150
+    X(RM, CTRL_PERF_RATED_TDP_GET_STATUS)       //151
+    X(RM, CTRL_PERF_RATED_TDP_SET_CONTROL)      //152
+    X(RM, CTRL_FREE_PMA_STREAM)                 //153
+    X(RM, CTRL_TIMER_SET_GR_TICK_FREQ)          //154
+    X(RM, CTRL_FIFO_SETUP_VF_ZOMBIE_SUBCTX_PDB) //155
+    X(RM, GET_CONSOLIDATED_GR_STATIC_INFO)      //156
+    X(RM, CTRL_DBG_SET_SINGLE_SM_SINGLE_STEP)   //157
+    X(RM, CTRL_GR_GET_TPC_PARTITION_MODE)       //158
+    X(RM, CTRL_GR_SET_TPC_PARTITION_MODE)       //159
+    X(UVM, UVM_PAGING_CHANNEL_ALLOCATE)         //160
+    X(UVM, UVM_PAGING_CHANNEL_DESTROY)          //161
+    X(UVM, UVM_PAGING_CHANNEL_MAP)              //162
+    X(UVM, UVM_PAGING_CHANNEL_UNMAP)            //163
+    X(UVM, UVM_PAGING_CHANNEL_PUSH_STREAM)      //164
+    X(UVM, UVM_PAGING_CHANNEL_SET_HANDLES)      //165
+    X(UVM, UVM_METHOD_STREAM_GUEST_PAGES_OPERATION)  //166
+    X(RM, CTRL_INTERNAL_QUIESCE_PMA_CHANNEL)    //167
+    X(RM, DCE_RM_INIT)                          //168
+    X(RM, REGISTER_VIRTUAL_EVENT_BUFFER)        //169
+    X(RM, CTRL_EVENT_BUFFER_UPDATE_GET)         //170
+    X(RM, GET_PLCABLE_ADDRESS_KIND)             //171
+    X(RM, CTRL_PERF_LIMITS_SET_STATUS_V2)       //172
+    X(RM, CTRL_INTERNAL_SRIOV_PROMOTE_PMA_STREAM)    //173
+    X(RM, CTRL_GET_MMU_DEBUG_MODE)              //174
+    X(RM, CTRL_INTERNAL_PROMOTE_FAULT_METHOD_BUFFERS) //175
+    X(RM, CTRL_FLCN_GET_CTX_BUFFER_SIZE)        //176
+    X(RM, CTRL_FLCN_GET_CTX_BUFFER_INFO)        //177
+    X(RM, DISABLE_CHANNELS)                     //178
+    X(RM, CTRL_FABRIC_MEMORY_DESCRIBE)          //179
+    X(RM, CTRL_FABRIC_MEM_STATS)                //180
+    X(RM, SAVE_HIBERNATION_DATA)                //181
+    X(RM, RESTORE_HIBERNATION_DATA)             //182
+    X(RM, CTRL_INTERNAL_MEMSYS_SET_ZBC_REFERENCED) //183
+    X(RM, CTRL_EXEC_PARTITIONS_CREATE)          //184
+    X(RM, CTRL_EXEC_PARTITIONS_DELETE)          //185
+    X(RM, CTRL_GPFIFO_GET_WORK_SUBMIT_TOKEN)    //186
+    X(RM, CTRL_GPFIFO_SET_WORK_SUBMIT_TOKEN_NOTIF_INDEX) //187
+    X(RM, PMA_SCRUBBER_SHARED_BUFFER_GUEST_PAGES_OPERATION)  //188
+    X(RM, CTRL_MASTER_GET_VIRTUAL_FUNCTION_ERROR_CONT_INTR_MASK)    //189
+    X(RM, SET_SYSMEM_DIRTY_PAGE_TRACKING_BUFFER)  //190
+    X(RM, CTRL_SUBDEVICE_GET_P2P_CAPS)          // 191
+    X(RM, CTRL_BUS_SET_P2P_MAPPING)             // 192
+    X(RM, CTRL_BUS_UNSET_P2P_MAPPING)           // 193
+    X(RM, CTRL_FLA_SETUP_INSTANCE_MEM_BLOCK)    // 194
+    X(RM, CTRL_GPU_MIGRATABLE_OPS)              // 195
+    X(RM, CTRL_GET_TOTAL_HS_CREDITS)            // 196
+    X(RM, CTRL_GET_HS_CREDITS)                  // 197
+    X(RM, CTRL_SET_HS_CREDITS)                  // 198
+    X(RM, CTRL_PM_AREA_PC_SAMPLER)              // 199
+    X(RM, INVALIDATE_TLB)                       // 200
+    X(RM, NUM_FUNCTIONS)                        //END
+#ifdef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+};
+#   undef X
+#   undef DEFINING_X_IN_RPC_GLOBAL_ENUMS_H
+#endif
+
+#ifndef E
+#    define E(RPC) NV_VGPU_MSG_EVENT_##RPC,
+#    define DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+enum {
+#endif
+    E(FIRST_EVENT = 0x1000)                      // 0x1000
+    E(GSP_INIT_DONE)                             // 0x1001
+    E(GSP_RUN_CPU_SEQUENCER)                     // 0x1002
+    E(POST_EVENT)                                // 0x1003
+    E(RC_TRIGGERED)                              // 0x1004
+    E(MMU_FAULT_QUEUED)                          // 0x1005
+    E(OS_ERROR_LOG)                              // 0x1006
+    E(RG_LINE_INTR)                              // 0x1007
+    E(GPUACCT_PERFMON_UTIL_SAMPLES)              // 0x1008
+    E(SIM_READ)                                  // 0x1009
+    E(SIM_WRITE)                                 // 0x100a
+    E(SEMAPHORE_SCHEDULE_CALLBACK)               // 0x100b
+    E(UCODE_LIBOS_PRINT)                         // 0x100c
+    E(VGPU_GSP_PLUGIN_TRIGGERED)                 // 0x100d
+    E(PERF_GPU_BOOST_SYNC_LIMITS_CALLBACK)       // 0x100e
+    E(PERF_BRIDGELESS_INFO_UPDATE)               // 0x100f
+    E(VGPU_CONFIG)                               // 0x1010
+    E(DISPLAY_MODESET)                           // 0x1011
+    E(EXTDEV_INTR_SERVICE)                       // 0x1012
+    E(NVLINK_INBAND_RECEIVED_DATA_256)           // 0x1013
+    E(NVLINK_INBAND_RECEIVED_DATA_512)           // 0x1014
+    E(NVLINK_INBAND_RECEIVED_DATA_1024)          // 0x1015
+    E(NVLINK_INBAND_RECEIVED_DATA_2048)          // 0x1016
+    E(NVLINK_INBAND_RECEIVED_DATA_4096)          // 0x1017
+    E(TIMED_SEMAPHORE_RELEASE)                   // 0x1018
+    E(NVLINK_IS_GPU_DEGRADED)                    // 0x1019
+    E(PFM_REQ_HNDLR_STATE_SYNC_CALLBACK)         // 0x101a
+    E(GSP_SEND_USER_SHARED_DATA)                 // 0x101b
+    E(NVLINK_FAULT_UP)                           // 0x101c
+    E(GSP_LOCKDOWN_NOTICE)                       // 0x101d
+    E(MIG_CI_CONFIG_UPDATE)                      // 0x101e
+    E(NUM_EVENTS)                                // END
+#ifdef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+};
+#   undef E
+#   undef DEFINING_E_IN_RPC_GLOBAL_ENUMS_H
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h
new file mode 100644 (file)
index 0000000..f14b238
--- /dev/null
@@ -0,0 +1,51 @@
+#ifndef __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
+#define __src_nvidia_kernel_inc_vgpu_rpc_headers_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2017-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#define MAX_GPC_COUNT           32
+
+typedef enum
+{
+    NV_RPC_UPDATE_PDE_BAR_1,
+    NV_RPC_UPDATE_PDE_BAR_2,
+    NV_RPC_UPDATE_PDE_BAR_INVALID,
+} NV_RPC_UPDATE_PDE_BAR_TYPE;
+
+typedef struct VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS 
+{
+    NvU32 headIndex;
+    NvU32 maxHResolution;
+    NvU32 maxVResolution;
+} VIRTUAL_DISPLAY_GET_MAX_RESOLUTION_PARAMS;
+
+typedef struct VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS 
+{
+    NvU32 numHeads;
+    NvU32 maxNumHeads;
+} VIRTUAL_DISPLAY_GET_NUM_HEADS_PARAMS;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h b/drivers/gpu/drm/nouveau/include/nvrm/535.113.01/nvidia/kernel/inc/vgpu/sdk-structures.h
new file mode 100644 (file)
index 0000000..7801af2
--- /dev/null
@@ -0,0 +1,40 @@
+#ifndef __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
+#define __src_nvidia_kernel_inc_vgpu_sdk_structures_h__
+
+/* Excerpt of RM headers from https://github.com/NVIDIA/open-gpu-kernel-modules/tree/535.113.01 */
+
+/*
+ * SPDX-FileCopyrightText: Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: MIT
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+struct pte_desc
+{
+    NvU32 idr:2;
+    NvU32 reserved1:14;
+    NvU32 length:16;
+    union {
+        NvU64 pte; // PTE when IDR==0; PDE when IDR > 0
+        NvU64 pde; // PTE when IDR==0; PDE when IDR > 0
+    } pte_pde[]  NV_ALIGN_BYTES(8); // PTE when IDR==0; PDE when IDR > 0
+};
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h b/drivers/gpu/drm/nouveau/include/nvrm/nvtypes.h
new file mode 100644 (file)
index 0000000..e6833df
--- /dev/null
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVRM_NVTYPES_H__
+#define __NVRM_NVTYPES_H__
+
+#define NV_ALIGN_BYTES(a) __attribute__ ((__aligned__(a)))
+#define NV_DECLARE_ALIGNED(f,a) f __attribute__ ((__aligned__(a)))
+
+typedef u32 NvV32;
+
+typedef u8 NvU8;
+typedef u16 NvU16;
+typedef u32 NvU32;
+typedef u64 NvU64;
+
+typedef void* NvP64;
+
+typedef NvU8 NvBool;
+typedef NvU32 NvHandle;
+typedef NvU64 NvLength;
+
+typedef NvU64 RmPhysAddr;
+
+typedef NvU32 NV_STATUS;
+#endif
index 9e878cdc8e38e141ecf551a09f34b3fab90e9cef..479effcf607e261fac73361958a0a855cf90d315 100644 (file)
@@ -27,6 +27,8 @@
 #include "dispnv04/hw.h"
 #include "nouveau_encoder.h"
 
+#include <subdev/gsp.h>
+
 #include <linux/io-mapping.h>
 #include <linux/firmware.h>
 
@@ -2087,7 +2089,8 @@ nouveau_bios_init(struct drm_device *dev)
        int ret;
 
        /* only relevant for PCI devices */
-       if (!dev_is_pci(dev->dev))
+       if (!dev_is_pci(dev->dev) ||
+           nvkm_gsp_rm(nvxx_device(&drm->client.device)->gsp))
                return 0;
 
        if (!NVInitVBIOS(dev))
index 097246e10cdb738b95fd8a81c5479e91a3f72267..14da22fa3b5b72751ad4955c8456ef84e647e347 100644 (file)
@@ -36,6 +36,7 @@ int
 nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct nvif_disp *disp)
 {
        static const struct nvif_mclass disps[] = {
+               { AD102_DISP, 0 },
                { GA102_DISP, 0 },
                { TU102_DISP, 0 },
                { GV100_DISP, 0 },
index 374212da9e959479c656ac36bfaba6a135dc0785..adc60b25f8e6c61e51c7db37805fa24a89378fae 100644 (file)
@@ -112,6 +112,22 @@ nvkm_firmware_put(const struct firmware *fw)
 
 #define nvkm_firmware_mem(p) container_of((p), struct nvkm_firmware, mem.memory)
 
+static struct scatterlist *
+nvkm_firmware_mem_sgl(struct nvkm_memory *memory)
+{
+       struct nvkm_firmware *fw = nvkm_firmware_mem(memory);
+
+       switch (fw->func->type) {
+       case NVKM_FIRMWARE_IMG_DMA: return &fw->mem.sgl;
+       case NVKM_FIRMWARE_IMG_SGT: return  fw->mem.sgt.sgl;
+       default:
+               WARN_ON(1);
+               break;
+       }
+
+       return NULL;
+}
+
 static int
 nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *vmm,
                      struct nvkm_vma *vma, void *argv, u32 argc)
@@ -120,10 +136,10 @@ nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *v
        struct nvkm_vmm_map map = {
                .memory = &fw->mem.memory,
                .offset = offset,
-               .sgl = &fw->mem.sgl,
+               .sgl = nvkm_firmware_mem_sgl(memory),
        };
 
-       if (WARN_ON(fw->func->type != NVKM_FIRMWARE_IMG_DMA))
+       if (!map.sgl)
                return -ENOSYS;
 
        return nvkm_vmm_map(vmm, vma, argv, argc, &map);
@@ -132,12 +148,15 @@ nvkm_firmware_mem_map(struct nvkm_memory *memory, u64 offset, struct nvkm_vmm *v
 static u64
 nvkm_firmware_mem_size(struct nvkm_memory *memory)
 {
-       return sg_dma_len(&nvkm_firmware_mem(memory)->mem.sgl);
+       struct scatterlist *sgl = nvkm_firmware_mem_sgl(memory);
+
+       return sgl ? sg_dma_len(sgl) : 0;
 }
 
 static u64
 nvkm_firmware_mem_addr(struct nvkm_memory *memory)
 {
+       BUG_ON(nvkm_firmware_mem(memory)->func->type != NVKM_FIRMWARE_IMG_DMA);
        return nvkm_firmware_mem(memory)->phys;
 }
 
@@ -188,6 +207,12 @@ nvkm_firmware_dtor(struct nvkm_firmware *fw)
                nvkm_memory_unref(&memory);
                dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys);
                break;
+       case NVKM_FIRMWARE_IMG_SGT:
+               nvkm_memory_unref(&memory);
+               dma_unmap_sgtable(fw->device->dev, &fw->mem.sgt, DMA_TO_DEVICE, 0);
+               sg_free_table(&fw->mem.sgt);
+               vfree(fw->img);
+               break;
        default:
                WARN_ON(1);
                break;
@@ -225,6 +250,49 @@ nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name,
                sg_dma_len(&fw->mem.sgl) = len;
        }
                break;
+       case NVKM_FIRMWARE_IMG_SGT:
+               len = ALIGN(fw->len, PAGE_SIZE);
+
+               fw->img = vmalloc(len);
+               if (fw->img) {
+                       int pages = len >> PAGE_SHIFT;
+                       int ret = 0;
+
+                       memcpy(fw->img, src, fw->len);
+
+                       ret = sg_alloc_table(&fw->mem.sgt, pages, GFP_KERNEL);
+                       if (ret == 0) {
+                               struct scatterlist *sgl;
+                               u8 *data = fw->img;
+                               int i;
+
+                               for_each_sgtable_sg(&fw->mem.sgt, sgl, i) {
+                                       struct page *page = vmalloc_to_page(data);
+
+                                       if (!page) {
+                                               ret = -EFAULT;
+                                               break;
+                                       }
+
+                                       sg_set_page(sgl, page, PAGE_SIZE, 0);
+                                       data += PAGE_SIZE;
+                               }
+
+                               if (ret == 0) {
+                                       ret = dma_map_sgtable(fw->device->dev, &fw->mem.sgt,
+                                                             DMA_TO_DEVICE, 0);
+                               }
+
+                               if (ret)
+                                       sg_free_table(&fw->mem.sgt);
+                       }
+
+                       if (ret) {
+                               vfree(fw->img);
+                               fw->img = NULL;
+                       }
+               }
+               break;
        default:
                WARN_ON(1);
                return -EINVAL;
index c6dfed18f35b4777f36096f733ff19993d2ab001..bfaaff645a3475608fd5626144c02eb1fef54ffd 100644 (file)
@@ -17,6 +17,8 @@ include $(src)/nvkm/engine/msppp/Kbuild
 include $(src)/nvkm/engine/msvld/Kbuild
 include $(src)/nvkm/engine/nvenc/Kbuild
 include $(src)/nvkm/engine/nvdec/Kbuild
+include $(src)/nvkm/engine/nvjpg/Kbuild
+include $(src)/nvkm/engine/ofa/Kbuild
 include $(src)/nvkm/engine/pm/Kbuild
 include $(src)/nvkm/engine/sec/Kbuild
 include $(src)/nvkm/engine/sec2/Kbuild
index 8bf1635ffabc093f8c1fca62c86fd70e71a357ec..165d61fc5d6c55dc056f37b882e65cc77b49527a 100644 (file)
@@ -10,3 +10,5 @@ nvkm-y += nvkm/engine/ce/gv100.o
 nvkm-y += nvkm/engine/ce/tu102.o
 nvkm-y += nvkm/engine/ce/ga100.o
 nvkm-y += nvkm/engine/ce/ga102.o
+
+nvkm-y += nvkm/engine/ce/r535.o
index 315a69f7fdd128d3d57a3de27c97b03bdb9f8630..9427a592bd16c05d38c089bf438020feb2163786 100644 (file)
@@ -21,6 +21,7 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
 #include <subdev/vfn.h>
 
 #include <nvif/class.h>
@@ -88,5 +89,8 @@ int
 ga100_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
             struct nvkm_engine **pengine)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ce_new(&ga100_ce, device, type, inst, pengine);
+
        return nvkm_engine_new_(&ga100_ce, device, type, inst, true, pengine);
 }
index 461b73c7e2e0f351506597714acc85cf2f643d5a..ce56ede7c2e9433cdab33040b2998888f2053045 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static const struct nvkm_engine_func
@@ -41,5 +43,8 @@ int
 ga102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
             struct nvkm_engine **pengine)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ce_new(&ga102_ce, device, type, inst, pengine);
+
        return nvkm_engine_new_(&ga102_ce, device, type, inst, true, pengine);
 }
index 0be72c463b21a2c9f42a6b5a1aec62a4ed9cc456..806a76a72249306f8879da5efebf1b6d5d607a59 100644 (file)
@@ -3,6 +3,9 @@
 #define __NVKM_CE_PRIV_H__
 #include <engine/ce.h>
 
+int r535_ce_new(const struct nvkm_engine_func *, struct nvkm_device *,
+               enum nvkm_subdev_type, int, struct nvkm_engine **);
+
 void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_chan *);
 void gk104_ce_intr(struct nvkm_engine *);
 void gp100_ce_intr(struct nvkm_engine *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/r535.c
new file mode 100644 (file)
index 0000000..bd0d435
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/clc0b5sw.h>
+
+struct r535_ce_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_ce_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_ce_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_ce_obj = {
+       .dtor = r535_ce_obj_dtor,
+};
+
+static int
+r535_ce_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_ce_obj *obj;
+       NVC0B5_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_ce_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->version = 1;
+       args->engineType = NV2080_ENGINE_TYPE_COPY0 + oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_ce_dtor(struct nvkm_engine *engine)
+{
+       kfree(engine->func);
+       return engine;
+}
+
+int
+r535_ce_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+           enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+       struct nvkm_engine_func *rm;
+       int nclass, ret;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_ce_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_ce_obj_ctor;
+       }
+
+       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 9563c0175142f6651dadc49d9eade581bfe173ff..7c8647dcb349e1bb8bd225ef7641c1b25dd4fa3c 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static const struct nvkm_engine_func
@@ -37,5 +39,8 @@ int
 tu102_ce_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
             struct nvkm_engine **pengine)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ce_new(&tu102_ce, device, type, inst, pengine);
+
        return nvkm_engine_new_(&tu102_ce, device, type, inst, true, pengine);
 }
index 1c81e5b34d29d871b35431c47b9d1ff651fc5245..31ed3da32fe7e87655a8796cb47b60d86c9213da 100644 (file)
@@ -2408,7 +2408,7 @@ nv162_chipset = {
        .fb       = { 0x00000001, tu102_fb_new },
        .fuse     = { 0x00000001, gm107_fuse_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
-       .gsp      = { 0x00000001, gv100_gsp_new },
+       .gsp      = { 0x00000001, tu102_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .ltc      = { 0x00000001, gp102_ltc_new },
@@ -2426,8 +2426,8 @@ nv162_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, tu102_fifo_new },
        .gr       = { 0x00000001, tu102_gr_new },
-       .nvdec    = { 0x00000001, gm107_nvdec_new },
-       .nvenc    = { 0x00000001, gm107_nvenc_new },
+       .nvdec    = { 0x00000001, tu102_nvdec_new },
+       .nvenc    = { 0x00000001, tu102_nvenc_new },
        .sec2     = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2443,7 +2443,7 @@ nv164_chipset = {
        .fb       = { 0x00000001, tu102_fb_new },
        .fuse     = { 0x00000001, gm107_fuse_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
-       .gsp      = { 0x00000001, gv100_gsp_new },
+       .gsp      = { 0x00000001, tu102_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .ltc      = { 0x00000001, gp102_ltc_new },
@@ -2461,8 +2461,8 @@ nv164_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, tu102_fifo_new },
        .gr       = { 0x00000001, tu102_gr_new },
-       .nvdec    = { 0x00000003, gm107_nvdec_new },
-       .nvenc    = { 0x00000001, gm107_nvenc_new },
+       .nvdec    = { 0x00000003, tu102_nvdec_new },
+       .nvenc    = { 0x00000001, tu102_nvenc_new },
        .sec2     = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2478,7 +2478,7 @@ nv166_chipset = {
        .fb       = { 0x00000001, tu102_fb_new },
        .fuse     = { 0x00000001, gm107_fuse_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
-       .gsp      = { 0x00000001, gv100_gsp_new },
+       .gsp      = { 0x00000001, tu102_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .ltc      = { 0x00000001, gp102_ltc_new },
@@ -2496,8 +2496,8 @@ nv166_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, tu102_fifo_new },
        .gr       = { 0x00000001, tu102_gr_new },
-       .nvdec    = { 0x00000007, gm107_nvdec_new },
-       .nvenc    = { 0x00000001, gm107_nvenc_new },
+       .nvdec    = { 0x00000007, tu102_nvdec_new },
+       .nvenc    = { 0x00000001, tu102_nvenc_new },
        .sec2     = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2513,7 +2513,7 @@ nv167_chipset = {
        .fb       = { 0x00000001, tu102_fb_new },
        .fuse     = { 0x00000001, gm107_fuse_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
-       .gsp      = { 0x00000001, gv100_gsp_new },
+       .gsp      = { 0x00000001, tu116_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .ltc      = { 0x00000001, gp102_ltc_new },
@@ -2531,8 +2531,8 @@ nv167_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, tu102_fifo_new },
        .gr       = { 0x00000001, tu102_gr_new },
-       .nvdec    = { 0x00000001, gm107_nvdec_new },
-       .nvenc    = { 0x00000001, gm107_nvenc_new },
+       .nvdec    = { 0x00000001, tu102_nvdec_new },
+       .nvenc    = { 0x00000001, tu102_nvenc_new },
        .sec2     = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2548,7 +2548,7 @@ nv168_chipset = {
        .fb       = { 0x00000001, tu102_fb_new },
        .fuse     = { 0x00000001, gm107_fuse_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
-       .gsp      = { 0x00000001, gv100_gsp_new },
+       .gsp      = { 0x00000001, tu116_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .ltc      = { 0x00000001, gp102_ltc_new },
@@ -2566,8 +2566,8 @@ nv168_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, tu102_fifo_new },
        .gr       = { 0x00000001, tu102_gr_new },
-       .nvdec    = { 0x00000001, gm107_nvdec_new },
-       .nvenc    = { 0x00000001, gm107_nvenc_new },
+       .nvdec    = { 0x00000001, tu102_nvdec_new },
+       .nvenc    = { 0x00000001, tu102_nvenc_new },
        .sec2     = { 0x00000001, tu102_sec2_new },
 };
 
@@ -2580,6 +2580,7 @@ nv170_chipset = {
        .fault    = { 0x00000001, tu102_fault_new },
        .fb       = { 0x00000001, ga100_fb_new },
        .gpio     = { 0x00000001, gk104_gpio_new },
+       .gsp      = { 0x00000001, ga100_gsp_new },
        .i2c      = { 0x00000001, gm200_i2c_new },
        .imem     = { 0x00000001, nv50_instmem_new },
        .mc       = { 0x00000001, ga100_mc_new },
@@ -2591,6 +2592,9 @@ nv170_chipset = {
        .vfn      = { 0x00000001, ga100_vfn_new },
        .ce       = { 0x000003ff, ga100_ce_new },
        .fifo     = { 0x00000001, ga100_fifo_new },
+       .nvdec    = { 0x0000001f, ga100_nvdec_new },
+       .nvjpg    = { 0x00000001, ga100_nvjpg_new },
+       .ofa      = { 0x00000001, ga100_ofa_new },
 };
 
 static const struct nvkm_device_chip
@@ -2619,7 +2623,9 @@ nv172_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, ga102_fifo_new },
        .gr       = { 0x00000001, ga102_gr_new },
-       .nvdec    = { 0x00000001, ga102_nvdec_new },
+       .nvdec    = { 0x00000003, ga102_nvdec_new },
+       .nvenc    = { 0x00000001, ga102_nvenc_new },
+       .ofa      = { 0x00000001, ga102_ofa_new },
        .sec2     = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2649,7 +2655,9 @@ nv173_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, ga102_fifo_new },
        .gr       = { 0x00000001, ga102_gr_new },
-       .nvdec    = { 0x00000001, ga102_nvdec_new },
+       .nvdec    = { 0x00000003, ga102_nvdec_new },
+       .nvenc    = { 0x00000001, ga102_nvenc_new },
+       .ofa      = { 0x00000001, ga102_ofa_new },
        .sec2     = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2679,7 +2687,9 @@ nv174_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, ga102_fifo_new },
        .gr       = { 0x00000001, ga102_gr_new },
-       .nvdec    = { 0x00000001, ga102_nvdec_new },
+       .nvdec    = { 0x00000003, ga102_nvdec_new },
+       .nvenc    = { 0x00000001, ga102_nvenc_new },
+       .ofa      = { 0x00000001, ga102_ofa_new },
        .sec2     = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2709,7 +2719,9 @@ nv176_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, ga102_fifo_new },
        .gr       = { 0x00000001, ga102_gr_new },
-       .nvdec    = { 0x00000001, ga102_nvdec_new },
+       .nvdec    = { 0x00000003, ga102_nvdec_new },
+       .nvenc    = { 0x00000001, ga102_nvenc_new },
+       .ofa      = { 0x00000001, ga102_ofa_new },
        .sec2     = { 0x00000001, ga102_sec2_new },
 };
 
@@ -2739,7 +2751,139 @@ nv177_chipset = {
        .dma      = { 0x00000001, gv100_dma_new },
        .fifo     = { 0x00000001, ga102_fifo_new },
        .gr       = { 0x00000001, ga102_gr_new },
-       .nvdec    = { 0x00000001, ga102_nvdec_new },
+       .nvdec    = { 0x00000003, ga102_nvdec_new },
+       .nvenc    = { 0x00000001, ga102_nvenc_new },
+       .ofa      = { 0x00000001, ga102_ofa_new },
+       .sec2     = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv192_chipset = {
+       .name = "AD102",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fault    = { 0x00000001, tu102_fault_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gsp      = { 0x00000001, ad102_gsp_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .vfn      = { 0x00000001, ga100_vfn_new },
+       .ce       = { 0x0000001f, ga102_ce_new },
+       .disp     = { 0x00000001, ad102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+       .gr       = { 0x00000001, ad102_gr_new },
+       .nvdec    = { 0x0000000f, ad102_nvdec_new },
+       .nvenc    = { 0x00000007, ad102_nvenc_new },
+       .nvjpg    = { 0x0000000f, ad102_nvjpg_new },
+       .ofa      = { 0x00000001, ad102_ofa_new },
+       .sec2     = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv193_chipset = {
+       .name = "AD103",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fault    = { 0x00000001, tu102_fault_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gsp      = { 0x00000001, ad102_gsp_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .vfn      = { 0x00000001, ga100_vfn_new },
+       .ce       = { 0x0000001f, ga102_ce_new },
+       .disp     = { 0x00000001, ad102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+       .gr       = { 0x00000001, ad102_gr_new },
+       .nvdec    = { 0x0000000f, ad102_nvdec_new },
+       .nvenc    = { 0x00000007, ad102_nvenc_new },
+       .nvjpg    = { 0x0000000f, ad102_nvjpg_new },
+       .ofa      = { 0x00000001, ad102_ofa_new },
+       .sec2     = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv194_chipset = {
+       .name = "AD104",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fault    = { 0x00000001, tu102_fault_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gsp      = { 0x00000001, ad102_gsp_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .vfn      = { 0x00000001, ga100_vfn_new },
+       .ce       = { 0x0000001f, ga102_ce_new },
+       .disp     = { 0x00000001, ad102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+       .gr       = { 0x00000001, ad102_gr_new },
+       .nvdec    = { 0x0000000f, ad102_nvdec_new },
+       .nvenc    = { 0x00000007, ad102_nvenc_new },
+       .nvjpg    = { 0x0000000f, ad102_nvjpg_new },
+       .ofa      = { 0x00000001, ad102_ofa_new },
+       .sec2     = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv196_chipset = {
+       .name = "AD106",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fault    = { 0x00000001, tu102_fault_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gsp      = { 0x00000001, ad102_gsp_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .vfn      = { 0x00000001, ga100_vfn_new },
+       .ce       = { 0x0000001f, ga102_ce_new },
+       .disp     = { 0x00000001, ad102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+       .gr       = { 0x00000001, ad102_gr_new },
+       .nvdec    = { 0x0000000f, ad102_nvdec_new },
+       .nvenc    = { 0x00000007, ad102_nvenc_new },
+       .nvjpg    = { 0x0000000f, ad102_nvjpg_new },
+       .ofa      = { 0x00000001, ad102_ofa_new },
+       .sec2     = { 0x00000001, ga102_sec2_new },
+};
+
+static const struct nvkm_device_chip
+nv197_chipset = {
+       .name = "AD107",
+       .bar      = { 0x00000001, tu102_bar_new },
+       .bios     = { 0x00000001, nvkm_bios_new },
+       .devinit  = { 0x00000001, ga100_devinit_new },
+       .fault    = { 0x00000001, tu102_fault_new },
+       .fb       = { 0x00000001, ga102_fb_new },
+       .gsp      = { 0x00000001, ad102_gsp_new },
+       .imem     = { 0x00000001, nv50_instmem_new },
+       .mmu      = { 0x00000001, tu102_mmu_new },
+       .pci      = { 0x00000001, gp100_pci_new },
+       .timer    = { 0x00000001, gk20a_timer_new },
+       .vfn      = { 0x00000001, ga100_vfn_new },
+       .ce       = { 0x0000001f, ga102_ce_new },
+       .disp     = { 0x00000001, ad102_disp_new },
+       .dma      = { 0x00000001, gv100_dma_new },
+       .fifo     = { 0x00000001, ga102_fifo_new },
+       .gr       = { 0x00000001, ad102_gr_new },
+       .nvdec    = { 0x0000000f, ad102_nvdec_new },
+       .nvenc    = { 0x00000007, ad102_nvenc_new },
+       .nvjpg    = { 0x0000000f, ad102_nvjpg_new },
+       .ofa      = { 0x00000001, ad102_ofa_new },
        .sec2     = { 0x00000001, ga102_sec2_new },
 };
 
@@ -3061,6 +3205,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                        case 0x140: device->card_type = GV100; break;
                        case 0x160: device->card_type = TU100; break;
                        case 0x170: device->card_type = GA100; break;
+                       case 0x190: device->card_type = AD100; break;
                        default:
                                break;
                        }
@@ -3163,6 +3308,11 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
                case 0x174: device->chip = &nv174_chipset; break;
                case 0x176: device->chip = &nv176_chipset; break;
                case 0x177: device->chip = &nv177_chipset; break;
+               case 0x192: device->chip = &nv192_chipset; break;
+               case 0x193: device->chip = &nv193_chipset; break;
+               case 0x194: device->chip = &nv194_chipset; break;
+               case 0x196: device->chip = &nv196_chipset; break;
+               case 0x197: device->chip = &nv197_chipset; break;
                default:
                        if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {
                                switch (device->chipset) {
index 24faaac1589178a137c969ed4b7c36b97908d4d3..bf3176bec18a5fbd6e2f42dfe6a751ec7f6a587a 100644 (file)
@@ -43,6 +43,8 @@
 #include <engine/msvld.h>
 #include <engine/nvenc.h>
 #include <engine/nvdec.h>
+#include <engine/nvjpg.h>
+#include <engine/ofa.h>
 #include <engine/pm.h>
 #include <engine/sec.h>
 #include <engine/sec2.h>
index 9b39ec34161501bdcc91514e8ca4ec7b99e82719..7fd4800a876ab77f47ab51913ec841a9f881b639 100644 (file)
@@ -147,6 +147,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
        case GV100: args->v0.family = NV_DEVICE_INFO_V0_VOLTA; break;
        case TU100: args->v0.family = NV_DEVICE_INFO_V0_TURING; break;
        case GA100: args->v0.family = NV_DEVICE_INFO_V0_AMPERE; break;
+       case AD100: args->v0.family = NV_DEVICE_INFO_V0_ADA; break;
        default:
                args->v0.family = 0;
                break;
index e1aecd3fe96c1058d82565122adbaffa96df83b8..e346e924fee8bdb14740daa86614d0540e690a64 100644 (file)
@@ -27,6 +27,9 @@ nvkm-y += nvkm/engine/disp/gp102.o
 nvkm-y += nvkm/engine/disp/gv100.o
 nvkm-y += nvkm/engine/disp/tu102.o
 nvkm-y += nvkm/engine/disp/ga102.o
+nvkm-y += nvkm/engine/disp/ad102.o
+
+nvkm-y += nvkm/engine/disp/r535.o
 
 nvkm-y += nvkm/engine/disp/udisp.o
 nvkm-y += nvkm/engine/disp/uconn.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ad102.c
new file mode 100644 (file)
index 0000000..7f300a7
--- /dev/null
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "chan.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+/* Display engine description for AD10x: channel classes reuse the GV100/GA102
+ * implementations, with an AD102-specific root and core-channel class.
+ */
+static const struct nvkm_disp_func
+ad102_disp = {
+       .uevent = &gv100_disp_chan_uevent,
+       .ramht_size = 0x2000,
+       .root = {  0, 0,AD102_DISP },
+       .user = {
+               {{-1,-1,GV100_DISP_CAPS                  }, gv100_disp_caps_new },
+               {{ 0, 0,GA102_DISP_CURSOR                }, nvkm_disp_chan_new, &gv100_disp_curs },
+               {{ 0, 0,GA102_DISP_WINDOW_IMM_CHANNEL_DMA}, nvkm_disp_wndw_new, &gv100_disp_wimm },
+               {{ 0, 0,AD102_DISP_CORE_CHANNEL_DMA      }, nvkm_disp_core_new, &gv100_disp_core },
+               {{ 0, 0,GA102_DISP_WINDOW_CHANNEL_DMA    }, nvkm_disp_wndw_new, &gv100_disp_wndw },
+               {}
+       },
+};
+
+/* AD10x display is only supported via GSP-RM firmware; there is no
+ * non-GSP fallback path, hence -ENODEV when GSP-RM is unavailable.
+ */
+int
+ad102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+              struct nvkm_disp **pdisp)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_disp_new(&ad102_disp, device, type, inst, pdisp);
+
+       return -ENODEV;
+}
index 39f7e7ce9f4a2130f555c3372cb0312adb0b4b7f..457ec5db794d0b2842b108626508bcad68daa8cc 100644 (file)
@@ -105,7 +105,7 @@ nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
        struct nvkm_outp *outp;
 
        if (disp->func->fini)
-               disp->func->fini(disp);
+               disp->func->fini(disp, suspend);
 
        list_for_each_entry(outp, &disp->outps, head) {
                if (outp->func->fini)
@@ -137,7 +137,8 @@ nvkm_disp_init(struct nvkm_engine *engine)
         * each output resource to 'fully enabled'.
         */
        list_for_each_entry(ior, &disp->iors, head) {
-               ior->func->power(ior, true, true, true, true, true);
+               if (ior->func->power)
+                       ior->func->power(ior, true, true, true, true, true);
        }
 
        return 0;
@@ -208,6 +209,9 @@ nvkm_disp_dtor(struct nvkm_engine *engine)
                nvkm_head_del(&head);
        }
 
+       if (disp->func->dtor)
+               disp->func->dtor(disp);
+
        return data;
 }
 
index 398336ffb685ae080807a869427be2f97b009e43..02029051015737fb34d2ab9dadf299e7834bcee9 100644 (file)
@@ -22,6 +22,10 @@ struct nvkm_disp_chan {
        u64 push;
 
        u32 suspend_put;
+
+       struct {
+               struct nvkm_gsp_object object;
+       } rm;
 };
 
 int nvkm_disp_core_new(const struct nvkm_oclass *, void *, u32, struct nvkm_object **);
index efe66ba3c61f8e49c6ce920298e8f884ee194044..ab0a85c9243047b91913c8a9205535beb5e01981 100644 (file)
@@ -24,6 +24,7 @@
 #include "head.h"
 #include "ior.h"
 
+#include <subdev/gsp.h>
 #include <subdev/timer.h>
 
 #include <nvif/class.h>
@@ -147,5 +148,8 @@ int
 ga102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_disp **pdisp)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_disp_new(&ga102_disp, device, type, inst, pdisp);
+
        return nvkm_disp_new_(&ga102_disp, device, type, inst, pdisp);
 }
index b48ead31da30e2617b6a82096f80fd8e59a7f6b3..83a1323600ae788dd917e5e38d8701aeb4150d00 100644 (file)
@@ -1154,7 +1154,7 @@ gf119_disp_intr(struct nvkm_disp *disp)
 }
 
 void
-gf119_disp_fini(struct nvkm_disp *disp)
+gf119_disp_fini(struct nvkm_disp *disp, bool suspend)
 {
        struct nvkm_device *device = disp->engine.subdev.device;
        /* disable all interrupts */
index e1634f7bca56f6b0df1f1320f657614876826d27..cfa3698d3a2fd675e0da00e88979a05e364ffa4a 100644 (file)
@@ -96,7 +96,7 @@ gv100_sor_dp = {
        .watermark = gv100_sor_dp_watermark,
 };
 
-static void
+void
 gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 size)
 {
        struct nvkm_device *device = ior->disp->engine.subdev.device;
@@ -120,7 +120,7 @@ gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *ior, int head, void *data, u32 siz
        nvkm_mask(device, 0x6f0100 + hoff, 0x00000001, 0x00000001);
 }
 
-static void
+void
 gv100_sor_hdmi_infoframe_avi(struct nvkm_ior *ior, int head, void *data, u32 size)
 {
        struct nvkm_device *device = ior->disp->engine.subdev.device;
@@ -1115,7 +1115,7 @@ gv100_disp_intr(struct nvkm_disp *disp)
 }
 
 void
-gv100_disp_fini(struct nvkm_disp *disp)
+gv100_disp_fini(struct nvkm_disp *disp, bool suspend)
 {
        struct nvkm_device *device = disp->engine.subdev.device;
        nvkm_wr32(device, 0x611db0, 0x00000000);
index 9beb9d1e86334149779941139acef7b6ddb044a1..3ba04bead2f9cfc3b75b62469670c11d3fdc8476 100644 (file)
@@ -187,6 +187,8 @@ int gp100_sor_new(struct nvkm_disp *, int);
 int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
 void gv100_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
 extern const struct nvkm_ior_func_hdmi gv100_sor_hdmi;
+void gv100_sor_hdmi_infoframe_avi(struct nvkm_ior *, int, void *, u32);
+void gv100_sor_hdmi_infoframe_vsi(struct nvkm_ior *, int, void *, u32);
 void gv100_sor_dp_audio(struct nvkm_ior *, int, bool);
 void gv100_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
 void gv100_sor_dp_watermark(struct nvkm_ior *, int, u8);
index 2d05e2f7e46b8f760c28fe9b999a394a756683e8..03a5f88a4b993c45b02de1df3eaea10515ee21d6 100644 (file)
@@ -1504,7 +1504,7 @@ nv50_disp_intr(struct nvkm_disp *disp)
 }
 
 void
-nv50_disp_fini(struct nvkm_disp *disp)
+nv50_disp_fini(struct nvkm_disp *disp, bool suspend)
 {
        struct nvkm_device *device = disp->engine.subdev.device;
        /* disable all interrupts */
index bfb2a4db8d644730e9f05b42871f043eeaaa95fa..28adc5a30f2f24db05e3294f0ace97f9bb47079f 100644 (file)
@@ -386,7 +386,8 @@ nvkm_outp_new_(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
        outp->disp = disp;
        outp->index = index;
        outp->info = *dcbE;
-       outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
+       if (!disp->rm.client.gsp)
+               outp->i2c = nvkm_i2c_bus_find(i2c, dcbE->i2c_index);
 
        OUTP_DBG(outp, "type %02x loc %d or %d link %d con %x "
                       "edid %x bus %d head %x",
index ec5292a8f3c8513782b886aacf60429c84a382a5..a3fd7cb7c4883953f4940ff68ab02b4d4cbc09c9 100644 (file)
@@ -8,6 +8,9 @@ struct nvkm_head;
 struct nvkm_outp;
 struct dcb_output;
 
+int r535_disp_new(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+                 struct nvkm_disp **);
+
 int nvkm_disp_ctor(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
                   struct nvkm_disp *);
 int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
@@ -15,9 +18,10 @@ int nvkm_disp_new_(const struct nvkm_disp_func *, struct nvkm_device *, enum nvk
 void nvkm_disp_vblank(struct nvkm_disp *, int head);
 
 struct nvkm_disp_func {
+       void (*dtor)(struct nvkm_disp *);
        int (*oneinit)(struct nvkm_disp *);
        int (*init)(struct nvkm_disp *);
-       void (*fini)(struct nvkm_disp *);
+       void (*fini)(struct nvkm_disp *, bool suspend);
        void (*intr)(struct nvkm_disp *);
        void (*intr_error)(struct nvkm_disp *, int chid);
 
@@ -32,7 +36,7 @@ struct nvkm_disp_func {
 
        u16 ramht_size;
 
-       const struct nvkm_sclass root;
+       struct nvkm_sclass root;
 
        struct nvkm_disp_user {
                struct nvkm_sclass base;
@@ -44,7 +48,7 @@ struct nvkm_disp_func {
 
 int nv50_disp_oneinit(struct nvkm_disp *);
 int nv50_disp_init(struct nvkm_disp *);
-void nv50_disp_fini(struct nvkm_disp *);
+void nv50_disp_fini(struct nvkm_disp *, bool suspend);
 void nv50_disp_intr(struct nvkm_disp *);
 extern const struct nvkm_enum nv50_disp_intr_error_type[];
 void nv50_disp_super(struct work_struct *);
@@ -56,12 +60,12 @@ void nv50_disp_super_2_2(struct nvkm_disp *, struct nvkm_head *);
 void nv50_disp_super_3_0(struct nvkm_disp *, struct nvkm_head *);
 
 int gf119_disp_init(struct nvkm_disp *);
-void gf119_disp_fini(struct nvkm_disp *);
+void gf119_disp_fini(struct nvkm_disp *, bool suspend);
 void gf119_disp_intr(struct nvkm_disp *);
 void gf119_disp_super(struct work_struct *);
 void gf119_disp_intr_error(struct nvkm_disp *, int);
 
-void gv100_disp_fini(struct nvkm_disp *);
+void gv100_disp_fini(struct nvkm_disp *, bool suspend);
 void gv100_disp_intr(struct nvkm_disp *);
 void gv100_disp_super(struct work_struct *);
 int gv100_disp_wndw_cnt(struct nvkm_disp *, unsigned long *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/r535.c
new file mode 100644 (file)
index 0000000..2980350
--- /dev/null
@@ -0,0 +1,1671 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "chan.h"
+#include "conn.h"
+#include "dp.h"
+#include "head.h"
+#include "ior.h"
+#include "outp.h"
+
+#include <core/ramht.h>
+#include <subdev/bios.h>
+#include <subdev/bios/conn.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <subdev/vfn.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dfp.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073dp.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073specific.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0073/ctrl0073system.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
+#include <nvrm/535.113.01/nvidia/generated/g_mem_desc_nvoc.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/os/nv_memory_type.h>
+
+#include <linux/acpi.h>
+
+/* Map a display channel class (low byte of oclass) to the base offset and
+ * size of its per-channel user area; per-head channels are spaced by *psize.
+ */
+static u64
+r535_chan_user(struct nvkm_disp_chan *chan, u64 *psize)
+{
+       switch (chan->object.oclass & 0xff) {
+       case 0x7d: *psize = 0x10000; return 0x680000;
+       case 0x7e: *psize = 0x01000; return 0x690000 + (chan->head * *psize);
+       case 0x7b: *psize = 0x01000; return 0x6b0000 + (chan->head * *psize);
+       case 0x7a: *psize = 0x01000; return 0x6d8000 + (chan->head * *psize);
+       default:
+               BUG_ON(1);
+               break;
+       }
+
+       return 0ULL;
+}
+
+/* Intentionally empty: no host-side interrupt enable/disable on the RM path. */
+static void
+r535_chan_intr(struct nvkm_disp_chan *chan, bool en)
+{
+}
+
+/* Release the GSP-RM object backing this display channel. */
+static void
+r535_chan_fini(struct nvkm_disp_chan *chan)
+{
+       nvkm_gsp_rm_free(&chan->rm.object);
+}
+
+/* Describe the channel's pushbuffer (address space, snooping, address/limit)
+ * to GSP-RM.  A channel without backing memory still issues the call, with
+ * only class/instance/valid filled in.
+ */
+static int
+r535_chan_push(struct nvkm_disp_chan *chan)
+{
+       struct nvkm_gsp *gsp = chan->disp->engine.subdev.device->gsp;
+       NV2080_CTRL_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_DISPLAY_CHANNEL_PUSHBUFFER,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       if (chan->memory) {
+               switch (nvkm_memory_target(chan->memory)) {
+               case NVKM_MEM_TARGET_NCOH:
+                       ctrl->addressSpace = ADDR_SYSMEM;
+                       ctrl->cacheSnoop = 0;
+                       break;
+               case NVKM_MEM_TARGET_HOST:
+                       ctrl->addressSpace = ADDR_SYSMEM;
+                       ctrl->cacheSnoop = 1;
+                       break;
+               case NVKM_MEM_TARGET_VRAM:
+                       ctrl->addressSpace = ADDR_FBMEM;
+                       break;
+               default:
+                       WARN_ON(1);
+                       return -EINVAL;
+               }
+
+               ctrl->physicalAddr = nvkm_memory_addr(chan->memory);
+               ctrl->limit = nvkm_memory_size(chan->memory) - 1;
+       }
+
+       ctrl->hclass = chan->object.oclass;
+       ctrl->channelInstance = chan->head;
+       /* NOTE(review): class 0x7a appears to be the PIO (no-pushbuffer)
+        * channel, hence valid=0 for it — confirm against class headers. */
+       ctrl->valid = ((chan->object.oclass & 0xff) != 0x7a) ? 1 : 0;
+
+       return nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+}
+
+/* Initialise a cursor channel: register its (empty) pushbuffer state with
+ * GSP-RM, then allocate the RM channel object for this head.
+ */
+static int
+r535_curs_init(struct nvkm_disp_chan *chan)
+{
+       NV50VAIO_CHANNELPIO_ALLOCATION_PARAMETERS *args;
+       int ret;
+
+       ret = r535_chan_push(chan);
+       if (ret)
+               return ret;
+
+       /* Handle encodes class in the high 16 bits and head in the low bits. */
+       args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
+                                    (chan->object.oclass << 16) | chan->head,
+                                    chan->object.oclass, sizeof(*args), &chan->rm.object);
+       if (IS_ERR(args))
+               return PTR_ERR(args);
+
+       args->channelInstance = chan->head;
+
+       return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+}
+
+static const struct nvkm_disp_chan_func
+r535_curs_func = {
+       .init = r535_curs_init,
+       .fini = r535_chan_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+};
+
+static const struct nvkm_disp_chan_user
+r535_curs = {
+       .func = &r535_curs_func,
+       .user = 73,
+};
+
+/* Bind a DMA object into the display RAMHT; the context value packs the
+ * user channel id (bits 25+) with the low 14 bits of the RM client handle.
+ */
+static int
+r535_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
+{
+       return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
+                                chan->chid.user << 25 |
+                                (chan->disp->rm.client.object.handle & 0x3fff));
+}
+
+/* Tear down a DMA channel, saving its current PUT pointer so init can
+ * resume from the same pushbuffer offset (suspend/resume support).
+ */
+static void
+r535_dmac_fini(struct nvkm_disp_chan *chan)
+{
+       struct nvkm_device *device = chan->disp->engine.subdev.device;
+       const u32 uoff = (chan->chid.user - 1) * 0x1000;
+
+       chan->suspend_put = nvkm_rd32(device, 0x690000 + uoff);
+       r535_chan_fini(chan);
+}
+
+/* Initialise a DMA channel: publish the pushbuffer to GSP-RM, then allocate
+ * the RM channel object, restoring any PUT offset saved by fini.
+ */
+static int
+r535_dmac_init(struct nvkm_disp_chan *chan)
+{
+       NV50VAIO_CHANNELDMA_ALLOCATION_PARAMETERS *args;
+       int ret;
+
+       ret = r535_chan_push(chan);
+       if (ret)
+               return ret;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->disp->rm.object,
+                                    (chan->object.oclass << 16) | chan->head,
+                                    chan->object.oclass, sizeof(*args), &chan->rm.object);
+       if (IS_ERR(args))
+               return PTR_ERR(args);
+
+       args->channelInstance = chan->head;
+       args->offset = chan->suspend_put;
+
+       return nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+}
+
+/* Resolve the userspace-provided pushbuffer memory handle for this channel. */
+static int
+r535_dmac_push(struct nvkm_disp_chan *chan, u64 memory)
+{
+       chan->memory = nvkm_umem_search(chan->object.client, memory);
+       if (IS_ERR(chan->memory))
+               return PTR_ERR(chan->memory);
+
+       return 0;
+}
+
+static const struct nvkm_disp_chan_func
+r535_dmac_func = {
+       .push = r535_dmac_push,
+       .init = r535_dmac_init,
+       .fini = r535_dmac_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+       .bind = r535_dmac_bind,
+};
+
+/* Window-immediate channels: same as dmac but with no RAMHT bind. */
+static const struct nvkm_disp_chan_func
+r535_wimm_func = {
+       .push = r535_dmac_push,
+       .init = r535_dmac_init,
+       .fini = r535_dmac_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+};
+
+static const struct nvkm_disp_chan_user
+r535_wimm = {
+       .func = &r535_wimm_func,
+       .user = 33,
+};
+
+static const struct nvkm_disp_chan_user
+r535_wndw = {
+       .func = &r535_dmac_func,
+       .user = 1,
+};
+
+/* Core channel fini: save PUT from the core user area (0x680000) before
+ * releasing the RM object, so a later init can resume at the same offset.
+ */
+static void
+r535_core_fini(struct nvkm_disp_chan *chan)
+{
+       struct nvkm_device *device = chan->disp->engine.subdev.device;
+
+       chan->suspend_put = nvkm_rd32(device, 0x680000);
+       r535_chan_fini(chan);
+}
+
+static const struct nvkm_disp_chan_func
+r535_core_func = {
+       .push = r535_dmac_push,
+       .init = r535_dmac_init,
+       .fini = r535_core_fini,
+       .intr = r535_chan_intr,
+       .user = r535_chan_user,
+       .bind = r535_dmac_bind,
+};
+
+static const struct nvkm_disp_chan_user
+r535_core = {
+       .func = &r535_core_func,
+       .user = 0,
+};
+
+/* Set panel backlight brightness for the output currently assigned to this
+ * SOR, via the NV0073 SPECIFIC_SET_BACKLIGHT_BRIGHTNESS control.
+ */
+static int
+r535_sor_bl_set(struct nvkm_ior *sor, int lvl)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_BACKLIGHT_BRIGHTNESS,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->brightness = lvl;
+
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+/* Query current backlight brightness; returns the level, or a negative
+ * errno if the RM control call fails.
+ */
+static int
+r535_sor_bl_get(struct nvkm_ior *sor)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_SPECIFIC_BACKLIGHT_BRIGHTNESS_PARAMS *ctrl;
+       int lvl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_GET_BACKLIGHT_BRIGHTNESS,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       lvl = ctrl->brightness;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return lvl;
+}
+
+static const struct nvkm_ior_func_bl
+r535_sor_bl = {
+       .get = r535_sor_bl_get,
+       .set = r535_sor_bl_set,
+};
+
+/* Push an HDA ELD (EDID-like audio capability block) to GSP-RM, marking
+ * presence-detect and ELD-valid so the audio function sees the sink.
+ */
+static void
+r535_sor_hda_eld(struct nvkm_ior *sor, int head, u8 *data, u8 size)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
+
+       if (WARN_ON(size > sizeof(ctrl->bufferELD)))
+               return;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->numELDSize = size;
+       memcpy(ctrl->bufferELD, data, size);
+       ctrl->maxFreqSupported = 0; //XXX
+       ctrl->ctrl  = NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, PD, TRUE);
+       ctrl->ctrl |= NVDEF(NV0073, CTRL_DFP_ELD_AUDIO_CAPS_CTRL, ELDV, TRUE);
+       ctrl->deviceEntry = head;
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+/* HDA hotplug notification: only the unplug case is forwarded here (an empty
+ * params block clears PD/ELDV); plug is handled by the ELD upload above.
+ */
+static void
+r535_sor_hda_hpd(struct nvkm_ior *sor, int head, bool present)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DFP_SET_ELD_AUDIO_CAP_PARAMS *ctrl;
+
+       if (present)
+               return;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_SET_ELD_AUDIO_CAPS, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->deviceEntry = head;
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static const struct nvkm_ior_func_hda
+r535_sor_hda = {
+       .hpd = r535_sor_hda_hpd,
+       .eld = r535_sor_hda_eld,
+};
+
+/* Mute/unmute the DP audio stream on this SOR's output via GSP-RM. */
+static void
+r535_sor_dp_audio_mute(struct nvkm_ior *sor, bool mute)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DP_SET_AUDIO_MUTESTREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_SET_AUDIO_MUTESTREAM, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->mute = mute;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+/* Enable/disable DP audio.  Ordering matters: mute before disabling and
+ * unmute only after enabling, to avoid audible glitches.
+ */
+static void
+r535_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_DFP_SET_AUDIO_ENABLE_PARAMS *ctrl;
+
+       if (!enable)
+               r535_sor_dp_audio_mute(sor, true);
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_SET_AUDIO_ENABLE, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->enable = enable;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+
+       if (enable)
+               r535_sor_dp_audio_mute(sor, false);
+}
+
+/* Configure a DP MST stream: timeslot allocation [slot, slot+slot_nr) and
+ * PBN bandwidth figures, via DP_CONFIG_STREAM with bMST=1.
+ */
+static void
+r535_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned_pbn)
+{
+       struct nvkm_disp *disp = sor->disp;
+       struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->head = head;
+       ctrl->sorIndex = sor->id;
+       ctrl->dpLink = sor->asy.link == 2;
+       ctrl->bEnableOverride = 1;
+       ctrl->bMST = 1;
+       ctrl->hBlankSym = 0;
+       ctrl->vBlankSym = 0;
+       ctrl->colorFormat = 0;
+       ctrl->bEnableTwoHeadOneOr = 0;
+       ctrl->singleHeadMultistreamMode = 0;
+       ctrl->MST.slotStart = slot;
+       ctrl->MST.slotEnd = slot + slot_nr - 1;
+       ctrl->MST.PBN = pbn;
+       ctrl->MST.Timeslice = aligned_pbn;
+       ctrl->MST.sendACT = 0;
+       ctrl->MST.singleHeadMSTPipeline = 0;
+       ctrl->MST.bEnableAudioOverRightPanel = 0;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+/* Configure a DP SST stream: watermark, blanking symbols and enhanced
+ * framing, via DP_CONFIG_STREAM with bMST=0.
+ */
+static int
+r535_sor_dp_sst(struct nvkm_ior *sor, int head, bool ef,
+               u32 watermark, u32 hblanksym, u32 vblanksym)
+{
+       struct nvkm_disp *disp = sor->disp;
+       struct NV0073_CTRL_CMD_DP_CONFIG_STREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_CONFIG_STREAM, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->head = head;
+       ctrl->sorIndex = sor->id;
+       ctrl->dpLink = sor->asy.link == 2;
+       ctrl->bEnableOverride = 1;
+       ctrl->bMST = 0;
+       ctrl->hBlankSym = hblanksym;
+       ctrl->vBlankSym = vblanksym;
+       ctrl->colorFormat = 0;
+       ctrl->bEnableTwoHeadOneOr = 0;
+       ctrl->SST.bEnhancedFraming = ef;
+       ctrl->SST.tuSize = 64;
+       ctrl->SST.waterMark = watermark;
+       ctrl->SST.bEnableAudioOverRightPanel = 0;
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+static const struct nvkm_ior_func_dp
+r535_sor_dp = {
+       .sst = r535_sor_dp_sst,
+       .vcpi = r535_sor_dp_vcpi,
+       .audio = r535_sor_dp_audio,
+};
+
+/* Report HDMI sink SCDC/scrambling capabilities to GSP-RM so it can
+ * configure TMDS scrambling appropriately.
+ */
+static void
+r535_sor_hdmi_scdc(struct nvkm_ior *sor, u32 khz, bool support, bool scrambling,
+                  bool scrambling_low_rates)
+{
+       struct nvkm_outp *outp = sor->asy.outp;
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_SPECIFIC_SET_HDMI_SINK_CAPS_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_SINK_CAPS, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->caps = 0;
+       if (support)
+               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, SCDC_SUPPORTED, TRUE);
+       if (scrambling)
+               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, GT_340MHZ_CLOCK_SUPPORTED, TRUE);
+       if (scrambling_low_rates)
+               ctrl->caps |= NVDEF(NV0073_CTRL_CMD_SPECIFIC, SET_HDMI_SINK_CAPS, LTE_340MHZ_SCRAMBLING_SUPPORTED, TRUE);
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+/* Mute/unmute the HDMI audio stream for this output via GSP-RM. */
+static void
+r535_sor_hdmi_ctrl_audio_mute(struct nvkm_outp *outp, bool mute)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_AUDIO_MUTESTREAM, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->mute = mute;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+/* Transmit a hand-built 10-byte info packet (SET_OD_PACKET) whose fourth
+ * byte toggles HDMI audio on (0x10) or off (0x01).
+ */
+static void
+r535_sor_hdmi_ctrl_audio(struct nvkm_outp *outp, bool enable)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_SPECIFIC_SET_OD_PACKET_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_OD_PACKET, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->transmitControl =
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ENABLE, YES) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, OTHER_FRAME, DISABLE) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, SINGLE_FRAME, DISABLE) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, ON_HBLANK, DISABLE) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, VIDEO_FMT, SW_CONTROLLED) |
+               NVDEF(NV0073_CTRL_SPECIFIC, SET_OD_PACKET_TRANSMIT_CONTROL, RESERVED_LEGACY_MODE, NO);
+       ctrl->packetSize = 10;
+       ctrl->aPacket[0] = 0x03;
+       ctrl->aPacket[1] = 0x00;
+       ctrl->aPacket[2] = 0x00;
+       ctrl->aPacket[3] = enable ? 0x10 : 0x01;
+       ctrl->aPacket[4] = 0x00;
+       ctrl->aPacket[5] = 0x00;
+       ctrl->aPacket[6] = 0x00;
+       ctrl->aPacket[7] = 0x00;
+       ctrl->aPacket[8] = 0x00;
+       ctrl->aPacket[9] = 0x00;
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+/* Toggle HDMI audio: RM calls for enable/mute, plus a direct poke of the
+ * per-head General Control Packet (GCP) registers.
+ */
+static void
+r535_sor_hdmi_audio(struct nvkm_ior *sor, int head, bool enable)
+{
+       struct nvkm_device *device = sor->disp->engine.subdev.device;
+       const u32 hdmi = head * 0x400;
+
+       r535_sor_hdmi_ctrl_audio(sor->asy.outp, enable);
+       r535_sor_hdmi_ctrl_audio_mute(sor->asy.outp, !enable);
+
+       /* General Control (GCP). */
+       nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000000);
+       nvkm_wr32(device, 0x6f00cc + hdmi, !enable ? 0x00000001 : 0x00000010);
+       nvkm_mask(device, 0x6f00c0 + hdmi, 0x00000001, 0x00000001);
+}
+
+/* Enable HDMI on this output.  Disable is a no-op here; max_ac_packet and
+ * rekey are accepted for interface compatibility but not forwarded to RM.
+ */
+static void
+r535_sor_hdmi_ctrl(struct nvkm_ior *sor, int head, bool enable, u8 max_ac_packet, u8 rekey)
+{
+       struct nvkm_disp *disp = sor->disp;
+       NV0073_CTRL_SPECIFIC_SET_HDMI_ENABLE_PARAMS *ctrl;
+
+       if (!enable)
+               return;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_SET_HDMI_ENABLE, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return;
+
+       ctrl->displayId = BIT(sor->asy.outp->index);
+       ctrl->enable = enable;
+
+       WARN_ON(nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl));
+}
+
+static const struct nvkm_ior_func_hdmi
+r535_sor_hdmi = {
+       .ctrl = r535_sor_hdmi_ctrl,
+       .scdc = r535_sor_hdmi_scdc,
+       /*TODO: SF_USER -> KMS. */
+       .infoframe_avi = gv100_sor_hdmi_infoframe_avi,
+       .infoframe_vsi = gv100_sor_hdmi_infoframe_vsi,
+       .audio = r535_sor_hdmi_audio,
+};
+
+static const struct nvkm_ior_func
+r535_sor = {
+       .hdmi = &r535_sor_hdmi,
+       .dp = &r535_sor_dp,
+       .hda = &r535_sor_hda,
+       .bl = &r535_sor_bl,
+};
+
+static int
+r535_sor_new(struct nvkm_disp *disp, int id)
+{
+       return nvkm_ior_new_(&r535_sor, disp, SOR, id, true/*XXX: hda cap*/);
+}
+
+static int
+r535_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
+{
+       *pmask = 0xf;
+       return 4;
+}
+
+static void
+r535_head_vblank_put(struct nvkm_head *head)
+{
+       struct nvkm_device *device = head->disp->engine.subdev.device;
+
+       nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000000);
+}
+
+static void
+r535_head_vblank_get(struct nvkm_head *head)
+{
+       struct nvkm_device *device = head->disp->engine.subdev.device;
+
+       nvkm_wr32(device, 0x611800 + (head->id * 4), 0x00000002);
+       nvkm_mask(device, 0x611d80 + (head->id * 4), 0x00000002, 0x00000002);
+}
+
+static void
+r535_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
+{
+}
+
+static const struct nvkm_head_func
+r535_head = {
+       .state = r535_head_state,
+       .vblank_get = r535_head_vblank_get,
+       .vblank_put = r535_head_vblank_put,
+};
+
+/* Look up (or create) the connector object for display 'id'.
+ *
+ * Queries connector data from GSP-RM; if a connector with the reported
+ * index already exists on disp->conns it is reused, otherwise a new one is
+ * constructed and appended to the list.  Returns the connector or an
+ * ERR_PTR.  NOTE(review): only data[0] is consumed - assumes RM returns a
+ * single entry per displayId, confirm. */
+static struct nvkm_conn *
+r535_conn_new(struct nvkm_disp *disp, u32 id)
+{
+       NV0073_CTRL_SPECIFIC_GET_CONNECTOR_DATA_PARAMS *ctrl;
+       struct nvbios_connE dcbE = {};
+       struct nvkm_conn *conn;
+       int ret, index;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return (void *)ctrl;
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(id);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return (void *)ctrl;
+
+       /* Reuse an existing connector if RM reports an index we already know. */
+       list_for_each_entry(conn, &disp->conns, head) {
+               if (conn->index == ctrl->data[0].index) {
+                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+                       return conn;
+               }
+       }
+
+       /* Copy out what we need before releasing the RM message buffer. */
+       dcbE.type = ctrl->data[0].type;
+       index = ctrl->data[0].index;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+       ret = nvkm_conn_new(disp, index, &dcbE, &conn);
+       if (ret)
+               return ERR_PTR(ret);
+
+       list_add_tail(&conn->head, &disp->conns);
+       return conn;
+}
+
+/* Return the output's SOR to the free pool and sever the OR<->outp links. */
+static void
+r535_outp_release(struct nvkm_outp *outp)
+{
+       struct nvkm_ior *ior = outp->ior;
+
+       outp->disp->rm.assigned_sors &= ~BIT(ior->id);
+       ior->asy.outp = NULL;
+       outp->ior = NULL;
+}
+
+/* Ask GSP-RM to assign a SOR to this output.
+ *
+ * sorExcludeMask keeps RM from handing back a SOR we've already assigned to
+ * another output.  On success the chosen SOR is recorded in
+ * disp->rm.assigned_sors and bound to the output via
+ * nvkm_outp_acquire_ior().  'hda' requests audio-capable SOR selection. */
+static int
+r535_outp_acquire(struct nvkm_outp *outp, bool hda)
+{
+       struct nvkm_disp *disp = outp->disp;
+       struct nvkm_ior *ior;
+       NV0073_CTRL_DFP_ASSIGN_SOR_PARAMS *ctrl;
+       int or;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DFP_ASSIGN_SOR, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+       ctrl->sorExcludeMask = disp->rm.assigned_sors;
+       if (hda)
+               ctrl->flags |= NVDEF(NV0073_CTRL, DFP_ASSIGN_SOR_FLAGS, AUDIO, OPTIMAL);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       /* RM returns, per SOR, the mask of displays assigned to it; find ours. */
+       for (or = 0; or < ARRAY_SIZE(ctrl->sorAssignListWithTag); or++) {
+               if (ctrl->sorAssignListWithTag[or].displayMask & BIT(outp->index)) {
+                       disp->rm.assigned_sors |= BIT(or);
+                       break;
+               }
+       }
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+       /* ARRAY_SIZE() is compile-time sizeof, so evaluating it after _done()
+        * doesn't touch the freed message buffer. */
+       if (WARN_ON(or == ARRAY_SIZE(ctrl->sorAssignListWithTag)))
+               return -EINVAL;
+
+       ior = nvkm_ior_find(disp, SOR, or);
+       if (WARN_ON(!ior))
+               return -EINVAL;
+
+       nvkm_outp_acquire_ior(outp, NVKM_OUTP_USER, ior);
+       return 0;
+}
+
+/* Query from RM which displayId (if any) head 'head' is actively driving.
+ * Used by r535_outp_inherit() to pick up state programmed before the driver
+ * loaded.  Returns 0 and fills *displayid, or a negative errno. */
+static int
+r535_disp_head_displayid(struct nvkm_disp *disp, int head, u32 *displayid)
+{
+       NV0073_CTRL_SYSTEM_GET_ACTIVE_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SYSTEM_GET_ACTIVE, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->head = head;
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       *displayid = ctrl->displayId;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return 0;
+}
+
+/* Determine whether this output was already lit (e.g. by firmware/RM)
+ * before the driver took over.
+ *
+ * Scans all heads for one whose active displayId matches the output; if
+ * found, queries the OR driving it, fills in the SOR's armed state
+ * (protocol/link/head) from RM's report, marks the SOR as assigned, and
+ * returns it.  Returns NULL when nothing is driving this output or on any
+ * RM error. */
+static struct nvkm_ior *
+r535_outp_inherit(struct nvkm_outp *outp)
+{
+       struct nvkm_disp *disp = outp->disp;
+       struct nvkm_head *head;
+       u32 displayid;
+       int ret;
+
+       list_for_each_entry(head, &disp->heads, head) {
+               ret = r535_disp_head_displayid(disp, head->id, &displayid);
+               if (WARN_ON(ret))
+                       return NULL;
+
+               if (displayid == BIT(outp->index)) {
+                       NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
+                       u32 id, proto;
+                       struct nvkm_ior *ior;
+
+                       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                                   NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO,
+                                                   sizeof(*ctrl));
+                       if (IS_ERR(ctrl))
+                               return NULL;
+
+                       ctrl->subDeviceInstance = 0;
+                       ctrl->displayId = displayid;
+
+                       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+                       if (IS_ERR(ctrl))
+                               return NULL;
+
+                       id = ctrl->index;
+                       proto = ctrl->protocol;
+                       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+                       ior = nvkm_ior_find(disp, SOR, id);
+                       if (WARN_ON(!ior))
+                               return NULL;
+
+                       /* Translate the RM protocol code into nvkm's armed
+                        * protocol + sublink selection (1=A, 2=B, 3=both). */
+                       switch (proto) {
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+                               ior->arm.proto = TMDS;
+                               ior->arm.link = 1;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+                               ior->arm.proto = TMDS;
+                               ior->arm.link = 2;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+                               ior->arm.proto = TMDS;
+                               ior->arm.link = 3;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+                               ior->arm.proto = DP;
+                               ior->arm.link = 1;
+                               break;
+                       case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+                               ior->arm.proto = DP;
+                               ior->arm.link = 2;
+                               break;
+                       default:
+                               WARN_ON(1);
+                               return NULL;
+                       }
+
+                       ior->arm.proto_evo = proto;
+                       ior->arm.head = BIT(head->id);
+                       disp->rm.assigned_sors |= BIT(ior->id);
+                       return ior;
+               }
+       }
+
+       return NULL;
+}
+
+/* Fetch (and debug-log) the DFP flags for this output from RM.  The values
+ * are only logged, not stored.  Returns 0 or a negative errno. */
+static int
+r535_outp_dfp_get_info(struct nvkm_outp *outp)
+{
+       NV0073_CTRL_DFP_GET_INFO_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DFP_GET_INFO, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(outp->index);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       nvkm_debug(&disp->engine.subdev, "DFP %08x: flags:%08x flags2:%08x\n",
+                  ctrl->displayId, ctrl->flags, ctrl->flags2);
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return 0;
+}
+
+/* Probe connection state for an output via RM.
+ * Returns 1 if connected (after logging DFP info), 0 if disconnected, or a
+ * negative errno on failure. */
+static int
+r535_outp_detect(struct nvkm_outp *outp)
+{
+       NV0073_CTRL_SYSTEM_GET_CONNECT_STATE_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SYSTEM_GET_CONNECT_STATE, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayMask = BIT(outp->index);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       /* RM reflects the connected displays back in displayMask. */
+       if (ctrl->displayMask & BIT(outp->index)) {
+               ret = r535_outp_dfp_get_info(outp);
+               if (ret == 0)
+                       ret = 1;
+       } else {
+               ret = 0;
+       }
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return ret;
+}
+
+/* Return a displayId previously allocated for an MST stream back to RM.
+ * nvkm_gsp_rm_ctrl_wr() consumes the message buffer, so no _done() here. */
+static int
+r535_dp_mst_id_put(struct nvkm_outp *outp, u32 id)
+{
+       NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_TOPOLOGY_FREE_DISPLAYID, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = id;
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+/* Allocate a displayId from RM for an MST stream on this output; the
+ * assigned id is returned via *pid.  Pair with r535_dp_mst_id_put(). */
+static int
+r535_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
+{
+       NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_TOPOLOGY_ALLOCATE_DISPLAYID,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       *pid = ctrl->displayIdAssigned;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return 0;
+}
+
+/* Program per-lane DP drive parameters (pre-emphasis 'pe' and voltage
+ * swing / drive current 'vs') for the first 'lanes' lanes via RM. */
+static int
+r535_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4])
+{
+       NV0073_CTRL_DP_LANE_DATA_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_SET_LANE_DATA, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(outp->index);
+       ctrl->numLanes = lanes;
+       for (int i = 0; i < lanes; i++)
+               ctrl->data[i] = NVVAL(NV0073_CTRL, DP_LANE_DATA,  PREEMPHASIS, pe[i]) |
+                               NVVAL(NV0073_CTRL, DP_LANE_DATA, DRIVECURRENT, vs[i]);
+
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+/* Issue NV0073_CTRL_CMD_DP_CTRL to (re)train the DP link to one target.
+ *
+ * target:  0 trains to the sink itself, >0 selects an LTTPR (PHY repeater).
+ * mst:     select multi-stream transport format.
+ * link_nr / link_bw: lane count and per-lane bandwidth code to program.
+ *
+ * Returns 0 on success, -EIO if RM reports a link-training error, or the
+ * error from the RM message-buffer helpers. */
+static int
+r535_dp_train_target(struct nvkm_outp *outp, u8 target, bool mst, u8 link_nr, u8 link_bw)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_DP_CTRL_PARAMS *ctrl;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_CTRL, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+       ctrl->cmd = NVDEF(NV0073_CTRL, DP_CMD, SET_LANE_COUNT, TRUE) |
+                   NVDEF(NV0073_CTRL, DP_CMD, SET_LINK_BW, TRUE) |
+                   NVDEF(NV0073_CTRL, DP_CMD, TRAIN_PHY_REPEATER, YES);
+       ctrl->data = NVVAL(NV0073_CTRL, DP_DATA, SET_LANE_COUNT, link_nr) |
+                    NVVAL(NV0073_CTRL, DP_DATA, SET_LINK_BW, link_bw) |
+                    NVVAL(NV0073_CTRL, DP_DATA, TARGET, target);
+
+       if (mst)
+               ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_FORMAT_MODE, MULTI_STREAM);
+
+       if (outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP)
+               ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, SET_ENHANCED_FRAMING, TRUE);
+
+       /* 0x20 in DPCD 0x002 looks like POST_LT_ADJ_REQ_SUPPORTED - grant
+        * post-LT adjustment when training the sink and TPS4 is absent.
+        * TODO: confirm and replace the magic value with a DPCD_RC02_*
+        * define. */
+       if (target == 0 &&
+           (outp->dp.dpcd[DPCD_RC02] & 0x20) &&
+           !(outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED))
+               ctrl->cmd |= NVDEF(NV0073_CTRL, DP_CMD, POST_LT_ADJ_REQ_GRANTED, YES);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       /* RM reports link-training status in ctrl->err. */
+       ret = ctrl->err ? -EIO : 0;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return ret;
+}
+
+/* Train the DP link, starting from the furthest LTTPR and finishing with
+ * the sink itself (target 0).  Stops at the first failure. */
+static int
+r535_dp_train(struct nvkm_outp *outp, bool retrain)
+{
+       int target, ret;
+
+       for (target = outp->dp.lttprs; target >= 0; target--) {
+               ret = r535_dp_train_target(outp, target, outp->dp.lt.mst,
+                                          outp->dp.lt.nr, outp->dp.lt.bw);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Program the eDP indexed link-rate table into RM.
+ *
+ * Only applies to eDP sinks that reported indexed rates with valid DPCD
+ * slots.  The rate value is converted with (rate * 10 / 200) - presumably
+ * from the driver's units into RM's 200kHz-granularity units; TODO confirm
+ * the unit conversion. */
+static int
+r535_dp_rates(struct nvkm_outp *outp)
+{
+       NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+
+       if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
+           !outp->dp.rates || outp->dp.rate[0].dpcd < 0)
+               return 0;
+
+       /* ARRAY_SIZE() is compile-time, so 'ctrl' may be uninitialized here. */
+       if (WARN_ON(outp->dp.rates > ARRAY_SIZE(ctrl->linkRateTbl)))
+               return -EINVAL;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_DP_CONFIG_INDEXED_LINK_RATES, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->displayId = BIT(outp->index);
+       for (int i = 0; i < outp->dp.rates; i++)
+               ctrl->linkRateTbl[outp->dp.rate[i].dpcd] = outp->dp.rate[i].rate * 10 / 200;
+
+       return nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+}
+
+/* Perform an AUX channel transaction through RM.
+ *
+ * type:   AUX command (request type / MOT bits).
+ * addr:   AUX address.
+ * data:   in/out payload buffer.
+ * psize:  in: request length (0 = address-only transaction);
+ *         out: length reported by RM.
+ *
+ * Returns the AUX replyType on success (caller interprets ack/nack/defer),
+ * or a negative errno.  NOTE(review): ctrl->size is set to size-1, which
+ * matches the DP AUX length-minus-one encoding - confirm against the RM
+ * auxch API.  Also, the reply memcpy copies the original 'size' bytes
+ * rather than ctrl->size - verify RM always fills the buffer. */
+static int
+r535_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *psize)
+{
+       struct nvkm_disp *disp = outp->disp;
+       NV0073_CTRL_DP_AUXCH_CTRL_PARAMS *ctrl;
+       u8 size = *psize;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom, NV0073_CTRL_CMD_DP_AUXCH_CTRL, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+       ctrl->bAddrOnly = !size;
+       ctrl->cmd = type;
+       /* Address-only transactions are forced to a non-MOT write. */
+       if (ctrl->bAddrOnly) {
+               ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD, REQ_TYPE, WRITE);
+               ctrl->cmd = NVDEF_SET(ctrl->cmd, NV0073_CTRL, DP_AUXCH_CMD,  I2C_MOT, FALSE);
+       }
+       ctrl->addr = addr;
+       ctrl->size = !ctrl->bAddrOnly ? (size - 1) : 0;
+       memcpy(ctrl->data, data, size);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       memcpy(data, ctrl->data, size);
+       *psize = ctrl->size;
+       ret = ctrl->replyType;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return ret;
+}
+
+/* Intentional no-op: AUX power management is presumably handled by GSP-RM,
+ * so there is nothing to do here. */
+static int
+r535_dp_aux_pwr(struct nvkm_outp *outp, bool pu)
+{
+       return 0;
+}
+
+/* Release a DP output: train the link down to zero lanes, then return the
+ * SOR to the pool.
+ *
+ * If no link bandwidth was ever recorded, fall back to the sink's first
+ * indexed rate (rate / 27000 - presumably converting to the 270MHz-unit
+ * link-bw code, TODO confirm) or to 0x06 (RBR) so the power-down train
+ * still carries a valid bw value. */
+static void
+r535_dp_release(struct nvkm_outp *outp)
+{
+       if (!outp->dp.lt.bw) {
+               if (!WARN_ON(!outp->dp.rates))
+                       outp->dp.lt.bw = outp->dp.rate[0].rate / 27000;
+               else
+                       outp->dp.lt.bw = 0x06;
+       }
+
+       outp->dp.lt.nr = 0;
+
+       r535_dp_train_target(outp, 0, outp->dp.lt.mst, outp->dp.lt.nr, outp->dp.lt.bw);
+       r535_outp_release(outp);
+}
+
+/* Acquire a SOR for a DP output.  Currently identical to the generic
+ * output acquire; exists as a separate hook for DP-specific setup. */
+static int
+r535_dp_acquire(struct nvkm_outp *outp, bool hda)
+{
+       return r535_outp_acquire(outp, hda);
+}
+
+/* DP output hooks: detect/inherit are shared with TMDS; link management,
+ * AUX and MST id allocation are routed through GSP-RM control calls. */
+static const struct nvkm_outp_func
+r535_dp = {
+       .detect = r535_outp_detect,
+       .inherit = r535_outp_inherit,
+       .acquire = r535_dp_acquire,
+       .release = r535_dp_release,
+       .dp.aux_pwr = r535_dp_aux_pwr,
+       .dp.aux_xfer = r535_dp_aux_xfer,
+       .dp.mst_id_get = r535_dp_mst_id_get,
+       .dp.mst_id_put = r535_dp_mst_id_put,
+       .dp.rates = r535_dp_rates,
+       .dp.train = r535_dp_train,
+       .dp.drive = r535_dp_drive,
+};
+
+/* Fetch the EDID for a TMDS output via RM.
+ *
+ * On entry *psize is the capacity of 'data'; on success the EDID is copied
+ * in and *psize updated to the actual length.  Returns -E2BIG if the EDID
+ * doesn't fit, or a negative errno on RM failure. */
+static int
+r535_tmds_edid_get(struct nvkm_outp *outp, u8 *data, u16 *psize)
+{
+       NV0073_CTRL_SPECIFIC_GET_EDID_V2_PARAMS *ctrl;
+       struct nvkm_disp *disp = outp->disp;
+       int ret = -E2BIG;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_GET_EDID_V2, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(outp->index);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       if (ctrl->bufferSize <= *psize) {
+               memcpy(data, ctrl->edidBuffer, ctrl->bufferSize);
+               *psize = ctrl->bufferSize;
+               ret = 0;
+       }
+
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       return ret;
+}
+
+/* TMDS (DVI/HDMI) output hooks; EDID is fetched via an RM control call. */
+static const struct nvkm_outp_func
+r535_tmds = {
+       .detect = r535_outp_detect,
+       .inherit = r535_outp_inherit,
+       .acquire = r535_outp_acquire,
+       .release = r535_outp_release,
+       .edid_get = r535_tmds_edid_get,
+};
+
+/* Construct the nvkm_outp for RM display 'id'.
+ *
+ * Queries the OR type/protocol from RM, builds a synthetic DCB entry from
+ * it, locates/creates the connector, and instantiates either a TMDS or DP
+ * output.  For DP, link caps (max bandwidth, MST, watermark limits) are
+ * also queried.  Returns 0 (including for OR_TYPE_NONE) or a negative
+ * errno.
+ *
+ * Fix vs. original: the OR_GET_INFO message buffer was leaked on the
+ * early-return paths (OR_TYPE_NONE, and the WARN_ON default branches);
+ * all needed fields are now snapshotted and the buffer released before
+ * the switch. */
+static int
+r535_outp_new(struct nvkm_disp *disp, u32 id)
+{
+       NV0073_CTRL_SPECIFIC_OR_GET_INFO_PARAMS *ctrl;
+       enum nvkm_ior_proto proto;
+       struct dcb_output dcbE = {};
+       struct nvkm_conn *conn;
+       struct nvkm_outp *outp;
+       u8 locn, link = 0;
+       u32 rm_type, rm_proto;
+       int ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                   NV0073_CTRL_CMD_SPECIFIC_OR_GET_INFO, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->subDeviceInstance = 0;
+       ctrl->displayId = BIT(id);
+
+       ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       /* Snapshot everything we need, then release the message buffer so
+        * the early returns below can't leak it. */
+       rm_type = ctrl->type;
+       rm_proto = ctrl->protocol;
+       locn = ctrl->location;
+       nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+       switch (rm_type) {
+       case NV0073_CTRL_SPECIFIC_OR_TYPE_NONE:
+               return 0;
+       case NV0073_CTRL_SPECIFIC_OR_TYPE_SOR:
+               /* Translate RM protocol to nvkm proto + sublink (1=A, 2=B, 3=both). */
+               switch (rm_proto) {
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_A:
+                       proto = TMDS;
+                       link = 1;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_SINGLE_TMDS_B:
+                       proto = TMDS;
+                       link = 2;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DUAL_TMDS:
+                       proto = TMDS;
+                       link = 3;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_A:
+                       proto = DP;
+                       link = 1;
+                       break;
+               case NV0073_CTRL_SPECIFIC_OR_PROTOCOL_SOR_DP_B:
+                       proto = DP;
+                       link = 2;
+                       break;
+               default:
+                       WARN_ON(1);
+                       return -EINVAL;
+               }
+
+               break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       conn = r535_conn_new(disp, id);
+       if (IS_ERR(conn))
+               return PTR_ERR(conn);
+
+       switch (proto) {
+       case TMDS: dcbE.type = DCB_OUTPUT_TMDS; break;
+       case   DP: dcbE.type = DCB_OUTPUT_DP; break;
+       default:
+               WARN_ON(1);
+               return -EINVAL;
+       }
+
+       /* Synthesise a DCB entry - there's no VBIOS DCB to parse with RM. */
+       dcbE.location = locn;
+       dcbE.connector = conn->index;
+       dcbE.heads = disp->head.mask;
+       dcbE.i2c_index = 0xff;
+       dcbE.link = dcbE.sorconf.link = link;
+
+       if (proto == TMDS) {
+               ret = nvkm_outp_new_(&r535_tmds, disp, id, &dcbE, &outp);
+               if (ret)
+                       return ret;
+       } else {
+               NV0073_CTRL_CMD_DP_GET_CAPS_PARAMS *ctrl;
+               bool mst, wm;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                           NV0073_CTRL_CMD_DP_GET_CAPS, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ctrl->sorIndex = ~0;
+
+               ctrl = nvkm_gsp_rm_ctrl_push(&disp->rm.objcom, ctrl, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               /* Translate RM's max link rate into the DPCD bw code. */
+               switch (NVVAL_GET(ctrl->maxLinkRate, NV0073_CTRL_CMD, DP_GET_CAPS, MAX_LINK_RATE)) {
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_1_62:
+                       dcbE.dpconf.link_bw = 0x06;
+                       break;
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_2_70:
+                       dcbE.dpconf.link_bw = 0x0a;
+                       break;
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_5_40:
+                       dcbE.dpconf.link_bw = 0x14;
+                       break;
+               case NV0073_CTRL_CMD_DP_GET_CAPS_MAX_LINK_RATE_8_10:
+                       dcbE.dpconf.link_bw = 0x1e;
+                       break;
+               default:
+                       dcbE.dpconf.link_bw = 0x00;
+                       break;
+               }
+
+               mst = ctrl->bIsMultistreamSupported;
+               wm = ctrl->bHasIncreasedWatermarkLimits;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+               if (WARN_ON(!dcbE.dpconf.link_bw))
+                       return -EINVAL;
+
+               dcbE.dpconf.link_nr = 4;
+
+               ret = nvkm_outp_new_(&r535_dp, disp, id, &dcbE, &outp);
+               if (ret)
+                       return ret;
+
+               outp->dp.mst = mst;
+               outp->dp.increased_wm = wm;
+       }
+
+       outp->conn = conn;
+       list_add_tail(&outp->head, &disp->outps);
+       return 0;
+}
+
+/* GSP event callback for DP short-pulse IRQ notifications.  Forwards the
+ * highest set bit of irq->displayId (fls() - 1) as an NVKM_DPYID_IRQ event.
+ * NOTE(review): only one display per message is forwarded - confirm RM
+ * never sets multiple bits here. */
+static void
+r535_disp_irq(struct nvkm_gsp_event *event, void *repv, u32 repc)
+{
+       struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.irq);
+       Nv2080DpIrqNotification *irq = repv;
+
+       if (WARN_ON(repc < sizeof(*irq)))
+               return;
+
+       nvkm_debug(&disp->engine.subdev, "event: dp irq displayId %08x\n", irq->displayId);
+
+       if (irq->displayId)
+               nvkm_event_ntfy(&disp->rm.event, fls(irq->displayId) - 1, NVKM_DPYID_IRQ);
+}
+
+/* GSP event callback for hotplug notifications.  Fans the plug/unplug masks
+ * out as per-display NVKM_DPYID_PLUG/UNPLUG events.  Only bits 0..30 are
+ * scanned - presumably bit 31 is not a valid displayId; confirm. */
+static void
+r535_disp_hpd(struct nvkm_gsp_event *event, void *repv, u32 repc)
+{
+       struct nvkm_disp *disp = container_of(event, typeof(*disp), rm.hpd);
+       Nv2080HotplugNotification *hpd = repv;
+
+       if (WARN_ON(repc < sizeof(*hpd)))
+               return;
+
+       nvkm_debug(&disp->engine.subdev, "event: hpd plug %08x unplug %08x\n",
+                  hpd->plugDisplayMask, hpd->unplugDisplayMask);
+
+       for (int i = 0; i < 31; i++) {
+               u32 mask = 0;
+
+               if (hpd->plugDisplayMask & BIT(i))
+                       mask |= NVKM_DPYID_PLUG;
+               if (hpd->unplugDisplayMask & BIT(i))
+                       mask |= NVKM_DPYID_UNPLUG;
+
+               if (mask)
+                       nvkm_event_ntfy(&disp->rm.event, i, mask);
+       }
+}
+
+/* Intentionally empty: no init/fini hooks are needed for the displayid
+ * event; notifications come from the GSP HPD/IRQ callbacks. */
+static const struct nvkm_event_func
+r535_disp_event = {
+};
+
+/* Service a per-head timing interrupt.  Bit 1 of the head's status register
+ * is vblank: deliver the notification, then ack it via 0x611800. */
+static void
+r535_disp_intr_head_timing(struct nvkm_disp *disp, int head)
+{
+       struct nvkm_subdev *subdev = &disp->engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 stat = nvkm_rd32(device, 0x611c00 + (head * 0x04));
+
+       if (stat & 0x00000002) {
+               nvkm_disp_vblank(disp, head);
+
+               nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000002);
+       }
+}
+
+/* Top-level display interrupt handler: the low 8 bits of 0x611ec0 flag
+ * pending per-head timing interrupts; dispatch each set head. */
+static irqreturn_t
+r535_disp_intr(struct nvkm_inth *inth)
+{
+       struct nvkm_disp *disp = container_of(inth, typeof(*disp), engine.subdev.inth);
+       struct nvkm_subdev *subdev = &disp->engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       unsigned long mask = nvkm_rd32(device, 0x611ec0) & 0x000000ff;
+       int head;
+
+       for_each_set_bit(head, &mask, 8)
+               r535_disp_intr_head_timing(disp, head);
+
+       return IRQ_HANDLED;
+}
+
+/* Tear down the RM display object.  On suspend only the display engine
+ * object itself is freed so r535_disp_init() can recreate it on resume;
+ * the client/device/event state is destroyed only on final shutdown. */
+static void
+r535_disp_fini(struct nvkm_disp *disp, bool suspend)
+{
+       /* Nothing to do if the subdev never finished initialising. */
+       if (!disp->engine.subdev.use.enabled)
+               return;
+
+       nvkm_gsp_rm_free(&disp->rm.object);
+
+       if (!suspend) {
+               nvkm_gsp_event_dtor(&disp->rm.irq);
+               nvkm_gsp_event_dtor(&disp->rm.hpd);
+               nvkm_event_fini(&disp->rm.event);
+
+               nvkm_gsp_rm_free(&disp->rm.objcom);
+               nvkm_gsp_device_dtor(&disp->rm.device);
+               nvkm_gsp_client_dtor(&disp->rm.client);
+       }
+}
+
+/* (Re)create the RM display engine object; the handle encodes the display
+ * class in its upper bits.  Counterpart to the free in r535_disp_fini(). */
+static int
+r535_disp_init(struct nvkm_disp *disp)
+{
+       return nvkm_gsp_rm_alloc(&disp->rm.device.object, disp->func->root.oclass << 16,
+                                disp->func->root.oclass, 0, &disp->rm.object);
+}
+
+static int
+r535_disp_oneinit(struct nvkm_disp *disp)
+{
+       struct nvkm_device *device = disp->engine.subdev.device;
+       struct nvkm_gsp *gsp = device->gsp;
+       NV2080_CTRL_INTERNAL_DISPLAY_WRITE_INST_MEM_PARAMS *ctrl;
+       int ret, i;
+
+       /* RAMIN. */
+       ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL, &disp->inst);
+       if (ret)
+               return ret;
+
+       if (WARN_ON(nvkm_memory_target(disp->inst->memory) != NVKM_MEM_TARGET_VRAM))
+               return -EINVAL;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_DISPLAY_WRITE_INST_MEM,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->instMemPhysAddr = nvkm_memory_addr(disp->inst->memory);
+       ctrl->instMemSize = nvkm_memory_size(disp->inst->memory);
+       ctrl->instMemAddrSpace = ADDR_FBMEM;
+       ctrl->instMemCpuCacheAttr = NV_MEMORY_WRITECOMBINED;
+
+       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+       if (ret)
+               return ret;
+
+       /* OBJs. */
+       ret = nvkm_gsp_client_device_ctor(gsp, &disp->rm.client, &disp->rm.device);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_rm_alloc(&disp->rm.device.object, 0x00730000, NV04_DISPLAY_COMMON, 0,
+                               &disp->rm.objcom);
+       if (ret)
+               return ret;
+
+       {
+               NV2080_CTRL_INTERNAL_DISPLAY_GET_STATIC_INFO_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                          NV2080_CTRL_CMD_INTERNAL_DISPLAY_GET_STATIC_INFO,
+                                          sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               disp->wndw.mask = ctrl->windowPresentMask;
+               disp->wndw.nr = fls(disp->wndw.mask);
+               nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       }
+
+       /* */
+       {
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+               NV2080_CTRL_INTERNAL_INIT_BRIGHTC_STATE_LOAD_PARAMS *ctrl;
+               struct nvkm_gsp_object *subdevice = &disp->rm.client.gsp->internal.device.subdevice;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(subdevice,
+                                           NV2080_CTRL_CMD_INTERNAL_INIT_BRIGHTC_STATE_LOAD,
+                                           sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ctrl->status = 0x56; /* NV_ERR_NOT_SUPPORTED */
+
+               {
+                       const guid_t NBCI_DSM_GUID =
+                               GUID_INIT(0xD4A50B75, 0x65C7, 0x46F7,
+                                         0xBF, 0xB7, 0x41, 0x51, 0x4C, 0xEA, 0x02, 0x44);
+                       u64 NBCI_DSM_REV = 0x00000102;
+                       const guid_t NVHG_DSM_GUID =
+                               GUID_INIT(0x9D95A0A0, 0x0060, 0x4D48,
+                                         0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4);
+                       u64 NVHG_DSM_REV = 0x00000102;
+                       acpi_handle handle = ACPI_HANDLE(device->dev);
+
+                       if (handle && acpi_has_method(handle, "_DSM")) {
+                               bool nbci = acpi_check_dsm(handle, &NBCI_DSM_GUID, NBCI_DSM_REV,
+                                                          1ULL << 0x00000014);
+                               bool nvhg = acpi_check_dsm(handle, &NVHG_DSM_GUID, NVHG_DSM_REV,
+                                                          1ULL << 0x00000014);
+
+                               printk(KERN_ERR "bl: nbci:%d nvhg:%d\n", nbci, nvhg);
+
+                               if (nbci || nvhg) {
+                                       union acpi_object argv4 = {
+                                               .buffer.type    = ACPI_TYPE_BUFFER,
+                                               .buffer.length  = sizeof(ctrl->backLightData),
+                                               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+                                       }, *obj;
+
+                                       obj = acpi_evaluate_dsm(handle, nbci ? &NBCI_DSM_GUID : &NVHG_DSM_GUID,
+                                                               0x00000102, 0x14, &argv4);
+                                       if (!obj) {
+                                               acpi_handle_info(handle, "failed to evaluate _DSM\n");
+                                       } else {
+                                               printk(KERN_ERR "bl: obj type %d\n", obj->type);
+                                               printk(KERN_ERR "bl: obj len %d\n", obj->package.count);
+
+                                               for (int i = 0; i < obj->package.count; i++) {
+                                                       union acpi_object *elt = &obj->package.elements[i];
+                                                       u32 size;
+
+                                                       if (elt->integer.value & ~0xffffffffULL)
+                                                               size = 8;
+                                                       else
+                                                               size = 4;
+
+                                                       printk(KERN_ERR "elt %03d: type %d size %d\n", i, elt->type, size);
+                                                       memcpy(&ctrl->backLightData[ctrl->backLightDataSize], &elt->integer.value, size);
+                                                       ctrl->backLightDataSize += size;
+                                               }
+
+                                               printk(KERN_ERR "bl: data size %d\n", ctrl->backLightDataSize);
+                                               ctrl->status = 0;
+                                               ACPI_FREE(obj);
+                                       }
+
+                                       kfree(argv4.buffer.pointer);
+                               }
+                       }
+               }
+
+               ret = nvkm_gsp_rm_ctrl_wr(subdevice, ctrl);
+               if (ret)
+                       return ret;
+#endif
+       }
+
+       /* */
+       {
+               NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
+                                           NV0073_CTRL_CMD_DP_SET_MANUAL_DISPLAYPORT,
+                                           sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ret = nvkm_gsp_rm_ctrl_wr(&disp->rm.objcom, ctrl);
+               if (ret)
+                       return ret;
+       }
+
+       /* */
+       {
+               NV0073_CTRL_SYSTEM_GET_NUM_HEADS_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+                                          NV0073_CTRL_CMD_SYSTEM_GET_NUM_HEADS, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               disp->head.nr = ctrl->numHeads;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+       }
+
+       /* */
+       {
+               NV0073_CTRL_SPECIFIC_GET_ALL_HEAD_MASK_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+                                          NV0073_CTRL_CMD_SPECIFIC_GET_ALL_HEAD_MASK,
+                                          sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               disp->head.mask = ctrl->headMask;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+               for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
+                       ret = nvkm_head_new_(&r535_head, disp, i);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       disp->sor.nr = disp->func->sor.cnt(disp, &disp->sor.mask);
+       nvkm_debug(&disp->engine.subdev, "   SOR(s): %d (%02lx)\n", disp->sor.nr, disp->sor.mask);
+       for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
+               ret = disp->func->sor.new(disp, i);
+               if (ret)
+                       return ret;
+       }
+
+       /* */
+       {
+               NV0073_CTRL_SYSTEM_GET_SUPPORTED_PARAMS *ctrl;
+               unsigned long mask;
+               int i;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&disp->rm.objcom,
+                                          NV0073_CTRL_CMD_SYSTEM_GET_SUPPORTED, sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               mask = ctrl->displayMask;
+               nvkm_gsp_rm_ctrl_done(&disp->rm.objcom, ctrl);
+
+               for_each_set_bit(i, &mask, 32) {
+                       ret = r535_outp_new(disp, i);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       ret = nvkm_event_init(&r535_disp_event, &gsp->subdev, 3, 32, &disp->rm.event);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0000, NV2080_NOTIFIERS_HOTPLUG,
+                                        r535_disp_hpd, &disp->rm.hpd);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_device_event_ctor(&disp->rm.device, 0x007e0001, NV2080_NOTIFIERS_DP_IRQ,
+                                        r535_disp_irq, &disp->rm.irq);
+       if (ret)
+               return ret;
+
+       /* RAMHT. */
+       ret = nvkm_ramht_new(device, disp->func->ramht_size ? disp->func->ramht_size :
+                            0x1000, 0, disp->inst, &disp->ramht);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_intr_stall(gsp, disp->engine.subdev.type, disp->engine.subdev.inst);
+       if (ret < 0)
+               return ret;
+
+       ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &disp->engine.subdev,
+                           r535_disp_intr, &disp->engine.subdev.inth);
+       if (ret)
+               return ret;
+
+       nvkm_inth_allow(&disp->engine.subdev.inth);
+       return 0;
+}
+
+static void
+r535_disp_dtor(struct nvkm_disp *disp)
+{
+       /* disp->func is the nvkm_disp_func allocated in r535_disp_new(),
+        * handed over to the disp object at construction time.
+        */
+       kfree(disp->func);
+}
+
+/* Construct a GSP-RM-backed display engine.  Clones the relevant hooks from
+ * the HW nvkm_disp_func @hw, overriding ctor/dtor/init/fini and channel
+ * implementations with their r535 equivalents, then builds the disp object.
+ *
+ * Returns 0 on success (ownership of the allocated func table passes to
+ * *pdisp, freed later by r535_disp_dtor()), negative errno on failure.
+ */
+int
+r535_disp_new(const struct nvkm_disp_func *hw, struct nvkm_device *device,
+             enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
+{
+       struct nvkm_disp_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm) + 6 * sizeof(rm->user[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_disp_dtor;
+       rm->oneinit = r535_disp_oneinit;
+       rm->init = r535_disp_init;
+       rm->fini = r535_disp_fini;
+       rm->uevent = hw->uevent;
+       rm->sor.cnt = r535_sor_cnt;
+       rm->sor.new = r535_sor_new;
+       rm->ramht_size = hw->ramht_size;
+
+       rm->root = hw->root;
+
+       /* Copy the HW user-class table, swapping in RM channel impls. */
+       for (int i = 0; hw->user[i].ctor; i++) {
+               switch (hw->user[i].base.oclass & 0xff) {
+               case 0x73: rm->user[i] = hw->user[i]; break;
+               case 0x7d: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_core; break;
+               case 0x7e: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wndw; break;
+               case 0x7b: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_wimm; break;
+               case 0x7a: rm->user[i] = hw->user[i]; rm->user[i].chan = &r535_curs; break;
+               default:
+                       WARN_ON(1);
+                       continue;
+               }
+       }
+
+       ret = nvkm_disp_new_(rm, device, type, inst, pdisp);
+       if (ret) {
+               /* Must bail out here: *pdisp is not valid on failure, so the
+                * mutex_init() below would dereference a NULL/stale pointer.
+                */
+               kfree(rm);
+               return ret;
+       }
+
+       mutex_init(&(*pdisp)->super.mutex); //XXX
+       return 0;
+}
index 19f5d3a6035eb3cc900eb58c7fb33a7c571b2ce2..dcb9f8ba374ca6ba80236b33c2affdd28cabe28e 100644 (file)
@@ -25,6 +25,7 @@
 #include "ior.h"
 
 #include <core/gpuobj.h>
+#include <subdev/gsp.h>
 #include <subdev/timer.h>
 
 #include <nvif/class.h>
@@ -233,5 +234,8 @@ int
 tu102_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_disp **pdisp)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_disp_new(&tu102_disp, device, type, inst, pdisp);
+
        return nvkm_disp_new_(&tu102_disp, device, type, inst, pdisp);
 }
index 104f6ee9ae6d50c17de4f8a237972341699ffd03..2dab6612c4fc84651e8b8d7fa2fd7056c057399a 100644 (file)
 
 #include <nvif/if0011.h>
 
+/* Forward GSP-RM display-ID event bits (NVKM_DPYID_*) to userspace as an
+ * NVIF connector event, translating each bit to its NVIF_CONN_EVENT_V0_*
+ * equivalent before delivering it to the client that armed the uevent.
+ */
+static int
+nvkm_uconn_uevent_gsp(struct nvkm_object *object, u64 token, u32 bits)
+{
+       union nvif_conn_event_args args;
+
+       args.v0.version = 0;
+       args.v0.types = 0;
+       if (bits & NVKM_DPYID_PLUG)
+               args.v0.types |= NVIF_CONN_EVENT_V0_PLUG;
+       if (bits & NVKM_DPYID_UNPLUG)
+               args.v0.types |= NVIF_CONN_EVENT_V0_UNPLUG;
+       if (bits & NVKM_DPYID_IRQ)
+               args.v0.types |= NVIF_CONN_EVENT_V0_IRQ;
+
+       return object->client->event(token, &args, sizeof(args.v0));
+}
+
 static int
 nvkm_uconn_uevent_aux(struct nvkm_object *object, u64 token, u32 bits)
 {
@@ -78,13 +95,14 @@ static int
 nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_uevent *uevent)
 {
        struct nvkm_conn *conn = nvkm_uconn(object);
-       struct nvkm_device *device = conn->disp->engine.subdev.device;
+       struct nvkm_disp *disp = conn->disp;
+       struct nvkm_device *device = disp->engine.subdev.device;
        struct nvkm_outp *outp;
        union nvif_conn_event_args *args = argv;
        u64 bits = 0;
 
        if (!uevent) {
-               if (conn->info.hpd == DCB_GPIO_UNUSED)
+               if (!disp->rm.client.gsp && conn->info.hpd == DCB_GPIO_UNUSED)
                        return -ENOSYS;
                return 0;
        }
@@ -100,6 +118,15 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
        if (&outp->head == &conn->disp->outps)
                return -EINVAL;
 
+       if (disp->rm.client.gsp) {
+               if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG  ) bits |= NVKM_DPYID_PLUG;
+               if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_DPYID_UNPLUG;
+               if (args->v0.types & NVIF_CONN_EVENT_V0_IRQ   ) bits |= NVKM_DPYID_IRQ;
+
+               return nvkm_uevent_add(uevent, &disp->rm.event, outp->index, bits,
+                                      nvkm_uconn_uevent_gsp);
+       }
+
        if (outp->dp.aux && !outp->info.location) {
                if (args->v0.types & NVIF_CONN_EVENT_V0_PLUG  ) bits |= NVKM_I2C_PLUG;
                if (args->v0.types & NVIF_CONN_EVENT_V0_UNPLUG) bits |= NVKM_I2C_UNPLUG;
index d619b40a42c36f0a1b0986921de134732c5dca30..fd5ee9f0af360c93b117ef1c4affe7ac8b55ba48 100644 (file)
@@ -318,14 +318,14 @@ nvkm_falcon_init(struct nvkm_engine *engine)
 }
 
 static void *
-nvkm_falcon_dtor(struct nvkm_engine *engine)
+nvkm_falcon_dtor_engine(struct nvkm_engine *engine)
 {
        return nvkm_falcon(engine);
 }
 
 static const struct nvkm_engine_func
 nvkm_falcon = {
-       .dtor = nvkm_falcon_dtor,
+       .dtor = nvkm_falcon_dtor_engine,
        .oneinit = nvkm_falcon_oneinit,
        .init = nvkm_falcon_init,
        .fini = nvkm_falcon_fini,
index 5a074b9970abe6612e0ee93ddfc32f1939c9701d..aff92848abfee8b346479d896dfcb10ffdc9fbff 100644 (file)
@@ -26,5 +26,7 @@ nvkm-y += nvkm/engine/fifo/tu102.o
 nvkm-y += nvkm/engine/fifo/ga100.o
 nvkm-y += nvkm/engine/fifo/ga102.o
 
+nvkm-y += nvkm/engine/fifo/r535.o
+
 nvkm-y += nvkm/engine/fifo/ucgrp.o
 nvkm-y += nvkm/engine/fifo/uchan.o
index 5db37247dc29b2f1f9062981b2bf535fe2632c69..22443fe4a39ff6e036279dde140932ab26f20a6c 100644 (file)
@@ -210,6 +210,8 @@ nvkm_fifo_info(struct nvkm_engine *engine, u64 mthd, u64 *data)
                                CASE(SEC2  );
                                CASE(NVDEC );
                                CASE(NVENC );
+                               CASE(NVJPG );
+                               CASE(OFA   );
                                default:
                                        WARN_ON(1);
                                        break;
@@ -347,8 +349,14 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
        nvkm_chid_unref(&fifo->cgid);
        nvkm_chid_unref(&fifo->chid);
 
+       mutex_destroy(&fifo->userd.mutex);
+
        nvkm_event_fini(&fifo->nonstall.event);
        mutex_destroy(&fifo->mutex);
+
+       if (fifo->func->dtor)
+               fifo->func->dtor(fifo);
+
        return fifo;
 }
 
@@ -383,5 +391,8 @@ nvkm_fifo_new_(const struct nvkm_fifo_func *func, struct nvkm_device *device,
        spin_lock_init(&fifo->lock);
        mutex_init(&fifo->mutex);
 
+       INIT_LIST_HEAD(&fifo->userd.list);
+       mutex_init(&fifo->userd.mutex);
+
        return nvkm_engine_ctor(&nvkm_fifo, device, type, inst, true, &fifo->engine);
 }
index ea53fb3d5d06f1b3fc0f862eb133d70c575dcbe3..814db9daa194c4ece28cd3d7188f917bea7fdfb2 100644 (file)
@@ -156,6 +156,9 @@ nvkm_cgrp_vctx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_c
                atomic_inc(&vctx->vmm->engref[engn->engine->subdev.type]);
 
        /* Allocate the HW structures. */
+       if (engn->func->ctor2) {
+               ret = engn->func->ctor2(engn, vctx, chan);
+       } else
        if (engn->func->bind) {
                ret = nvkm_object_bind(vctx->ectx->object, NULL, 0, &vctx->inst);
                if (ret == 0 && engn->func->ctor)
index b7c9d6115bce37a6f55ca55ad4bd44467e0626a8..87a62d4ff4bda45ee532b0598bbe99b247566879 100644 (file)
@@ -275,13 +275,17 @@ nvkm_chan_del(struct nvkm_chan **pchan)
        nvkm_gpuobj_del(&chan->cache);
        nvkm_gpuobj_del(&chan->ramfc);
 
-       nvkm_memory_unref(&chan->userd.mem);
-
        if (chan->cgrp) {
-               nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
+               if (!chan->func->id_put)
+                       nvkm_chid_put(chan->cgrp->runl->chid, chan->id, &chan->cgrp->lock);
+               else
+                       chan->func->id_put(chan);
+
                nvkm_cgrp_unref(&chan->cgrp);
        }
 
+       nvkm_memory_unref(&chan->userd.mem);
+
        if (chan->vmm) {
                nvkm_vmm_part(chan->vmm, chan->inst->memory);
                nvkm_vmm_unref(&chan->vmm);
@@ -438,7 +442,32 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
        }
 
        /* Allocate channel ID. */
-       chan->id = nvkm_chid_get(runl->chid, chan);
+       if (!chan->func->id_get) {
+               chan->id = nvkm_chid_get(runl->chid, chan);
+               if (chan->id >= 0) {
+                       if (func->userd->bar < 0) {
+                               if (ouserd + chan->func->userd->size >=
+                                       nvkm_memory_size(userd)) {
+                                       RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+                                       return -EINVAL;
+                               }
+
+                               ret = nvkm_memory_kmap(userd, &chan->userd.mem);
+                               if (ret) {
+                                       RUNL_DEBUG(runl, "userd %d", ret);
+                                       return ret;
+                               }
+
+                               chan->userd.base = ouserd;
+                       } else {
+                               chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
+                               chan->userd.base = chan->id * chan->func->userd->size;
+                       }
+               }
+       } else {
+               chan->id = chan->func->id_get(chan, userd, ouserd);
+       }
+
        if (chan->id < 0) {
                RUNL_ERROR(runl, "!chids");
                return -ENOSPC;
@@ -448,24 +477,6 @@ nvkm_chan_new_(const struct nvkm_chan_func *func, struct nvkm_runl *runl, int ru
                cgrp->id = chan->id;
 
        /* Initialise USERD. */
-       if (func->userd->bar < 0) {
-               if (ouserd + chan->func->userd->size >= nvkm_memory_size(userd)) {
-                       RUNL_DEBUG(runl, "ouserd %llx", ouserd);
-                       return -EINVAL;
-               }
-
-               ret = nvkm_memory_kmap(userd, &chan->userd.mem);
-               if (ret) {
-                       RUNL_DEBUG(runl, "userd %d", ret);
-                       return ret;
-               }
-
-               chan->userd.base = ouserd;
-       } else {
-               chan->userd.mem = nvkm_memory_ref(fifo->userd.mem);
-               chan->userd.base = chan->id * chan->func->userd->size;
-       }
-
        if (chan->func->userd->clear)
                chan->func->userd->clear(chan);
 
index 85b94f699128d3db025ff2042b45d1293bd55a92..013682a709d56e3901a09a4f89f841e8d84145d8 100644 (file)
@@ -17,6 +17,9 @@ struct nvkm_cctx {
 };
 
 struct nvkm_chan_func {
+       int (*id_get)(struct nvkm_chan *, struct nvkm_memory *userd, u64 ouserd);
+       void (*id_put)(struct nvkm_chan *);
+
        const struct nvkm_chan_func_inst {
                u32 size;
                bool zero;
index c56d2a839efbaffdb6c0a01843973863b56c30de..c8ce7ff187135b0992b52a3c62d8a48593b2e625 100644 (file)
@@ -27,6 +27,7 @@
 #include "runq.h"
 
 #include <core/gpuobj.h>
+#include <subdev/gsp.h>
 #include <subdev/top.h>
 #include <subdev/vfn.h>
 
@@ -607,5 +608,8 @@ int
 ga100_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_fifo **pfifo)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fifo_new(&ga100_fifo, device, type, inst, pfifo);
+
        return nvkm_fifo_new_(&ga100_fifo, device, type, inst, pfifo);
 }
index 2cdf5da339b60bc609a8ebd1d45160dd2722430b..755235f55b3aca564d7d182b152251f53d131048 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static const struct nvkm_fifo_func
@@ -34,12 +36,15 @@ ga102_fifo = {
        .engn = &ga100_engn,
        .engn_ce = &ga100_engn_ce,
        .cgrp = {{ 0, 0, KEPLER_CHANNEL_GROUP_A  }, &ga100_cgrp, .force = true },
-       .chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_B }, &ga100_chan },
+       .chan = {{ 0, 0, AMPERE_CHANNEL_GPFIFO_A }, &ga100_chan },
 };
 
 int
 ga102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_fifo **pfifo)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fifo_new(&ga102_fifo, device, type, inst, pfifo);
+
        return nvkm_fifo_new_(&ga102_fifo, device, type, inst, pfifo);
 }
index 4d448be19224a8a0f86e7bfe08eaa66b3a70e117..a0f3277605a5cf4e21d6501e23c07b4704c4be07 100644 (file)
@@ -13,6 +13,8 @@ struct nvkm_runq;
 struct nvkm_vctx;
 
 struct nvkm_fifo_func {
+       void (*dtor)(struct nvkm_fifo *);
+
        int (*chid_nr)(struct nvkm_fifo *);
        int (*chid_ctor)(struct nvkm_fifo *, int nr);
        int (*runq_nr)(struct nvkm_fifo *);
@@ -58,6 +60,8 @@ struct nvkm_fifo_func {
        } chan;
 };
 
+int r535_fifo_new(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+                 struct nvkm_fifo **);
 int nvkm_fifo_new_(const struct nvkm_fifo_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
                   struct nvkm_fifo **);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/r535.c
new file mode 100644 (file)
index 0000000..3adbb05
--- /dev/null
@@ -0,0 +1,664 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "cgrp.h"
+#include "chan.h"
+#include "chid.h"
+#include "runl.h"
+
+#include <core/gpuobj.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <subdev/vfn.h>
+#include <engine/gr.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080_notification.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080ce.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080fifo.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrla06f/ctrla06fgpfifo.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_fifo_nvoc.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gpu_engine_type.h>
+
+/* Compose the doorbell token for a channel: runlist ID in bits 31:16,
+ * channel ID in bits 15:0.
+ */
+static u32
+r535_chan_doorbell_handle(struct nvkm_chan *chan)
+{
+       const u32 runl_id = chan->cgrp->runl->id;
+
+       return (runl_id << 16) | chan->id;
+}
+
+/* Intentionally empty: channel stop is not driven from the host side here
+ * (NOTE(review): presumably GSP-RM owns channel scheduling — see the
+ * NVA06F_CTRL_CMD_GPFIFO_SCHEDULE call in r535_chan_ramfc_write()).
+ */
+static void
+r535_chan_stop(struct nvkm_chan *chan)
+{
+}
+
+/* Intentionally empty: the channel is already scheduled by GSP-RM at
+ * creation time (NVA06F_CTRL_CMD_GPFIFO_SCHEDULE in r535_chan_ramfc_write()),
+ * so there is nothing for the host to do on start.
+ */
+static void
+r535_chan_start(struct nvkm_chan *chan)
+{
+}
+
+/* Tear down the RM-side channel state created by r535_chan_ramfc_write():
+ * free the RM channel object, the DMA-coherent method buffer, and drop the
+ * extra grctx reference taken by r535_gr_ctor() (if any).
+ */
+static void
+r535_chan_ramfc_clear(struct nvkm_chan *chan)
+{
+       struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+
+       nvkm_gsp_rm_free(&chan->rm.object);
+
+       dma_free_coherent(fifo->engine.subdev.device->dev, fifo->rm.mthdbuf_size,
+                         chan->rm.mthdbuf.ptr, chan->rm.mthdbuf.addr);
+
+       nvkm_cgrp_vctx_put(chan->cgrp, &chan->rm.grctx);
+}
+
+/* Number of channels whose USERD slots share a single USERD page. */
+#define CHID_PER_USERD 8
+
+/* Create the GSP-RM channel object backing this nvkm_chan: allocate the
+ * method buffer, describe the channel's instance/USERD/RAMFC/mthdbuf memory
+ * in NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS, then bind the channel to its
+ * engine and enable scheduling via NVA06F control calls.
+ *
+ * Resources allocated here are released by r535_chan_ramfc_clear().
+ * Returns 0 on success, negative errno on failure.
+ */
+static int
+r535_chan_ramfc_write(struct nvkm_chan *chan, u64 offset, u64 length, u32 devm, bool priv)
+{
+       struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+       struct nvkm_engn *engn;
+       struct nvkm_device *device = fifo->engine.subdev.device;
+       NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+       const int userd_p = chan->id / CHID_PER_USERD;
+       const int userd_i = chan->id % CHID_PER_USERD;
+       u32 eT = ~0;
+       int ret;
+
+       /* Force GR one-time init first (NOTE(review): presumably so RM has
+        * the GR context buffer information it needs for channel alloc —
+        * confirm against the GR oneinit path).
+        */
+       if (unlikely(device->gr && !device->gr->engine.subdev.oneinit)) {
+               ret = nvkm_subdev_oneinit(&device->gr->engine.subdev);
+               if (ret)
+                       return ret;
+       }
+
+       /* Use the first engine on the channel's runlist as the RM engine type. */
+       nvkm_runl_foreach_engn(engn, chan->cgrp->runl) {
+               eT = engn->id;
+               break;
+       }
+
+       if (WARN_ON(eT == ~0))
+               return -EINVAL;
+
+       /* Method buffer required by RM; freed in r535_chan_ramfc_clear(). */
+       chan->rm.mthdbuf.ptr = dma_alloc_coherent(fifo->engine.subdev.device->dev,
+                                                 fifo->rm.mthdbuf_size,
+                                                 &chan->rm.mthdbuf.addr, GFP_KERNEL);
+       if (!chan->rm.mthdbuf.ptr)
+               return -ENOMEM;
+
+       /* 0xf1f00000|chid is the client-chosen RM handle for the channel. */
+       args = nvkm_gsp_rm_alloc_get(&chan->vmm->rm.device.object, 0xf1f00000 | chan->id,
+                                    fifo->func->chan.user.oclass, sizeof(*args),
+                                    &chan->rm.object);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->gpFifoOffset = offset;
+       args->gpFifoEntries = length / 8;       /* GPFIFO entries are 8 bytes. */
+
+       args->flags  = NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL);
+       args->flags |= NVDEF(NVOS04, FLAGS, VPR, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE);
+       args->flags |= NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, chan->runq);
+       if (!priv)
+               args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, FALSE);
+       else
+               args->flags |= NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE);
+       args->flags |= NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE);
+
+       /* Pin the channel to a fixed USERD page/index derived from its ID. */
+       args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, userd_i);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE);
+       args->flags |= NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, userd_p);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE);
+
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT);
+       args->flags |= NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE);
+       args->flags |= NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+
+       args->hVASpace = chan->vmm->rm.object.handle;
+       args->engineType = eT;
+
+       /* NOTE(review): addressSpace=2/cacheAttrib=1 appears to select
+        * vidmem/cached, and 1/0 sysmem/uncached — confirm against RM SDK.
+        */
+       args->instanceMem.base = chan->inst->addr;
+       args->instanceMem.size = chan->inst->size;
+       args->instanceMem.addressSpace = 2;
+       args->instanceMem.cacheAttrib = 1;
+
+       args->userdMem.base = nvkm_memory_addr(chan->userd.mem) + chan->userd.base;
+       args->userdMem.size = fifo->func->chan.func->userd->size;
+       args->userdMem.addressSpace = 2;
+       args->userdMem.cacheAttrib = 1;
+
+       /* RAMFC occupies the first 0x200 bytes of instance memory. */
+       args->ramfcMem.base = chan->inst->addr + 0;
+       args->ramfcMem.size = 0x200;
+       args->ramfcMem.addressSpace = 2;
+       args->ramfcMem.cacheAttrib = 1;
+
+       args->mthdbufMem.base = chan->rm.mthdbuf.addr;
+       args->mthdbufMem.size = fifo->rm.mthdbuf_size;
+       args->mthdbufMem.addressSpace = 1;
+       args->mthdbufMem.cacheAttrib = 0;
+
+       if (!priv)
+               args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, USER);
+       else
+               args->internalFlags = NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN);
+       args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE);
+       args->internalFlags |= NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+       ret = nvkm_gsp_rm_alloc_wr(&chan->rm.object, args);
+       if (ret)
+               return ret;
+
+       /* Bind the channel to its engine, then enable its scheduling. */
+       if (1) {
+               NVA06F_CTRL_GPFIFO_SCHEDULE_PARAMS *ctrl;
+
+               if (1) {
+                       NVA06F_CTRL_BIND_PARAMS *ctrl;
+
+                       ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
+                                                   NVA06F_CTRL_CMD_BIND, sizeof(*ctrl));
+                       if (WARN_ON(IS_ERR(ctrl)))
+                               return PTR_ERR(ctrl);
+
+                       ctrl->engineType = eT;
+
+                       ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
+                       if (ret)
+                               return ret;
+               }
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&chan->rm.object,
+                                           NVA06F_CTRL_CMD_GPFIFO_SCHEDULE, sizeof(*ctrl));
+               if (WARN_ON(IS_ERR(ctrl)))
+                       return PTR_ERR(ctrl);
+
+               ctrl->bEnable = 1;
+               ret = nvkm_gsp_rm_ctrl_wr(&chan->rm.object, ctrl);
+       }
+
+       return ret;
+}
+
+/* RAMFC handling for RM-managed channels: programmed and torn down through
+ * GSP-RM object calls rather than by writing instance memory directly.
+ */
+static const struct nvkm_chan_func_ramfc
+r535_chan_ramfc = {
+       .write = r535_chan_ramfc_write,
+       .clear = r535_chan_ramfc_clear,
+       .devm = 0xfff,
+       .priv = true,
+};
+
+/* Book-keeping for one block of USERD memory shared by up to CHID_PER_USERD
+ * channels.  Entries live on fifo->userd.list, protected by fifo->userd.mutex
+ * (see r535_chan_id_get()/r535_chan_id_put()).
+ */
+struct r535_chan_userd {
+       struct nvkm_memory *mem;        /* caller's USERD memory (referenced) */
+       struct nvkm_memory *map;        /* kmap of mem, handed out to channels */
+       int chid;                       /* base channel ID for this block */
+       u32 used;                       /* bitmask of in-use slots */
+
+       struct list_head head;          /* entry in fifo->userd.list */
+};
+
+/* Release a channel's ID and USERD slot.  Locates the shared-USERD tracking
+ * entry whose mapping backs this channel, clears the channel's slot bit,
+ * and — once no slots remain in use — drops the memory references, returns
+ * the base channel ID, and unlinks the entry.
+ */
+static void
+r535_chan_id_put(struct nvkm_chan *chan)
+{
+       struct nvkm_runl *runl = chan->cgrp->runl;
+       struct nvkm_fifo *fifo = runl->fifo;
+       struct r535_chan_userd *userd;
+
+       mutex_lock(&fifo->userd.mutex);
+       list_for_each_entry(userd, &fifo->userd.list, head) {
+               if (userd->map == chan->userd.mem) {
+                       /* Slot index within the block, from the USERD offset. */
+                       u32 chid = chan->userd.base / chan->func->userd->size;
+
+                       userd->used &= ~BIT(chid);
+                       if (!userd->used) {
+                               nvkm_memory_unref(&userd->map);
+                               nvkm_memory_unref(&userd->mem);
+                               nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
+                               list_del(&userd->head);
+                       }
+
+                       break;
+               }
+       }
+       mutex_unlock(&fifo->userd.mutex);
+
+}
+
+/* Allocate a channel ID and a USERD slot from the shared-USERD tracking
+ * list.  Caller must hold fifo->userd.mutex.  Returns the effective channel
+ * ID (base chid * CHID_PER_USERD + slot) on success, or a negative errno
+ * (-EINVAL bad offset/too-small memory, -EBUSY slot taken, -ENOMEM).
+ */
+static int
+r535_chan_id_get_locked(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
+{
+       const u32 userd_size = CHID_PER_USERD * chan->func->userd->size;
+       struct nvkm_runl *runl = chan->cgrp->runl;
+       struct nvkm_fifo *fifo = runl->fifo;
+       struct r535_chan_userd *userd;
+       u32 chid;
+       int ret;
+
+       /* ouserd must be a userd-size-aligned slot inside one USERD block. */
+       if (ouserd + chan->func->userd->size >= userd_size ||
+           (ouserd & (chan->func->userd->size - 1))) {
+               RUNL_DEBUG(runl, "ouserd %llx", ouserd);
+               return -EINVAL;
+       }
+
+       /* Slot index within the block. */
+       chid = div_u64(ouserd, chan->func->userd->size);
+
+       /* Look for an existing tracking entry for this USERD memory. */
+       list_for_each_entry(userd, &fifo->userd.list, head) {
+               if (userd->mem == muserd) {
+                       if (userd->used & BIT(chid))
+                               return -EBUSY;
+                       break;
+               }
+       }
+
+       /* Loop ran to completion without a match: first use of this memory,
+        * so create a new tracking entry and reserve a base channel ID.
+        */
+       if (&userd->head == &fifo->userd.list) {
+               if (nvkm_memory_size(muserd) < userd_size) {
+                       RUNL_DEBUG(runl, "userd too small");
+                       return -EINVAL;
+               }
+
+               userd = kzalloc(sizeof(*userd), GFP_KERNEL);
+               if (!userd)
+                       return -ENOMEM;
+
+               userd->chid = nvkm_chid_get(runl->chid, chan);
+               if (userd->chid < 0) {
+                       ret = userd->chid;
+                       kfree(userd);
+                       return ret;
+               }
+
+               userd->mem = nvkm_memory_ref(muserd);
+
+               ret = nvkm_memory_kmap(userd->mem, &userd->map);
+               if (ret) {
+                       nvkm_chid_put(runl->chid, userd->chid, &chan->cgrp->lock);
+                       kfree(userd);
+                       return ret;
+               }
+
+
+               list_add(&userd->head, &fifo->userd.list);
+       }
+
+       userd->used |= BIT(chid);
+
+       /* Hand the channel a reference to the mapped USERD + its offset. */
+       chan->userd.mem = nvkm_memory_ref(userd->map);
+       chan->userd.base = ouserd;
+
+       return (userd->chid * CHID_PER_USERD) + chid;
+}
+
+/* Locked wrapper around r535_chan_id_get_locked(): serialises USERD
+ * book-keeping against concurrent channel construction/destruction.
+ */
+static int
+r535_chan_id_get(struct nvkm_chan *chan, struct nvkm_memory *muserd, u64 ouserd)
+{
+       struct nvkm_fifo *fifo = chan->cgrp->runl->fifo;
+       int id;
+
+       mutex_lock(&fifo->userd.mutex);
+       id = r535_chan_id_get_locked(chan, muserd, ouserd);
+       mutex_unlock(&fifo->userd.mutex);
+
+       return id;
+}
+
+/* Channel functions for GSP-RM-managed channels: IDs/USERD come from the
+ * shared-USERD allocator above, RAMFC is programmed via RM, and start/stop
+ * are no-ops on the host side.
+ */
+static const struct nvkm_chan_func
+r535_chan = {
+       .id_get = r535_chan_id_get,
+       .id_put = r535_chan_id_put,
+       .inst = &gf100_chan_inst,
+       .userd = &gv100_chan_userd,
+       .ramfc = &r535_chan_ramfc,
+       .start = r535_chan_start,
+       .stop = r535_chan_stop,
+       .doorbell_handle = r535_chan_doorbell_handle,
+};
+
+/* No channel-group hooks are needed for RM-managed FIFO. */
+static const struct nvkm_cgrp_func
+r535_cgrp = {
+};
+
+/* Look up the non-stall interrupt vector GSP-RM assigned to this engine.
+ * Returns the vector, or a (WARNed) negative value if RM has none for it.
+ */
+static int
+r535_engn_nonstall(struct nvkm_engn *engn)
+{
+       struct nvkm_subdev *subdev = &engn->engine->subdev;
+       const int vector = nvkm_gsp_intr_nonstall(subdev->device->gsp,
+                                                 subdev->type, subdev->inst);
+
+       WARN_ON(vector < 0);
+       return vector;
+}
+
+/* Copy engines only need their non-stall interrupt vector wired up. */
+static const struct nvkm_engn_func
+r535_ce = {
+       .nonstall = r535_engn_nonstall,
+};
+
+/* ctor2 hook for GR: invoked with the owning channel (unlike .ctor).
+ * Always returns 0.
+ */
+static int
+r535_gr_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+       /* RM requires GR context buffers to remain mapped until after the
+        * channel has been destroyed (as opposed to after the last gr obj
+        * has been deleted).
+        *
+        * Take an extra ref here, which will be released once the channel
+        * object has been deleted.
+        */
+       refcount_inc(&vctx->refs);
+       chan->rm.grctx = vctx;  /* dropped by r535_chan_ramfc_clear() */
+       return 0;
+}
+
+/* GR engine hooks: non-stall interrupt plus grctx lifetime pinning. */
+static const struct nvkm_engn_func
+r535_gr = {
+       .nonstall = r535_engn_nonstall,
+       .ctor2 = r535_gr_ctor,
+};
+
+/* Promote a falcon engine context buffer to GSP-RM for the given channel
+ * via NV2080_CTRL_CMD_GPU_PROMOTE_CTX, passing the buffer's GPU virtual
+ * address and size.  Returns 0 on success, negative errno on failure.
+ */
+static int
+r535_flcn_bind(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+       struct nvkm_gsp_client *client = &chan->vmm->rm.client;
+       NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&chan->vmm->rm.device.subdevice,
+                                   NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->hClient = client->object.handle;
+       ctrl->hObject = chan->rm.object.handle;
+       ctrl->hChanClient = client->object.handle;
+       ctrl->virtAddress = vctx->vma->addr;
+       ctrl->size = vctx->inst->size;
+       ctrl->engineType = engn->id;
+       ctrl->ChID = chan->id;
+
+       return nvkm_gsp_rm_ctrl_wr(&chan->vmm->rm.device.subdevice, ctrl);
+}
+
+/* Allocate a falcon context buffer of the RM-reported size, map it into the
+ * channel's VMM at a 4KiB-aligned VA, and promote it to RM.
+ *
+ * NOTE(review): on mid-function failure the inst/vma allocated so far are
+ * presumably released by the caller's vctx teardown path — confirm against
+ * nvkm_cgrp_vctx_put().
+ */
+static int
+r535_flcn_ctor(struct nvkm_engn *engn, struct nvkm_vctx *vctx, struct nvkm_chan *chan)
+{
+       int ret;
+
+       /* engn->rm.size must have been populated (from RM) before use. */
+       if (WARN_ON(!engn->rm.size))
+               return -EINVAL;
+
+       ret = nvkm_gpuobj_new(engn->engine->subdev.device, engn->rm.size, 0, true, NULL,
+                             &vctx->inst);
+       if (ret)
+               return ret;
+
+       ret = nvkm_vmm_get(vctx->vmm, 12, vctx->inst->size, &vctx->vma);
+       if (ret)
+               return ret;
+
+       ret = nvkm_memory_map(vctx->inst, 0, vctx->vmm, vctx->vma, NULL, 0);
+       if (ret)
+               return ret;
+
+       return r535_flcn_bind(engn, vctx, chan);
+}
+
+/* Falcon-based engine hooks (NVDEC/NVENC/etc.): ctx alloc+promote via RM. */
+static const struct nvkm_engn_func
+r535_flcn = {
+       .nonstall = r535_engn_nonstall,
+       .ctor2 = r535_flcn_ctor,
+};
+
+/* Intentionally empty: runlist management is not done from the host here
+ * (NOTE(review): presumably GSP-RM owns runlist submission).
+ */
+static void
+r535_runl_allow(struct nvkm_runl *runl, u32 engm)
+{
+}
+
+/* Intentionally empty counterpart to r535_runl_allow(); the hook must exist
+ * for the core, but there is no host-side runlist blocking to perform.
+ */
+static void
+r535_runl_block(struct nvkm_runl *runl, u32 engm)
+{
+}
+
+/* Runlist hooks for RM-managed FIFO: only no-op block/allow are provided. */
+static const struct nvkm_runl_func
+r535_runl = {
+       .block = r535_runl_block,
+       .allow = r535_runl_allow,
+};
+
+static int
+r535_fifo_2080_type(enum nvkm_subdev_type type, int inst)
+{
+       switch (type) {
+       case NVKM_ENGINE_GR: return NV2080_ENGINE_TYPE_GR0;
+       case NVKM_ENGINE_CE: return NV2080_ENGINE_TYPE_COPY0 + inst;
+       case NVKM_ENGINE_SEC2: return NV2080_ENGINE_TYPE_SEC2;
+       case NVKM_ENGINE_NVDEC: return NV2080_ENGINE_TYPE_NVDEC0 + inst;
+       case NVKM_ENGINE_NVENC: return NV2080_ENGINE_TYPE_NVENC0 + inst;
+       case NVKM_ENGINE_NVJPG: return NV2080_ENGINE_TYPE_NVJPEG0 + inst;
+       case NVKM_ENGINE_OFA: return NV2080_ENGINE_TYPE_OFA;
+       case NVKM_ENGINE_SW: return NV2080_ENGINE_TYPE_SW;
+       default:
+               break;
+       }
+
+       WARN_ON(1);
+       return -EINVAL;
+}
+
+static int
+r535_fifo_engn_type(RM_ENGINE_TYPE rm, enum nvkm_subdev_type *ptype)
+{
+       switch (rm) {
+       case RM_ENGINE_TYPE_GR0:
+               *ptype = NVKM_ENGINE_GR;
+               return 0;
+       case RM_ENGINE_TYPE_COPY0...RM_ENGINE_TYPE_COPY9:
+               *ptype = NVKM_ENGINE_CE;
+               return rm - RM_ENGINE_TYPE_COPY0;
+       case RM_ENGINE_TYPE_NVDEC0...RM_ENGINE_TYPE_NVDEC7:
+               *ptype = NVKM_ENGINE_NVDEC;
+               return rm - RM_ENGINE_TYPE_NVDEC0;
+       case RM_ENGINE_TYPE_NVENC0...RM_ENGINE_TYPE_NVENC2:
+               *ptype = NVKM_ENGINE_NVENC;
+               return rm - RM_ENGINE_TYPE_NVENC0;
+       case RM_ENGINE_TYPE_SW:
+               *ptype = NVKM_ENGINE_SW;
+               return 0;
+       case RM_ENGINE_TYPE_SEC2:
+               *ptype = NVKM_ENGINE_SEC2;
+               return 0;
+       case RM_ENGINE_TYPE_NVJPEG0...RM_ENGINE_TYPE_NVJPEG7:
+               *ptype = NVKM_ENGINE_NVJPG;
+               return rm - RM_ENGINE_TYPE_NVJPEG0;
+       case RM_ENGINE_TYPE_OFA:
+               *ptype = NVKM_ENGINE_OFA;
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+static int
+r535_fifo_ectx_size(struct nvkm_fifo *fifo)
+{
+       NV2080_CTRL_INTERNAL_GET_CONSTRUCTED_FALCON_INFO_PARAMS *ctrl;
+       struct nvkm_gsp *gsp = fifo->engine.subdev.device->gsp;
+       struct nvkm_runl *runl;
+       struct nvkm_engn *engn;
+
+       ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                  NV2080_CTRL_CMD_INTERNAL_GET_CONSTRUCTED_FALCON_INFO,
+                                  sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return PTR_ERR(ctrl);
+
+       for (int i = 0; i < ctrl->numConstructedFalcons; i++) {
+               nvkm_runl_foreach(runl, fifo) {
+                       nvkm_runl_foreach_engn(engn, runl) {
+                               if (engn->rm.desc == ctrl->constructedFalconsTable[i].engDesc) {
+                                       engn->rm.size =
+                                               ctrl->constructedFalconsTable[i].ctxBufferSize;
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       return 0;
+}
+
+static int
+r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
+{
+       struct nvkm_subdev *subdev = &fifo->engine.subdev;
+       struct nvkm_gsp *gsp = subdev->device->gsp;
+       struct nvkm_runl *runl;
+       struct nvkm_engn *engn;
+       u32 cgids = 2048;
+       u32 chids = 2048 / CHID_PER_USERD;
+       int ret;
+       NV2080_CTRL_FIFO_GET_DEVICE_INFO_TABLE_PARAMS *ctrl;
+
+       if ((ret = nvkm_chid_new(&nvkm_chan_event, subdev, cgids, 0, cgids, &fifo->cgid)) ||
+           (ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
+               return ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                  NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return PTR_ERR(ctrl);
+
+       for (int i = 0; i < ctrl->numEntries; i++) {
+               const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
+               const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
+
+               runl = nvkm_runl_get(fifo, id, addr);
+               if (!runl) {
+                       runl = nvkm_runl_new(fifo, id, addr, 0);
+                       if (WARN_ON(IS_ERR(runl)))
+                               continue;
+               }
+       }
+
+       for (int i = 0; i < ctrl->numEntries; i++) {
+               const u32 addr = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST_PRI_BASE];
+               const u32 rmid = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RM_ENGINE_TYPE];
+               const u32 id = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_RUNLIST];
+               enum nvkm_subdev_type type;
+               int inst, nv2080;
+
+               runl = nvkm_runl_get(fifo, id, addr);
+               if (!runl)
+                       continue;
+
+               inst = r535_fifo_engn_type(rmid, &type);
+               if (inst < 0) {
+                       nvkm_warn(subdev, "RM_ENGINE_TYPE 0x%x\n", rmid);
+                       nvkm_runl_del(runl);
+                       continue;
+               }
+
+               nv2080 = r535_fifo_2080_type(type, inst);
+               if (nv2080 < 0) {
+                       nvkm_runl_del(runl);
+                       continue;
+               }
+
+               switch (type) {
+               case NVKM_ENGINE_CE:
+                       engn = nvkm_runl_add(runl, nv2080, &r535_ce, type, inst);
+                       break;
+               case NVKM_ENGINE_GR:
+                       engn = nvkm_runl_add(runl, nv2080, &r535_gr, type, inst);
+                       break;
+               case NVKM_ENGINE_NVDEC:
+               case NVKM_ENGINE_NVENC:
+               case NVKM_ENGINE_NVJPG:
+               case NVKM_ENGINE_OFA:
+                       engn = nvkm_runl_add(runl, nv2080, &r535_flcn, type, inst);
+                       break;
+               case NVKM_ENGINE_SW:
+                       continue;
+               default:
+                       engn = NULL;
+                       break;
+               }
+
+               if (!engn) {
+                       nvkm_runl_del(runl);
+                       continue;
+               }
+
+               engn->rm.desc = ctrl->entries[i].engineData[ENGINE_INFO_TYPE_ENG_DESC];
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+
+       {
+               NV2080_CTRL_CE_GET_FAULT_METHOD_BUFFER_SIZE_PARAMS *ctrl;
+
+               ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                          NV2080_CTRL_CMD_CE_GET_FAULT_METHOD_BUFFER_SIZE,
+                                          sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               fifo->rm.mthdbuf_size = ctrl->size;
+
+               nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       }
+
+       return r535_fifo_ectx_size(fifo);
+}
+
+static void
+r535_fifo_dtor(struct nvkm_fifo *fifo)
+{
+       kfree(fifo->func);
+}
+
+int
+r535_fifo_new(const struct nvkm_fifo_func *hw, struct nvkm_device *device,
+             enum nvkm_subdev_type type, int inst, struct nvkm_fifo **pfifo)
+{
+       struct nvkm_fifo_func *rm;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_fifo_dtor;
+       rm->runl_ctor = r535_fifo_runl_ctor;
+       rm->runl = &r535_runl;
+       rm->cgrp = hw->cgrp;
+       rm->cgrp.func = &r535_cgrp;
+       rm->chan = hw->chan;
+       rm->chan.func = &r535_chan;
+       rm->nonstall = &ga100_fifo_nonstall;
+       rm->nonstall_ctor = ga100_fifo_nonstall_ctor;
+
+       return nvkm_fifo_new_(rm, device, type, inst, pfifo);
+}
index 5421321f8e85f3a7e94cae5a76b869ad0d33641c..19e6772ead11fc78627eae42640dec3d55ece407 100644 (file)
@@ -18,6 +18,7 @@ struct nvkm_engn {
                bool (*mmu_fault_triggered)(struct nvkm_engn *);
                int (*ctor)(struct nvkm_engn *, struct nvkm_vctx *);
                void (*bind)(struct nvkm_engn *, struct nvkm_cctx *, struct nvkm_chan *);
+               int (*ctor2)(struct nvkm_engn *, struct nvkm_vctx *, struct nvkm_chan *);
                int (*ramht_add)(struct nvkm_engn *, struct nvkm_object *, struct nvkm_chan *);
                void (*ramht_del)(struct nvkm_chan *, int hash);
        } *func;
@@ -28,6 +29,11 @@ struct nvkm_engn {
 
        int fault;
 
+       struct {
+               u32 desc;
+               u32 size;
+       } rm;
+
        struct list_head head;
 };
 
index ea9e151dbb488286511f3272f1a3de22d18339f4..1d39a6840a404d1409758db65d935b1c3a1cad72 100644 (file)
@@ -25,6 +25,7 @@
 #include "runl.h"
 
 #include <core/memory.h>
+#include <subdev/gsp.h>
 #include <subdev/mc.h>
 #include <subdev/vfn.h>
 
@@ -282,5 +283,8 @@ int
 tu102_fifo_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_fifo **pfifo)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fifo_new(&tu102_fifo, device, type, inst, pfifo);
+
        return nvkm_fifo_new_(&tu102_fifo, device, type, inst, pfifo);
 }
index 04140e0110beb0f5c6a85ca6ecb7c1b891f00476..9e56bcc166ed54b25d14a0c77fad6d53dc41292b 100644 (file)
@@ -317,6 +317,15 @@ nvkm_uchan = {
        .uevent = nvkm_uchan_uevent,
 };
 
+struct nvkm_chan *
+nvkm_uchan_chan(struct nvkm_object *object)
+{
+       if (WARN_ON(object->func != &nvkm_uchan))
+               return NULL;
+
+       return nvkm_uchan(object)->chan;
+}
+
 int
 nvkm_uchan_new(struct nvkm_fifo *fifo, struct nvkm_cgrp *cgrp, const struct nvkm_oclass *oclass,
               void *argv, u32 argc, struct nvkm_object **pobject)
index b5418f05ccd8b46c7664fd449b51f90ff3f9eb25..1555f8c40b4f3d93b6cb9453759211df5704b438 100644 (file)
@@ -41,6 +41,9 @@ nvkm-y += nvkm/engine/gr/gp10b.o
 nvkm-y += nvkm/engine/gr/gv100.o
 nvkm-y += nvkm/engine/gr/tu102.o
 nvkm-y += nvkm/engine/gr/ga102.o
+nvkm-y += nvkm/engine/gr/ad102.o
+
+nvkm-y += nvkm/engine/gr/r535.o
 
 nvkm-y += nvkm/engine/gr/ctxnv40.o
 nvkm-y += nvkm/engine/gr/ctxnv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ad102.c
new file mode 100644 (file)
index 0000000..7bfa624
--- /dev/null
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct gf100_gr_func
+ad102_gr = {
+       .sclass = {
+               { -1, -1, FERMI_TWOD_A },
+               { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+               { -1, -1, ADA_A },
+               { -1, -1, ADA_COMPUTE_A },
+               {}
+       }
+};
+
+int
+ad102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_gr_new(&ad102_gr, device, type, inst, pgr);
+
+       return -ENODEV;
+}
index 0096ad401b15321d2b535107cc09ab2f542b9f0e..f5e68f09df768d4486fb1909e40e8a2dcdf1024d 100644 (file)
@@ -160,7 +160,11 @@ static int
 nvkm_gr_init(struct nvkm_engine *engine)
 {
        struct nvkm_gr *gr = nvkm_gr(engine);
-       return gr->func->init(gr);
+
+       if (gr->func->init)
+               return gr->func->init(gr);
+
+       return 0;
 }
 
 static int
index 00cd70abad67e2cb601916035666f1934f3090db..d285c597aff9c4a0d208aa956d9d45d126d440e4 100644 (file)
@@ -23,6 +23,7 @@
 #include "ctxgf100.h"
 
 #include <core/firmware.h>
+#include <subdev/gsp.h>
 #include <subdev/acr.h>
 #include <subdev/timer.h>
 #include <subdev/vfn.h>
@@ -350,5 +351,8 @@ ga102_gr_fwif[] = {
 int
 ga102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_gr_new(&ga102_gr, device, type, inst, pgr);
+
        return gf100_gr_new_(ga102_gr_fwif, device, type, inst, pgr);
 }
index 54f686ba39ac2fff3cdbed960539c053e8ac9154..b0e0c93050345d73ac57d6cd0e9e4297f81b6b8f 100644 (file)
@@ -445,4 +445,6 @@ void gp108_gr_acr_bld_patch(struct nvkm_acr *, u32, s64);
 
 int gf100_gr_new_(const struct gf100_gr_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
                  struct nvkm_gr **);
+int r535_gr_new(const struct gf100_gr_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
+               struct nvkm_gr **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/r535.c
new file mode 100644 (file)
index 0000000..f4bed3e
--- /dev/null
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <core/memory.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu/vmm.h>
+#include <engine/fifo/priv.h>
+
+#include <nvif/if900d.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/alloc/alloc_channel.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl0080/ctrl0080fifo.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/nvidia/generated/g_kernel_channel_nvoc.h>
+
+#define r535_gr(p) container_of((p), struct r535_gr, base)
+
+#define R515_GR_MAX_CTXBUFS 9
+
+struct r535_gr {
+       struct nvkm_gr base;
+
+       struct {
+               u16 bufferId;
+               u32 size;
+               u8  page;
+               u8  align;
+               bool global;
+               bool init;
+               bool ro;
+       } ctxbuf[R515_GR_MAX_CTXBUFS];
+       int ctxbuf_nr;
+
+       struct nvkm_memory *ctxbuf_mem[R515_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr_chan {
+       struct nvkm_object object;
+       struct r535_gr *gr;
+
+       struct nvkm_vmm *vmm;
+       struct nvkm_chan *chan;
+
+       struct nvkm_memory *mem[R515_GR_MAX_CTXBUFS];
+       struct nvkm_vma    *vma[R515_GR_MAX_CTXBUFS];
+};
+
+struct r535_gr_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_gr_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_gr_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_gr_obj = {
+       .dtor = r535_gr_obj_dtor,
+};
+
+static int
+r535_gr_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct r535_gr_chan *chan = container_of(oclass->parent, typeof(*chan), object);
+       struct r535_gr_obj *obj;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_gr_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       return nvkm_gsp_rm_alloc(&chan->chan->rm.object, oclass->handle, oclass->base.oclass, 0,
+                                &obj->rm);
+}
+
+static void *
+r535_gr_chan_dtor(struct nvkm_object *object)
+{
+       struct r535_gr_chan *grc = container_of(object, typeof(*grc), object);
+       struct r535_gr *gr = grc->gr;
+
+       for (int i = 0; i < gr->ctxbuf_nr; i++) {
+               nvkm_vmm_put(grc->vmm, &grc->vma[i]);
+               nvkm_memory_unref(&grc->mem[i]);
+       }
+
+       nvkm_vmm_unref(&grc->vmm);
+       return grc;
+}
+
+static const struct nvkm_object_func
+r535_gr_chan = {
+       .dtor = r535_gr_chan_dtor,
+};
+
+static int
+r535_gr_promote_ctx(struct r535_gr *gr, bool golden, struct nvkm_vmm *vmm,
+                   struct nvkm_memory **pmem, struct nvkm_vma **pvma,
+                   struct nvkm_gsp_object *chan)
+{
+       struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       NV2080_CTRL_GPU_PROMOTE_CTX_PARAMS *ctrl;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.device.subdevice,
+                                   NV2080_CTRL_CMD_GPU_PROMOTE_CTX, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return PTR_ERR(ctrl);
+
+       ctrl->engineType = 1;
+       ctrl->hChanClient = vmm->rm.client.object.handle;
+       ctrl->hObject = chan->handle;
+
+       for (int i = 0; i < gr->ctxbuf_nr; i++) {
+               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ENTRY *entry =
+                       &ctrl->promoteEntry[ctrl->entryCount];
+               const bool alloc = golden || !gr->ctxbuf[i].global;
+               int ret;
+
+               entry->bufferId = gr->ctxbuf[i].bufferId;
+               entry->bInitialize = gr->ctxbuf[i].init && alloc;
+
+               if (alloc) {
+                       ret = nvkm_memory_new(device, gr->ctxbuf[i].init ?
+                                             NVKM_MEM_TARGET_INST : NVKM_MEM_TARGET_INST_SR_LOST,
+                                             gr->ctxbuf[i].size, 1 << gr->ctxbuf[i].page,
+                                             gr->ctxbuf[i].init, &pmem[i]);
+                       if (WARN_ON(ret))
+                               return ret;
+
+                       if (gr->ctxbuf[i].bufferId ==
+                                       NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP)
+                               entry->bNonmapped = 1;
+               } else {
+                       if (gr->ctxbuf[i].bufferId ==
+                               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP)
+                               continue;
+
+                       pmem[i] = nvkm_memory_ref(gr->ctxbuf_mem[i]);
+               }
+
+               if (!entry->bNonmapped) {
+                       struct gf100_vmm_map_v0 args = {
+                               .priv = 1,
+                               .ro   = gr->ctxbuf[i].ro,
+                       };
+
+                       mutex_lock(&vmm->mutex.vmm);
+                       ret = nvkm_vmm_get_locked(vmm, false, true, false, 0, gr->ctxbuf[i].align,
+                                                 nvkm_memory_size(pmem[i]), &pvma[i]);
+                       mutex_unlock(&vmm->mutex.vmm);
+                       if (ret)
+                               return ret;
+
+                       ret = nvkm_memory_map(pmem[i], 0, vmm, pvma[i], &args, sizeof(args));
+                       if (ret)
+                               return ret;
+
+                       entry->gpuVirtAddr = pvma[i]->addr;
+               }
+
+               if (entry->bInitialize) {
+                       entry->gpuPhysAddr = nvkm_memory_addr(pmem[i]);
+                       entry->size = gr->ctxbuf[i].size;
+                       entry->physAttr = 4;
+               }
+
+               nvkm_debug(subdev,
+                          "promote %02d: pa %016llx/%08x sz %016llx va %016llx init:%d nm:%d\n",
+                          entry->bufferId, entry->gpuPhysAddr, entry->physAttr, entry->size,
+                          entry->gpuVirtAddr, entry->bInitialize, entry->bNonmapped);
+
+               ctrl->entryCount++;
+       }
+
+       return nvkm_gsp_rm_ctrl_wr(&vmm->rm.device.subdevice, ctrl);
+}
+
+static int
+r535_gr_chan_new(struct nvkm_gr *base, struct nvkm_chan *chan, const struct nvkm_oclass *oclass,
+                struct nvkm_object **pobject)
+{
+       struct r535_gr *gr = r535_gr(base);
+       struct r535_gr_chan *grc;
+       int ret;
+
+       if (!(grc = kzalloc(sizeof(*grc), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_gr_chan, oclass, &grc->object);
+       grc->gr = gr;
+       grc->vmm = nvkm_vmm_ref(chan->vmm);
+       grc->chan = chan;
+       *pobject = &grc->object;
+
+       ret = r535_gr_promote_ctx(gr, false, grc->vmm, grc->mem, grc->vma, &chan->rm.object);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static u64
+r535_gr_units(struct nvkm_gr *gr)
+{
+       struct nvkm_gsp *gsp = gr->engine.subdev.device->gsp;
+
+       return (gsp->gr.tpcs << 8) | gsp->gr.gpcs;
+}
+
+static int
+r535_gr_oneinit(struct nvkm_gr *base)
+{
+       NV2080_CTRL_INTERNAL_STATIC_GR_GET_CONTEXT_BUFFERS_INFO_PARAMS *info;
+       struct r535_gr *gr = container_of(base, typeof(*gr), base);
+       struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvkm_gsp *gsp = device->gsp;
+       struct nvkm_mmu *mmu = device->mmu;
+       struct {
+               struct nvkm_memory *inst;
+               struct nvkm_vmm *vmm;
+               struct nvkm_gsp_object chan;
+               struct nvkm_vma *vma[R515_GR_MAX_CTXBUFS];
+       } golden = {};
+       int ret;
+
+       /* Allocate a channel to use for golden context init. */
+       ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 0x12000, 0, true, &golden.inst);
+       if (ret)
+               goto done;
+
+       ret = nvkm_vmm_new(device, 0x1000, 0, NULL, 0, NULL, "grGoldenVmm", &golden.vmm);
+       if (ret)
+               goto done;
+
+       ret = mmu->func->promote_vmm(golden.vmm);
+       if (ret)
+               goto done;
+
+       {
+               NV_CHANNELGPFIFO_ALLOCATION_PARAMETERS *args;
+
+               args = nvkm_gsp_rm_alloc_get(&golden.vmm->rm.device.object, 0xf1f00000,
+                                            device->fifo->func->chan.user.oclass,
+                                            sizeof(*args), &golden.chan);
+               if (IS_ERR(args)) {
+                       ret = PTR_ERR(args);
+                       goto done;
+               }
+
+               args->gpFifoOffset = 0;
+               args->gpFifoEntries = 0x1000 / 8;
+               args->flags =
+                       NVDEF(NVOS04, FLAGS, CHANNEL_TYPE, PHYSICAL) |
+                       NVDEF(NVOS04, FLAGS, VPR, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_MAP_REFCOUNTING, FALSE) |
+                       NVVAL(NVOS04, FLAGS, GROUP_CHANNEL_RUNQUEUE, 0) |
+                       NVDEF(NVOS04, FLAGS, PRIVILEGED_CHANNEL, TRUE) |
+                       NVDEF(NVOS04, FLAGS, DELAY_CHANNEL_SCHEDULING, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_DENY_PHYSICAL_MODE_CE, FALSE) |
+                       NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_VALUE, 0) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_FIXED, FALSE) |
+                       NVVAL(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_VALUE, 0) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_USERD_INDEX_PAGE_FIXED, TRUE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_DENY_AUTH_LEVEL_PRIV, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_SKIP_SCRUBBER, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_CLIENT_MAP_FIFO, FALSE) |
+                       NVDEF(NVOS04, FLAGS, SET_EVICT_LAST_CE_PREFETCH_CHANNEL, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_VGPU_PLUGIN_CONTEXT, FALSE) |
+                       NVDEF(NVOS04, FLAGS, CHANNEL_PBDMA_ACQUIRE_TIMEOUT, FALSE) |
+                       NVDEF(NVOS04, FLAGS, GROUP_CHANNEL_THREAD, DEFAULT) |
+                       NVDEF(NVOS04, FLAGS, MAP_CHANNEL, FALSE) |
+                       NVDEF(NVOS04, FLAGS, SKIP_CTXBUFFER_ALLOC, FALSE);
+               args->hVASpace = golden.vmm->rm.object.handle;
+               args->engineType = 1;
+               args->instanceMem.base = nvkm_memory_addr(golden.inst);
+               args->instanceMem.size = 0x1000;
+               args->instanceMem.addressSpace = 2;
+               args->instanceMem.cacheAttrib = 1;
+               args->ramfcMem.base = nvkm_memory_addr(golden.inst);
+               args->ramfcMem.size = 0x200;
+               args->ramfcMem.addressSpace = 2;
+               args->ramfcMem.cacheAttrib = 1;
+               args->userdMem.base = nvkm_memory_addr(golden.inst) + 0x1000;
+               args->userdMem.size = 0x200;
+               args->userdMem.addressSpace = 2;
+               args->userdMem.cacheAttrib = 1;
+               args->mthdbufMem.base = nvkm_memory_addr(golden.inst) + 0x2000;
+               args->mthdbufMem.size = 0x5000;
+               args->mthdbufMem.addressSpace = 2;
+               args->mthdbufMem.cacheAttrib = 1;
+               args->internalFlags =
+                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, PRIVILEGE, ADMIN) |
+                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ERROR_NOTIFIER_TYPE, NONE) |
+                       NVDEF(NV_KERNELCHANNEL, ALLOC_INTERNALFLAGS, ECC_ERROR_NOTIFIER_TYPE, NONE);
+
+               ret = nvkm_gsp_rm_alloc_wr(&golden.chan, args);
+               if (ret)
+                       goto done;
+       }
+
+       /* Fetch context buffer info from RM and allocate each of them here to use
+        * during golden context init (or later as a global context buffer).
+        *
+        * Also build the information that'll be used to create channel contexts.
+        */
+       info = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
+                                  NV2080_CTRL_CMD_INTERNAL_STATIC_KGR_GET_CONTEXT_BUFFERS_INFO,
+                                  sizeof(*info));
+       if (WARN_ON(IS_ERR(info))) {
+               ret = PTR_ERR(info);
+               goto done;
+       }
+
+       for (int i = 0; i < ARRAY_SIZE(info->engineContextBuffersInfo[0].engine); i++) {
+               static const struct {
+                       u32     id0; /* NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID */
+                       u32     id1; /* NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID */
+                       bool global;
+                       bool   init;
+                       bool     ro;
+               } map[] = {
+#define _A(n,N,G,I,R) { .id0 = NV0080_CTRL_FIFO_GET_ENGINE_CONTEXT_PROPERTIES_ENGINE_ID_##n, \
+                       .id1 = NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_##N, \
+                       .global = (G), .init = (I), .ro = (R) }
+#define _B(N,G,I,R) _A(GRAPHICS_##N, N, (G), (I), (R))
+                       /*                                       global   init     ro */
+                       _A(           GRAPHICS,             MAIN, false,  true, false),
+                       _B(                                PATCH, false,  true, false),
+                       _A( GRAPHICS_BUNDLE_CB, BUFFER_BUNDLE_CB,  true, false, false),
+                       _B(                             PAGEPOOL,  true, false, false),
+                       _B(                         ATTRIBUTE_CB,  true, false, false),
+                       _B(                        RTV_CB_GLOBAL,  true, false, false),
+                       _B(                           FECS_EVENT,  true,  true, false),
+                       _B(                      PRIV_ACCESS_MAP,  true,  true,  true),
+#undef _B
+#undef _A
+               };
+               u32 size = info->engineContextBuffersInfo[0].engine[i].size;
+               u8 align, page;
+               int id;
+
+               for (id = 0; id < ARRAY_SIZE(map); id++) {
+                       if (map[id].id0 == i)
+                               break;
+               }
+
+               nvkm_debug(subdev, "%02x: size:0x%08x %s\n", i,
+                          size, (id < ARRAY_SIZE(map)) ? "*" : "");
+               if (id >= ARRAY_SIZE(map))
+                       continue;
+
+               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_MAIN)
+                       size = ALIGN(size, 0x1000) + 64 * 0x1000; /* per-subctx headers */
+
+               if      (size >= 1 << 21) page = 21;
+               else if (size >= 1 << 16) page = 16;
+               else                      page = 12;
+
+               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_ATTRIBUTE_CB)
+                       align = order_base_2(size);
+               else
+                       align = page;
+
+               if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+                       continue;
+
+               gr->ctxbuf[gr->ctxbuf_nr].bufferId = map[id].id1;
+               gr->ctxbuf[gr->ctxbuf_nr].size     = size;
+               gr->ctxbuf[gr->ctxbuf_nr].page     = page;
+               gr->ctxbuf[gr->ctxbuf_nr].align    = align;
+               gr->ctxbuf[gr->ctxbuf_nr].global   = map[id].global;
+               gr->ctxbuf[gr->ctxbuf_nr].init     = map[id].init;
+               gr->ctxbuf[gr->ctxbuf_nr].ro       = map[id].ro;
+               gr->ctxbuf_nr++;
+
+               if (map[id].id1 == NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_PRIV_ACCESS_MAP) {
+                       if (WARN_ON(gr->ctxbuf_nr == ARRAY_SIZE(gr->ctxbuf)))
+                               continue;
+
+                       gr->ctxbuf[gr->ctxbuf_nr] = gr->ctxbuf[gr->ctxbuf_nr - 1];
+                       gr->ctxbuf[gr->ctxbuf_nr].bufferId =
+                               NV2080_CTRL_GPU_PROMOTE_CTX_BUFFER_ID_UNRESTRICTED_PRIV_ACCESS_MAP;
+                       gr->ctxbuf_nr++;
+               }
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, info);
+
+       /* Promote golden context to RM. */
+       ret = r535_gr_promote_ctx(gr, true, golden.vmm, gr->ctxbuf_mem, golden.vma, &golden.chan);
+       if (ret)
+               goto done;
+
+       /* Allocate 3D class on channel to trigger golden context init in RM. */
+       {
+               int i;
+
+               for (i = 0; gr->base.func->sclass[i].ctor; i++) {
+                       if ((gr->base.func->sclass[i].oclass & 0xff) == 0x97) {
+                               struct nvkm_gsp_object threed;
+
+                               ret = nvkm_gsp_rm_alloc(&golden.chan, 0x97000000,
+                                                       gr->base.func->sclass[i].oclass, 0,
+                                                       &threed);
+                               if (ret)
+                                       goto done;
+
+                               nvkm_gsp_rm_free(&threed);
+                               break;
+                       }
+               }
+
+               if (WARN_ON(!gr->base.func->sclass[i].ctor)) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+       }
+
+done:
+       nvkm_gsp_rm_free(&golden.chan);
+       for (int i = gr->ctxbuf_nr - 1; i >= 0; i--)
+               nvkm_vmm_put(golden.vmm, &golden.vma[i]);
+       nvkm_vmm_unref(&golden.vmm);
+       nvkm_memory_unref(&golden.inst);
+       return ret;
+
+}
+
+static void *
+r535_gr_dtor(struct nvkm_gr *base)
+{
+       struct r535_gr *gr = r535_gr(base);
+
+       while (gr->ctxbuf_nr)
+               nvkm_memory_unref(&gr->ctxbuf_mem[--gr->ctxbuf_nr]);
+
+       kfree(gr->base.func);
+       return gr;
+}
+
+int
+r535_gr_new(const struct gf100_gr_func *hw,
+           struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
+{
+       struct nvkm_gr_func *rm;
+       struct r535_gr *gr;
+       int nclass;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_gr_dtor;
+       rm->oneinit = r535_gr_oneinit;
+       rm->units = r535_gr_units;
+       rm->chan_new = r535_gr_chan_new;
+
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_gr_obj_ctor;
+       }
+
+       if (!(gr = kzalloc(sizeof(*gr), GFP_KERNEL))) {
+               kfree(rm);
+               return -ENOMEM;
+       }
+
+       *pgr = &gr->base;
+
+       return nvkm_gr_ctor(rm, device, type, inst, true, &gr->base);
+}
index a7775aa185415a35ac3851237b1cda4aedc835c8..b7a458e9040a3e26a6eeef0814289c99729318f1 100644 (file)
@@ -22,6 +22,8 @@
 #include "gf100.h"
 #include "ctxgf100.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 void
@@ -216,5 +218,8 @@ tu102_gr_fwif[] = {
 int
 tu102_gr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_gr **pgr)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_gr_new(&tu102_gr, device, type, inst, pgr);
+
        return gf100_gr_new_(tu102_gr_fwif, device, type, inst, pgr);
 }
index f05e79670d22736647b3995b0065907bbfd0573a..2b0e923cb75541f7fcad98eec9fc7cd7a95c9f3b 100644 (file)
@@ -1,4 +1,9 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/engine/nvdec/base.o
 nvkm-y += nvkm/engine/nvdec/gm107.o
+nvkm-y += nvkm/engine/nvdec/tu102.o
+nvkm-y += nvkm/engine/nvdec/ga100.o
 nvkm-y += nvkm/engine/nvdec/ga102.o
+nvkm-y += nvkm/engine/nvdec/ad102.o
+
+nvkm-y += nvkm/engine/nvdec/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ad102.c
new file mode 100644 (file)
index 0000000..d72b3aa
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_nvdec = {
+       .sclass = {
+               { -1, -1, NVC9B0_VIDEO_DECODER },
+               {}
+       }
+};
+
+int
+ad102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvdec **pnvdec)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvdec_new(&ad102_nvdec, device, type, inst, pnvdec);
+
+       return -ENODEV;
+}
index 1f6e3b32ba163cfc70c8e21acbb1cdb58c3d94e8..7d1c6791ae8265721177179521c5b0982bc71188 100644 (file)
@@ -33,6 +33,7 @@ nvkm_nvdec_dtor(struct nvkm_engine *engine)
 static const struct nvkm_engine_func
 nvkm_nvdec = {
        .dtor = nvkm_nvdec_dtor,
+       .sclass = { {} },
 };
 
 int
@@ -58,4 +59,4 @@ nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *device,
 
        return nvkm_falcon_ctor(nvdec->func->flcn, &nvdec->engine.subdev,
                                nvdec->engine.subdev.name, addr, &nvdec->falcon);
-};
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/ga100.c
new file mode 100644 (file)
index 0000000..9329342
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga100_nvdec = {
+       .sclass = {
+               { -1, -1, NVC6B0_VIDEO_DECODER },
+               {}
+       }
+};
+
+int
+ga100_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvdec **pnvdec)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvdec_new(&ga100_nvdec, device, type, inst, pnvdec);
+
+       return -ENODEV;
+}
index 37d8c3c0f3abc08bb2f442d3c664bc624827dee8..022a9c824304372c4ceb0af1db31af806fd4acd1 100644 (file)
  */
 #include "priv.h"
 
-#include <subdev/mc.h>
-#include <subdev/timer.h>
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga102_nvdec_gsp = {
+       .sclass = {
+               { -1, -1, NVC7B0_VIDEO_DECODER },
+               {}
+       }
+};
 
 static const struct nvkm_falcon_func
 ga102_nvdec_flcn = {
@@ -57,5 +66,8 @@ int
 ga102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                struct nvkm_nvdec **pnvdec)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvdec_new(&ga102_nvdec_gsp, device, type, inst, pnvdec);
+
        return nvkm_nvdec_new_(ga102_nvdec_fwif, device, type, inst, 0x848000, pnvdec);
 }
index 564f7e8960a2016d1af1cade0bbf16156c15f862..51c9d0e68ee41acb6879fd80a19d8aee64de8432 100644 (file)
@@ -44,7 +44,7 @@ gm107_nvdec_nofw(struct nvkm_nvdec *nvdec, int ver,
        return 0;
 }
 
-static const struct nvkm_nvdec_fwif
+const struct nvkm_nvdec_fwif
 gm107_nvdec_fwif[] = {
        { -1, gm107_nvdec_nofw, &gm107_nvdec },
        {}
index 61e1f7aaa50933ae2ef32bd699fad679bd5c03fe..f506ae83bfd73f6358102c4e7bc62815c87cbdb6 100644 (file)
@@ -5,6 +5,8 @@
 
 struct nvkm_nvdec_func {
        const struct nvkm_falcon_func *flcn;
+
+       struct nvkm_sclass sclass[];
 };
 
 struct nvkm_nvdec_fwif {
@@ -14,6 +16,11 @@ struct nvkm_nvdec_fwif {
        const struct nvkm_nvdec_func *func;
 };
 
+extern const struct nvkm_nvdec_fwif gm107_nvdec_fwif[];
+
 int nvkm_nvdec_new_(const struct nvkm_nvdec_fwif *fwif, struct nvkm_device *,
                    enum nvkm_subdev_type, int, u32 addr, struct nvkm_nvdec **);
+
+int r535_nvdec_new(const struct nvkm_engine_func *, struct nvkm_device *,
+                  enum nvkm_subdev_type, int, struct nvkm_nvdec **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/r535.c
new file mode 100644 (file)
index 0000000..75a24f3
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvdec_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvdec_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_nvdec_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvdec_obj = {
+       .dtor = r535_nvdec_obj_dtor,
+};
+
+static int
+r535_nvdec_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_nvdec_obj *obj;
+       NV_BSP_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_nvdec_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+       args->engineInstance = oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvdec_dtor(struct nvkm_engine *engine)
+{
+       struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
+
+       kfree(nvdec->engine.func);
+       return nvdec;
+}
+
+int
+r535_nvdec_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+              enum nvkm_subdev_type type, int inst, struct nvkm_nvdec **pnvdec)
+{
+       struct nvkm_engine_func *rm;
+       int nclass;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_nvdec_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_nvdec_obj_ctor;
+       }
+
+       if (!(*pnvdec = kzalloc(sizeof(**pnvdec), GFP_KERNEL))) {
+               kfree(rm);
+               return -ENOMEM;
+       }
+
+       return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvdec)->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/tu102.c
new file mode 100644 (file)
index 0000000..808c8e0
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+tu102_nvdec = {
+       .sclass = {
+               { -1, -1, NVC4B0_VIDEO_DECODER },
+               {}
+       }
+};
+
+int
+tu102_nvdec_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvdec **pnvdec)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvdec_new(&tu102_nvdec, device, type, inst, pnvdec);
+
+       return nvkm_nvdec_new_(gm107_nvdec_fwif, device, type, inst, 0, pnvdec);
+}
index 75bf4436bf3fe1fc039c43e0064dc82262ac2bc0..2c1495b730f3b4fc4ad4a034a97706c8bb3f481d 100644 (file)
@@ -1,3 +1,8 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/engine/nvenc/base.o
 nvkm-y += nvkm/engine/nvenc/gm107.o
+nvkm-y += nvkm/engine/nvenc/tu102.o
+nvkm-y += nvkm/engine/nvenc/ga102.o
+nvkm-y += nvkm/engine/nvenc/ad102.o
+
+nvkm-y += nvkm/engine/nvenc/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ad102.c
new file mode 100644 (file)
index 0000000..1b4619f
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_nvenc = {
+       .sclass = {
+               { -1, -1, NVC9B7_VIDEO_ENCODER },
+               {}
+       }
+};
+
+int
+ad102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvenc **pnvenc)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvenc_new(&ad102_nvenc, device, type, inst, pnvenc);
+
+       return -ENODEV;
+}
index cf5dcfda7b2538fbcbaafe0095292bf272ab8d95..d45dbb42a0dbf02446d2ad4a0ab27791bed5cedd 100644 (file)
@@ -34,6 +34,7 @@ nvkm_nvenc_dtor(struct nvkm_engine *engine)
 static const struct nvkm_engine_func
 nvkm_nvenc = {
        .dtor = nvkm_nvenc_dtor,
+       .sclass = { {} },
 };
 
 int
@@ -59,4 +60,4 @@ nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *fwif, struct nvkm_device *device,
 
        return nvkm_falcon_ctor(nvenc->func->flcn, &nvenc->engine.subdev,
                                nvenc->engine.subdev.name, 0, &nvenc->falcon);
-};
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/ga102.c
new file mode 100644 (file)
index 0000000..6463ab8
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga102_nvenc = {
+       .sclass = {
+               { -1, -1, NVC7B7_VIDEO_ENCODER },
+               {}
+       }
+};
+
+int
+ga102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvenc **pnvenc)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvenc_new(&ga102_nvenc, device, type, inst, pnvenc);
+
+       return -ENODEV;
+}
index ad27d8b97569686186ebadca0073b5841d6a61be..922abb647ad3504f9283ca7cbfdf582da909c31d 100644 (file)
@@ -38,7 +38,7 @@ gm107_nvenc_nofw(struct nvkm_nvenc *nvenc, int ver,
        return 0;
 }
 
-static const struct nvkm_nvenc_fwif
+const struct nvkm_nvenc_fwif
 gm107_nvenc_fwif[] = {
        { -1, gm107_nvenc_nofw, &gm107_nvenc },
        {}
index 4130a2bfbb4f79b060eae00eb552f26a26a058d9..7917affc6505a8ecf965d475dc8f5fecedb45bbe 100644 (file)
@@ -14,6 +14,11 @@ struct nvkm_nvenc_fwif {
        const struct nvkm_nvenc_func *func;
 };
 
+extern const struct nvkm_nvenc_fwif gm107_nvenc_fwif[];
+
 int nvkm_nvenc_new_(const struct nvkm_nvenc_fwif *, struct nvkm_device *, enum nvkm_subdev_type,
                    int, struct nvkm_nvenc **pnvenc);
+
+int r535_nvenc_new(const struct nvkm_engine_func *, struct nvkm_device *,
+                  enum nvkm_subdev_type, int, struct nvkm_nvenc **);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/r535.c
new file mode 100644 (file)
index 0000000..c8a2a91
--- /dev/null
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvenc_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvenc_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_nvenc_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvenc_obj = {
+       .dtor = r535_nvenc_obj_dtor,
+};
+
+static int
+r535_nvenc_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_nvenc_obj *obj;
+       NV_MSENC_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_nvenc_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+       args->engineInstance = oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvenc_dtor(struct nvkm_engine *engine)
+{
+       struct nvkm_nvenc *nvenc = nvkm_nvenc(engine);
+
+       kfree(nvenc->engine.func);
+       return nvenc;
+}
+
+int
+r535_nvenc_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+              enum nvkm_subdev_type type, int inst, struct nvkm_nvenc **pnvenc)
+{
+       struct nvkm_engine_func *rm;
+       int nclass;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_nvenc_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_nvenc_obj_ctor;
+       }
+
+       if (!(*pnvenc = kzalloc(sizeof(**pnvenc), GFP_KERNEL))) {
+               kfree(rm);
+               return -ENOMEM;
+       }
+
+       return nvkm_engine_ctor(rm, device, type, inst, true, &(*pnvenc)->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvenc/tu102.c
new file mode 100644 (file)
index 0000000..9338644
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+tu102_nvenc = {
+       .sclass = {
+               { -1, -1, NVC4B7_VIDEO_ENCODER },
+               {}
+       }
+};
+
+int
+tu102_nvenc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_nvenc **pnvenc)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvenc_new(&tu102_nvenc, device, type, inst, pnvenc);
+
+       return nvkm_nvenc_new_(gm107_nvenc_fwif, device, type, inst, pnvenc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/Kbuild
new file mode 100644 (file)
index 0000000..1408f66
--- /dev/null
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/engine/nvjpg/ga100.o
+nvkm-y += nvkm/engine/nvjpg/ad102.o
+
+nvkm-y += nvkm/engine/nvjpg/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ad102.c
new file mode 100644 (file)
index 0000000..62705dc
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_nvjpg = {
+       .sclass = {
+               { -1, -1, NVC9D1_VIDEO_NVJPG },
+               {}
+       }
+};
+
+int
+ad102_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_engine **pengine)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvjpg_new(&ad102_nvjpg, device, type, inst, pengine);
+
+       return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/ga100.c
new file mode 100644 (file)
index 0000000..f550eb0
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga100_nvjpg = {
+       .sclass = {
+               { -1, -1, NVC4D1_VIDEO_NVJPG },
+               {}
+       }
+};
+
+int
+ga100_nvjpg_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+               struct nvkm_engine **pengine)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_nvjpg_new(&ga100_nvjpg, device, type, inst, pengine);
+
+       return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/priv.h
new file mode 100644 (file)
index 0000000..1e80cf7
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_NVJPG_PRIV_H__
+#define __NVKM_NVJPG_PRIV_H__
+#include <engine/nvjpg.h>
+
+int r535_nvjpg_new(const struct nvkm_engine_func *, struct nvkm_device *,
+                  enum nvkm_subdev_type, int, struct nvkm_engine **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvjpg/r535.c
new file mode 100644 (file)
index 0000000..1babddc
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_nvjpg_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_nvjpg_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_nvjpg_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_nvjpg_obj = {
+       .dtor = r535_nvjpg_obj_dtor,
+};
+
+static int
+r535_nvjpg_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                   struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_nvjpg_obj *obj;
+       NV_NVJPG_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_nvjpg_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+       args->engineInstance = oclass->engine->subdev.inst;
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_nvjpg_dtor(struct nvkm_engine *engine)
+{
+       kfree(engine->func);
+       return engine;
+}
+
+int
+r535_nvjpg_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+              enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+       struct nvkm_engine_func *rm;
+       int nclass, ret;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_nvjpg_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_nvjpg_obj_ctor;
+       }
+
+       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/Kbuild
new file mode 100644 (file)
index 0000000..99f1713
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: MIT
+nvkm-y += nvkm/engine/ofa/ga100.o
+nvkm-y += nvkm/engine/ofa/ga102.o
+nvkm-y += nvkm/engine/ofa/ad102.o
+
+nvkm-y += nvkm/engine/ofa/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ad102.c
new file mode 100644 (file)
index 0000000..7ac87ef
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ad102_ofa = {
+       .sclass = {
+               { -1, -1, NVC9FA_VIDEO_OFA },
+               {}
+       }
+};
+
+int
+ad102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_engine **pengine)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ofa_new(&ad102_ofa, device, type, inst, pengine);
+
+       return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga100.c
new file mode 100644 (file)
index 0000000..ef474f6
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga100_ofa = {
+       .sclass = {
+               { -1, -1, NVC6FA_VIDEO_OFA },
+               {}
+       }
+};
+
+int
+ga100_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_engine **pengine)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ofa_new(&ga100_ofa, device, type, inst, pengine);
+
+       return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/ga102.c
new file mode 100644 (file)
index 0000000..bea2555
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvif/class.h>
+
+static const struct nvkm_engine_func
+ga102_ofa = {
+       .sclass = {
+               { -1, -1, NVC7FA_VIDEO_OFA },
+               {}
+       }
+};
+
+int
+ga102_ofa_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_engine **pengine)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_ofa_new(&ga102_ofa, device, type, inst, pengine);
+
+       return -ENODEV;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/priv.h
new file mode 100644 (file)
index 0000000..caf29e6
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: MIT */
+#ifndef __NVKM_OFA_PRIV_H__
+#define __NVKM_OFA_PRIV_H__
+#include <engine/ofa.h>
+
+int r535_ofa_new(const struct nvkm_engine_func *, struct nvkm_device *,
+                enum nvkm_subdev_type, int, struct nvkm_engine **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/ofa/r535.c
new file mode 100644 (file)
index 0000000..438dc69
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/object.h>
+#include <subdev/gsp.h>
+#include <subdev/mmu.h>
+#include <engine/fifo.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+struct r535_ofa_obj {
+       struct nvkm_object object;
+       struct nvkm_gsp_object rm;
+};
+
+static void *
+r535_ofa_obj_dtor(struct nvkm_object *object)
+{
+       struct r535_ofa_obj *obj = container_of(object, typeof(*obj), object);
+
+       nvkm_gsp_rm_free(&obj->rm);
+       return obj;
+}
+
+static const struct nvkm_object_func
+r535_ofa_obj = {
+       .dtor = r535_ofa_obj_dtor,
+};
+
+static int
+r535_ofa_obj_ctor(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+                struct nvkm_object **pobject)
+{
+       struct nvkm_chan *chan = nvkm_uchan_chan(oclass->parent);
+       struct r535_ofa_obj *obj;
+       NV_OFA_ALLOCATION_PARAMETERS *args;
+
+       if (!(obj = kzalloc(sizeof(*obj), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_object_ctor(&r535_ofa_obj, oclass, &obj->object);
+       *pobject = &obj->object;
+
+       args = nvkm_gsp_rm_alloc_get(&chan->rm.object, oclass->handle, oclass->base.oclass,
+                                    sizeof(*args), &obj->rm);
+       if (WARN_ON(IS_ERR(args)))
+               return PTR_ERR(args);
+
+       args->size = sizeof(*args);
+
+       return nvkm_gsp_rm_alloc_wr(&obj->rm, args);
+}
+
+static void *
+r535_ofa_dtor(struct nvkm_engine *engine)
+{
+       kfree(engine->func);
+       return engine;
+}
+
+int
+r535_ofa_new(const struct nvkm_engine_func *hw, struct nvkm_device *device,
+            enum nvkm_subdev_type type, int inst, struct nvkm_engine **pengine)
+{
+       struct nvkm_engine_func *rm;
+       int nclass, ret;
+
+       for (nclass = 0; hw->sclass[nclass].oclass; nclass++);
+
+       if (!(rm = kzalloc(sizeof(*rm) + (nclass + 1) * sizeof(rm->sclass[0]), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_ofa_dtor;
+       for (int i = 0; i < nclass; i++) {
+               rm->sclass[i].minver = hw->sclass[i].minver;
+               rm->sclass[i].maxver = hw->sclass[i].maxver;
+               rm->sclass[i].oclass = hw->sclass[i].oclass;
+               rm->sclass[i].ctor = r535_ofa_obj_ctor;
+       }
+
+       ret = nvkm_engine_new_(rm, device, type, inst, true, pengine);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 19feadb1f67b4a3acdd333193178d9f7ba43d7a4..b43b7e5e2733e4a5a4997b3e5e832f8dbc197345 100644 (file)
@@ -4,3 +4,5 @@ nvkm-y += nvkm/engine/sec2/gp102.o
 nvkm-y += nvkm/engine/sec2/gp108.o
 nvkm-y += nvkm/engine/sec2/tu102.o
 nvkm-y += nvkm/engine/sec2/ga102.o
+
+nvkm-y += nvkm/engine/sec2/r535.o
index 945abb8156d72657ad48807c93d64b3e6b1c855d..54be7596b046f925d1a7427c5eff4e729efbba24 100644 (file)
@@ -21,6 +21,7 @@
  */
 #include "priv.h"
 #include <subdev/acr.h>
+#include <subdev/gsp.h>
 #include <subdev/vfn.h>
 
 #include <nvfw/flcn.h>
@@ -193,5 +194,10 @@ ga102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
        /* TOP info wasn't updated on Turing to reflect the PRI
         * address change for some reason.  We override it here.
         */
-       return nvkm_sec2_new_(ga102_sec2_fwif, device, type, inst, 0x840000, psec2);
+       const u32 addr = 0x840000;
+
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_sec2_new(&ga102_sec2, device, type, inst, addr, psec2);
+
+       return nvkm_sec2_new_(ga102_sec2_fwif, device, type, inst, addr, psec2);
 }
index 172d2705c199234484939762ce0e80752bf527d8..e158a40a4f09a6510c1dd49e2376d3e2d8b5af11 100644 (file)
@@ -4,6 +4,9 @@
 #include <engine/sec2.h>
 struct nvkm_acr_lsfw;
 
+int r535_sec2_new(const struct nvkm_sec2_func *,
+                 struct nvkm_device *, enum nvkm_subdev_type, int, u32 addr, struct nvkm_sec2 **);
+
 struct nvkm_sec2_func {
        const struct nvkm_falcon_func *flcn;
        u8 unit_unload;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/r535.c
new file mode 100644 (file)
index 0000000..83a6bad
--- /dev/null
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void *
+r535_sec2_dtor(struct nvkm_engine *engine)
+{
+       struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+
+       nvkm_falcon_dtor(&sec2->falcon);
+       return sec2;
+}
+
+static const struct nvkm_engine_func
+r535_sec2 = {
+       .dtor = r535_sec2_dtor,
+};
+
+int
+r535_sec2_new(const struct nvkm_sec2_func *func, struct nvkm_device *device,
+             enum nvkm_subdev_type type, int inst, u32 addr, struct nvkm_sec2 **psec2)
+{
+       struct nvkm_sec2 *sec2;
+       int ret;
+
+       if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
+               return -ENOMEM;
+
+       ret = nvkm_engine_ctor(&r535_sec2, device, type, inst, true, &sec2->engine);
+       if (ret)
+               return ret;
+
+       return nvkm_falcon_ctor(func->flcn, &sec2->engine.subdev, sec2->engine.subdev.name,
+                               addr, &sec2->falcon);
+}
index 0afc4b2fa5294f917d12e5248a22dd7838f058ec..20452046d7d1ffc3f50a6eff70ccf91b4c1bfaa5 100644 (file)
@@ -21,6 +21,7 @@
  */
 #include "priv.h"
 #include <subdev/acr.h>
+#include <subdev/gsp.h>
 
 #include <nvfw/sec2.h>
 
@@ -82,5 +83,10 @@ tu102_sec2_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
        /* TOP info wasn't updated on Turing to reflect the PRI
         * address change for some reason.  We override it here.
         */
-       return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, 0x840000, psec2);
+       const u32 addr = 0x840000;
+
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_sec2_new(&tu102_sec2, device, type, inst, addr, psec2);
+
+       return nvkm_sec2_new_(tu102_sec2_fwif, device, type, inst, addr, psec2);
 }
index 9ffe7b921ccb528d712efc94076d8655dd57fbcd..d6b0155644016392eefbc882a89a8ea70d8fd8cc 100644 (file)
@@ -8,5 +8,6 @@ nvkm-y += nvkm/falcon/v1.o
 
 nvkm-y += nvkm/falcon/gm200.o
 nvkm-y += nvkm/falcon/gp102.o
+nvkm-y += nvkm/falcon/tu102.o
 nvkm-y += nvkm/falcon/ga100.o
 nvkm-y += nvkm/falcon/ga102.o
index 235149f73a690d8dfef496c84e20774581e7e79e..3b790865aece9bc4d0afebb5ad61058971dc5b76 100644 (file)
 #include <subdev/timer.h>
 #include <subdev/top.h>
 
+void
+nvkm_falcon_intr_retrigger(struct nvkm_falcon *falcon)
+{
+       if (falcon->func->intr_retrigger)
+               falcon->func->intr_retrigger(falcon);
+}
+
+bool
+nvkm_falcon_riscv_active(struct nvkm_falcon *falcon)
+{
+       if (!falcon->func->riscv_active)
+               return false;
+
+       return falcon->func->riscv_active(falcon);
+}
+
 static const struct nvkm_falcon_func_dma *
 nvkm_falcon_dma(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base)
 {
index 49fd32943916f31e00e8760652220144914bf7ec..5db94fb10afcc1cef590eca539f1cc6e7243b20d 100644 (file)
  */
 #include "priv.h"
 
+void
+ga100_flcn_intr_retrigger(struct nvkm_falcon *falcon)
+{
+       nvkm_falcon_wr32(falcon, 0x3e8, 0x00000001);
+}
+
 int
 ga100_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src)
 {
index 0ff450fe359081e25742ad8af67c2236961a14da..834afa45f2fdb821ee38244ad0a03a73b2baf37e 100644 (file)
 #include <subdev/mc.h>
 #include <subdev/timer.h>
 
+bool
+ga102_flcn_riscv_active(struct nvkm_falcon *falcon)
+{
+       return (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x388) & 0x00000080) != 0;
+}
+
 static bool
 ga102_flcn_dma_done(struct nvkm_falcon *falcon)
 {
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c b/drivers/gpu/drm/nouveau/nvkm/falcon/tu102.c
new file mode 100644 (file)
index 0000000..3999182
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+bool
+tu102_flcn_riscv_active(struct nvkm_falcon *falcon)
+{
+       return (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x240) & 0x00000001) != 0;
+}
index 45dcf493e972978c7368913ce26add89c5471cca..c7d38609bb7eb55d92287e571581ddb4fa9bd9ac 100644 (file)
@@ -20,6 +20,7 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include "priv.h"
+#include <subdev/gsp.h>
 
 #include <nvfw/acr.h>
 
@@ -322,5 +323,8 @@ int
 ga102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_acr **pacr)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_acr_new_(ga102_acr_fwif, device, type, inst, pacr);
 }
index c22d551c0078104d8374f162faa0c1f900c7b55b..565e9a070b23f38c884b677edc0c2e95e6f1c473 100644 (file)
@@ -201,5 +201,8 @@ int
 tu102_acr_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_acr **pacr)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_acr_new_(tu102_acr_fwif, device, type, inst, pacr);
 }
index 8faee3317a74fbb13fc411c8266275d495ccc49a..9754c6872543cdd2c0e02509c7a17be02992ff42 100644 (file)
@@ -7,3 +7,5 @@ nvkm-y += nvkm/subdev/bar/gk20a.o
 nvkm-y += nvkm/subdev/bar/gm107.o
 nvkm-y += nvkm/subdev/bar/gm20b.o
 nvkm-y += nvkm/subdev/bar/tu102.o
+
+nvkm-y += nvkm/subdev/bar/r535.o
index d017a1b5e5dd55f23ce0ef7d752727b07ac86ab1..91bc53be97ffc228c243e675c039b8c53bdb8aab 100644 (file)
@@ -93,8 +93,16 @@ static int
 nvkm_bar_fini(struct nvkm_subdev *subdev, bool suspend)
 {
        struct nvkm_bar *bar = nvkm_bar(subdev);
+
+       if (!subdev->use.enabled)
+               return 0;
+
        if (bar->func->bar1.fini)
                bar->func->bar1.fini(bar);
+
+       if (!suspend) /* Handled by instmem. */
+               nvkm_bar_bar2_fini(subdev->device);
+
        return 0;
 }
 
@@ -120,7 +128,7 @@ static void *
 nvkm_bar_dtor(struct nvkm_subdev *subdev)
 {
        struct nvkm_bar *bar = nvkm_bar(subdev);
-       nvkm_bar_bar2_fini(subdev->device);
+
        return bar->func->dtor(bar);
 }
 
index daebfc991c76e6b8e3f1a004a7e364807b2dfac5..d0168e0b78fb745b3d7ff66e14c1b63f2c1d9ec0 100644 (file)
@@ -4,6 +4,9 @@
 #define nvkm_bar(p) container_of((p), struct nvkm_bar, subdev)
 #include <subdev/bar.h>
 
+int r535_bar_new_(const struct nvkm_bar_func *,
+                 struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_bar **);
+
 void nvkm_bar_ctor(const struct nvkm_bar_func *, struct nvkm_device *,
                   enum nvkm_subdev_type, int, struct nvkm_bar *);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c
new file mode 100644 (file)
index 0000000..4135690
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+#include <core/mm.h>
+#include <subdev/fb.h>
+#include <subdev/gsp.h>
+#include <subdev/instmem.h>
+#include <subdev/mmu/vmm.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_headers.h>
+
+static void
+r535_bar_flush(struct nvkm_bar *bar)
+{
+       ioread32_native(bar->flushBAR2);
+}
+
+static void
+r535_bar_bar2_wait(struct nvkm_bar *base)
+{
+}
+
+static int
+r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr)
+{
+       rpc_update_bar_pde_v15_00 *rpc;
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UPDATE_BAR_PDE, sizeof(*rpc));
+       if (WARN_ON(IS_ERR_OR_NULL(rpc)))
+               return -EIO;
+
+       rpc->info.barType = NV_RPC_UPDATE_PDE_BAR_2;
+       rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */
+       rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu!
+
+       return nvkm_gsp_rpc_wr(gsp, rpc, true);
+}
+
+static void
+r535_bar_bar2_fini(struct nvkm_bar *bar)
+{
+       struct nvkm_gsp *gsp = bar->subdev.device->gsp;
+
+       bar->flushBAR2 = bar->flushBAR2PhysMode;
+       nvkm_done(bar->flushFBZero);
+
+       WARN_ON(r535_bar_bar2_update_pde(gsp, 0));
+}
+
+static void
+r535_bar_bar2_init(struct nvkm_bar *bar)
+{
+       struct nvkm_device *device = bar->subdev.device;
+       struct nvkm_vmm *vmm = gf100_bar(bar)->bar[0].vmm;
+       struct nvkm_gsp *gsp = device->gsp;
+
+       WARN_ON(r535_bar_bar2_update_pde(gsp, vmm->pd->pde[0]->pt[0]->addr));
+       vmm->rm.bar2_pdb = gsp->bar.rm_bar2_pdb;
+
+       if (!bar->flushFBZero) {
+               struct nvkm_memory *fbZero;
+               int ret;
+
+               ret = nvkm_ram_wrap(device, 0, 0x1000, &fbZero);
+               if (ret == 0) {
+                       ret = nvkm_memory_kmap(fbZero, &bar->flushFBZero);
+                       nvkm_memory_unref(&fbZero);
+               }
+               WARN_ON(ret);
+       }
+
+       bar->bar2 = true;
+       bar->flushBAR2 = nvkm_kmap(bar->flushFBZero);
+       WARN_ON(!bar->flushBAR2);
+}
+
+static void
+r535_bar_bar1_wait(struct nvkm_bar *base)
+{
+}
+
+static void
+r535_bar_bar1_fini(struct nvkm_bar *base)
+{
+}
+
+static void
+r535_bar_bar1_init(struct nvkm_bar *bar)
+{
+       struct nvkm_device *device = bar->subdev.device;
+       struct nvkm_gsp *gsp = device->gsp;
+       struct nvkm_vmm *vmm = gf100_bar(bar)->bar[1].vmm;
+       struct nvkm_memory *pd3;
+       int ret;
+
+       ret = nvkm_ram_wrap(device, gsp->bar.rm_bar1_pdb, 0x1000, &pd3);
+       if (WARN_ON(ret))
+               return;
+
+       nvkm_memory_unref(&vmm->pd->pt[0]->memory);
+
+       ret = nvkm_memory_kmap(pd3, &vmm->pd->pt[0]->memory);
+       nvkm_memory_unref(&pd3);
+       if (WARN_ON(ret))
+               return;
+
+       vmm->pd->pt[0]->addr = nvkm_memory_addr(vmm->pd->pt[0]->memory);
+}
+
+static void *
+r535_bar_dtor(struct nvkm_bar *bar)
+{
+       void *data = gf100_bar_dtor(bar);
+
+       nvkm_memory_unref(&bar->flushFBZero);
+
+       if (bar->flushBAR2PhysMode)
+               iounmap(bar->flushBAR2PhysMode);
+
+       kfree(bar->func);
+       return data;
+}
+
+int
+r535_bar_new_(const struct nvkm_bar_func *hw, struct nvkm_device *device,
+             enum nvkm_subdev_type type, int inst, struct nvkm_bar **pbar)
+{
+       struct nvkm_bar_func *rm;
+       struct nvkm_bar *bar;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_bar_dtor;
+       rm->oneinit = hw->oneinit;
+       rm->bar1.init = r535_bar_bar1_init;
+       rm->bar1.fini = r535_bar_bar1_fini;
+       rm->bar1.wait = r535_bar_bar1_wait;
+       rm->bar1.vmm = hw->bar1.vmm;
+       rm->bar2.init = r535_bar_bar2_init;
+       rm->bar2.fini = r535_bar_bar2_fini;
+       rm->bar2.wait = r535_bar_bar2_wait;
+       rm->bar2.vmm = hw->bar2.vmm;
+       rm->flush = r535_bar_flush;
+
+       ret = gf100_bar_new_(rm, device, type, inst, &bar);
+       *pbar = bar;
+       if (ret) {
+               if (!bar)
+                       kfree(rm);
+               return ret;
+       }
+
+       bar->flushBAR2PhysMode = ioremap(device->func->resource_addr(device, 3), PAGE_SIZE);
+       if (!bar->flushBAR2PhysMode)
+               return -ENOMEM;
+
+       bar->flushBAR2 = bar->flushBAR2PhysMode;
+
+       gf100_bar(*pbar)->bar2_halve = true;
+       return 0;
+}
index c25ab407b85d9182ff26ba361ac23dadde8b87a0..b4196edad5b8d029d4b08ada0eaf9fa0855cacc1 100644 (file)
@@ -22,6 +22,7 @@
 #include "gf100.h"
 
 #include <core/memory.h>
+#include <subdev/gsp.h>
 #include <subdev/timer.h>
 
 static void
@@ -95,5 +96,8 @@ int
 tu102_bar_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_bar **pbar)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_bar_new_(&tu102_bar, device, type, inst, pbar);
+
        return gf100_bar_new_(&tu102_bar, device, type, inst, pbar);
 }
index 6c318e41bde042609a4de03e3b72152eb3644c22..91f486ee4c42d771f41da1a91a686d658e978a78 100644 (file)
@@ -46,6 +46,14 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
        return true;
 }
 
+void *
+nvbios_pointer(struct nvkm_bios *bios, u32 addr)
+{
+       if (likely(nvbios_addr(bios, &addr, 0)))
+               return &bios->data[addr];
+       return NULL;
+}
+
 u8
 nvbios_rd08(struct nvkm_bios *bios, u32 addr)
 {
index 80b5aaceeaad17f6dde872cdac8c641dec6ce182..8e1e0b057a0b42c61bfe40ffbf0b09fb72bd753e 100644 (file)
@@ -24,6 +24,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static void
 gf100_bus_intr(struct nvkm_bus *bus)
 {
@@ -72,5 +74,8 @@ int
 gf100_bus_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_bus **pbus)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_bus_new_(&gf100_bus, device, type, inst, pbus);
 }
index d1abb64841dac7ad969e4ab1ff688c03c5060095..5f97bffca979aeec56009e7ec0b0ccf77bdad1ff 100644 (file)
@@ -16,3 +16,5 @@ nvkm-y += nvkm/subdev/devinit/gm200.o
 nvkm-y += nvkm/subdev/devinit/gv100.o
 nvkm-y += nvkm/subdev/devinit/tu102.o
 nvkm-y += nvkm/subdev/devinit/ga100.o
+
+nvkm-y += nvkm/subdev/devinit/r535.o
index 6b280b05c4ca07c9bedf81b76c26827bbf2f937f..5f0b12a1fc38769dc138923db28c779cd37e8151 100644 (file)
@@ -24,6 +24,7 @@
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/clk/pll.h>
+#include <subdev/gsp.h>
 
 static int
 ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
@@ -62,8 +63,19 @@ ga100_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
        return ret;
 }
 
+static void
+ga100_devinit_disable(struct nvkm_devinit *init)
+{
+       struct nvkm_device *device = init->subdev.device;
+       u32 r820c04 = nvkm_rd32(device, 0x820c04);
+
+       if (r820c04 & 0x00000001)
+               nvkm_subdev_disable(device, NVKM_ENGINE_DISP, 0);
+}
+
 static const struct nvkm_devinit_func
 ga100_devinit = {
+       .disable = ga100_devinit_disable,
        .init = nv50_devinit_init,
        .post = tu102_devinit_post,
        .pll_set = ga100_devinit_pll_set,
@@ -73,5 +85,8 @@ int
 ga100_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                  struct nvkm_devinit **pinit)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_devinit_new(&ga100_devinit, device, type, inst, pinit);
+
        return nv50_devinit_new_(&ga100_devinit, device, type, inst, pinit);
 }
index a648482d06e912e0543ad55a1899d6bb2c8278d1..06bbfdcc788cf5f5ca041ec9c849c9439f1c4ca9 100644 (file)
@@ -4,6 +4,9 @@
 #define nvkm_devinit(p) container_of((p), struct nvkm_devinit, subdev)
 #include <subdev/devinit.h>
 
+int r535_devinit_new(const struct nvkm_devinit_func *,
+                    struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_devinit **);
+
 struct nvkm_devinit_func {
        void *(*dtor)(struct nvkm_devinit *);
        void (*preinit)(struct nvkm_devinit *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
new file mode 100644 (file)
index 0000000..666eb93
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "nv50.h"
+
+static void *
+r535_devinit_dtor(struct nvkm_devinit *devinit)
+{
+       kfree(devinit->func);
+       return devinit;
+}
+
+int
+r535_devinit_new(const struct nvkm_devinit_func *hw,
+                struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+                struct nvkm_devinit **pdevinit)
+{
+       struct nvkm_devinit_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_devinit_dtor;
+       rm->post = hw->post;
+       rm->disable = hw->disable;
+
+       ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 40997ad1d101c59ac9fbd0d460a163534099ed38..f406b1525a4affa66f5dde71cba8604e3e8f4d3d 100644 (file)
@@ -24,6 +24,7 @@
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
 #include <subdev/clk/pll.h>
+#include <subdev/gsp.h>
 
 static int
 tu102_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
@@ -100,5 +101,8 @@ int
 tu102_devinit_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                  struct nvkm_devinit **pinit)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_devinit_new(&tu102_devinit, device, type, inst, pinit);
+
        return nv50_devinit_new_(&tu102_devinit, device, type, inst, pinit);
 }
index 967efaddae281261656577d8b188e96d8319a435..5390417a58b5d526e5874ac9c6007790170c3e44 100644 (file)
@@ -22,6 +22,7 @@
 #include "priv.h"
 
 #include <core/memory.h>
+#include <subdev/gsp.h>
 #include <subdev/mc.h>
 #include <subdev/mmu.h>
 #include <subdev/vfn.h>
@@ -175,7 +176,12 @@ int
 tu102_fault_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                struct nvkm_fault **pfault)
 {
-       int ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
+       int ret;
+
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
+       ret = nvkm_fault_new_(&tu102_fault, device, type, inst, pfault);
        if (ret)
                return ret;
 
index 394c305e759ad8f008e2e0fa784a03f9aa9851c6..d1611ad3bf81c5155ac60046e989ade88e5f954f 100644 (file)
@@ -36,6 +36,8 @@ nvkm-y += nvkm/subdev/fb/tu102.o
 nvkm-y += nvkm/subdev/fb/ga100.o
 nvkm-y += nvkm/subdev/fb/ga102.o
 
+nvkm-y += nvkm/subdev/fb/r535.o
+
 nvkm-y += nvkm/subdev/fb/ram.o
 nvkm-y += nvkm/subdev/fb/ramnv04.o
 nvkm-y += nvkm/subdev/fb/ramnv10.o
index 12037fd4fdf27fd708c345700b7a297448fae4e9..e9e7c1d5c4c427f404037cea934904c46e19522e 100644 (file)
@@ -22,6 +22,8 @@
 #include "gf100.h"
 #include "ram.h"
 
+#include <subdev/gsp.h>
+
 static const struct nvkm_fb_func
 ga100_fb = {
        .dtor = gf100_fb_dtor,
@@ -38,5 +40,8 @@ ga100_fb = {
 int
 ga100_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fb_new(&ga100_fb, device, type, inst, pfb);
+
        return gf100_fb_new_(&ga100_fb, device, type, inst, pfb);
 }
index 76f6877b54c6f239cca61ebaef114d1b882c7e64..25f82b372bcab23e5528b0c34330adec0baa5e45 100644 (file)
@@ -22,6 +22,7 @@
 #include "gf100.h"
 #include "ram.h"
 
+#include <subdev/gsp.h>
 #include <engine/nvdec.h>
 
 static u64
@@ -59,6 +60,9 @@ ga102_fb = {
 int
 ga102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fb_new(&ga102_fb, device, type, inst, pfb);
+
        return gf100_fb_new_(&ga102_fb, device, type, inst, pfb);
 }
 
index 77d6a8c108298c78f4c6082ca4c2a2c65fa247b4..35c55dfba23d94725959324d30c93b960f36909a 100644 (file)
@@ -6,6 +6,9 @@
 #include <subdev/therm.h>
 struct nvkm_bios;
 
+int r535_fb_new(const struct nvkm_fb_func *,
+               struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_fb **);
+
 struct nvkm_fb_func {
        void *(*dtor)(struct nvkm_fb *);
        u32 (*tags)(struct nvkm_fb *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/r535.c
new file mode 100644 (file)
index 0000000..d325150
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+#include "ram.h"
+
+#include <subdev/gsp.h>
+
+static const struct nvkm_ram_func
+r535_fb_ram = {
+};
+
+static int
+r535_fb_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+       struct nvkm_gsp *gsp = fb->subdev.device->gsp;
+       struct nvkm_ram *ram;
+       int ret;
+
+       if (!(ram = *pram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+               return -ENOMEM;
+
+       ram->func = &r535_fb_ram;
+       ram->fb = fb;
+       ram->type = NVKM_RAM_TYPE_UNKNOWN; /*TODO: pull this from GSP. */
+       ram->size = gsp->fb.size;
+       ram->stolen = false;
+       mutex_init(&ram->mutex);
+
+       for (int i = 0; i < gsp->fb.region_nr; i++) {
+               ret = nvkm_mm_init(&ram->vram, NVKM_RAM_MM_NORMAL,
+                                  gsp->fb.region[i].addr >> NVKM_RAM_MM_SHIFT,
+                                  gsp->fb.region[i].size >> NVKM_RAM_MM_SHIFT,
+                                  1);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
+static void *
+r535_fb_dtor(struct nvkm_fb *fb)
+{
+       kfree(fb->func);
+       return fb;
+}
+
+int
+r535_fb_new(const struct nvkm_fb_func *hw,
+           struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
+{
+       struct nvkm_fb_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_fb_dtor;
+       rm->sysmem.flush_page_init = hw->sysmem.flush_page_init;
+       rm->vidmem.size = hw->vidmem.size;
+       rm->ram_new = r535_fb_ram_new;
+
+       ret = nvkm_fb_new_(rm, device, type, inst, pfb);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 5c34416cb637951a4bc420d75f145813172028d6..c826980bf70ee1c67ef0e58905e3bba55860d0dc 100644 (file)
@@ -88,12 +88,20 @@ nvkm_vram_dtor(struct nvkm_memory *memory)
        struct nvkm_vram *vram = nvkm_vram(memory);
        struct nvkm_mm_node *next = vram->mn;
        struct nvkm_mm_node *node;
-       mutex_lock(&vram->ram->mutex);
-       while ((node = next)) {
-               next = node->next;
-               nvkm_mm_free(&vram->ram->vram, &node);
+
+       if (next) {
+               if (likely(next->nl_entry.next)){
+                       mutex_lock(&vram->ram->mutex);
+                       while ((node = next)) {
+                               next = node->next;
+                               nvkm_mm_free(&vram->ram->vram, &node);
+                       }
+                       mutex_unlock(&vram->ram->mutex);
+               } else {
+                       kfree(vram->mn);
+               }
        }
-       mutex_unlock(&vram->ram->mutex);
+
        return vram;
 }
 
@@ -108,6 +116,34 @@ nvkm_vram = {
        .kmap = nvkm_vram_kmap,
 };
 
+int
+nvkm_ram_wrap(struct nvkm_device *device, u64 addr, u64 size,
+             struct nvkm_memory **pmemory)
+{
+       struct nvkm_ram *ram;
+       struct nvkm_vram *vram;
+
+       if (!device->fb || !(ram = device->fb->ram))
+               return -ENODEV;
+       ram = device->fb->ram;
+
+       if (!(vram = kzalloc(sizeof(*vram), GFP_KERNEL)))
+               return -ENOMEM;
+
+       nvkm_memory_ctor(&nvkm_vram, &vram->memory);
+       vram->ram = ram;
+       vram->page = NVKM_RAM_MM_SHIFT;
+       *pmemory = &vram->memory;
+
+       vram->mn = kzalloc(sizeof(*vram->mn), GFP_KERNEL);
+       if (!vram->mn)
+               return -ENOMEM;
+
+       vram->mn->offset = addr >> NVKM_RAM_MM_SHIFT;
+       vram->mn->length = size >> NVKM_RAM_MM_SHIFT;
+       return 0;
+}
+
 int
 nvkm_ram_get(struct nvkm_device *device, u8 heap, u8 type, u8 rpage, u64 size,
             bool contig, bool back, struct nvkm_memory **pmemory)
index bcc23d4c8115d1d6b44c0dc1776ce5bdaeb3bf6f..f7d2a749ce3fe9dd6174a7b5784bab231aea3830 100644 (file)
@@ -22,6 +22,8 @@
 #include "gf100.h"
 #include "ram.h"
 
+#include <subdev/gsp.h>
+
 bool
 tu102_fb_vpr_scrub_required(struct nvkm_fb *fb)
 {
@@ -46,6 +48,9 @@ tu102_fb = {
 int
 tu102_fb_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_fb **pfb)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_fb_new(&tu102_fb, device, type, inst, pfb);
+
        return gf100_fb_new_(&tu102_fb, device, type, inst, pfb);
 }
 
index 7dc99492f536e7c90f5d16fc3c1bcd3c77ecd6b9..d621edbdff9d9ec90bd8656a58b46c4625a5dddb 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static u32
 gm107_fuse_read(struct nvkm_fuse *fuse, u32 addr)
 {
@@ -39,5 +41,8 @@ int
 gm107_fuse_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_fuse **pfuse)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_fuse_new_(&gm107_fuse, device, type, inst, pfuse);
 }
index 4a96f926b66df2758c40b1ac24317b60422389d8..4dbffae21ddcd1f58f5c28544e9df4c784556f24 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static void
 ga102_gpio_reset(struct nvkm_gpio *gpio, u8 match)
 {
@@ -115,5 +117,8 @@ int
 ga102_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_gpio **pgpio)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_gpio_new_(&ga102_gpio, device, type, inst, pgpio);
 }
index c0e4cdb45520be5b87d3887f2e13b7d47f8fe4cf..5f7063d5579bba2f5451db25f564276c8791d946 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static void
 gk104_gpio_intr_stat(struct nvkm_gpio *gpio, u32 *hi, u32 *lo)
 {
@@ -71,5 +73,8 @@ int
 gk104_gpio_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
               struct nvkm_gpio **pgpio)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_gpio_new_(&gk104_gpio, device, type, inst, pgpio);
 }
index 7f61a1ed158b98148d77180662854a6ff7dc988d..16bf2f1bb78014235e1349a0d2ad404a8750b94e 100644 (file)
@@ -1,4 +1,12 @@
 # SPDX-License-Identifier: MIT
 nvkm-y += nvkm/subdev/gsp/base.o
+nvkm-y += nvkm/subdev/gsp/fwsec.o
+
 nvkm-y += nvkm/subdev/gsp/gv100.o
+nvkm-y += nvkm/subdev/gsp/tu102.o
+nvkm-y += nvkm/subdev/gsp/tu116.o
+nvkm-y += nvkm/subdev/gsp/ga100.o
 nvkm-y += nvkm/subdev/gsp/ga102.o
+nvkm-y += nvkm/subdev/gsp/ad102.o
+
+nvkm-y += nvkm/subdev/gsp/r535.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ad102.c
new file mode 100644 (file)
index 0000000..c849c62
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+ad102_gsp_r535_113_01 = {
+       .flcn = &ga102_gsp_flcn,
+       .fwsec = &ga102_gsp_fwsec,
+
+       .sig_section = ".fwsignature_ad10x",
+
+       .wpr_heap.os_carveout_size = 20 << 20,
+       .wpr_heap.base_size = 8 << 20,
+       .wpr_heap.min_size = 84 << 20,
+
+       .booter.ctor = ga102_gsp_booter_ctor,
+
+       .dtor = r535_gsp_dtor,
+       .oneinit = tu102_gsp_oneinit,
+       .init = r535_gsp_init,
+       .fini = r535_gsp_fini,
+       .reset = ga102_gsp_reset,
+
+       .rm = &r535_gsp_rm,
+};
+
+static struct nvkm_gsp_fwif
+ad102_gsps[] = {
+       { 0, r535_gsp_load, &ad102_gsp_r535_113_01, "535.113.01", true },
+       {}
+};
+
+int
+ad102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_gsp **pgsp)
+{
+       return nvkm_gsp_new_(ad102_gsps, device, type, inst, pgsp);
+}
index 591ac95c26699c2a67e7c8506401c493c23b43e0..04bceaa28a197d93d85db77098e9f8330c63cff0 100644 (file)
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 #include "priv.h"
-#include <core/falcon.h>
-#include <core/firmware.h>
-#include <subdev/acr.h>
-#include <subdev/top.h>
+
+int
+nvkm_gsp_intr_nonstall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
+{
+       for (int i = 0; i < gsp->intr_nr; i++) {
+               if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
+                       if (gsp->intr[i].nonstall != ~0)
+                               return gsp->intr[i].nonstall;
+
+                       return -EINVAL;
+               }
+       }
+
+       return -ENOENT;
+}
+
+int
+nvkm_gsp_intr_stall(struct nvkm_gsp *gsp, enum nvkm_subdev_type type, int inst)
+{
+       for (int i = 0; i < gsp->intr_nr; i++) {
+               if (gsp->intr[i].type == type && gsp->intr[i].inst == inst) {
+                       if (gsp->intr[i].stall != ~0)
+                               return gsp->intr[i].stall;
+
+                       return -EINVAL;
+               }
+       }
+
+       return -ENOENT;
+}
+
+static int
+nvkm_gsp_fini(struct nvkm_subdev *subdev, bool suspend)
+{
+       struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+       if (!gsp->func->fini)
+               return 0;
+
+       return gsp->func->fini(gsp, suspend);
+}
+
+static int
+nvkm_gsp_init(struct nvkm_subdev *subdev)
+{
+       struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+       if (!gsp->func->init)
+               return 0;
+
+       return gsp->func->init(gsp);
+}
+
+static int
+nvkm_gsp_oneinit(struct nvkm_subdev *subdev)
+{
+       struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+       if (!gsp->func->oneinit)
+               return 0;
+
+       return gsp->func->oneinit(gsp);
+}
 
 static void *
 nvkm_gsp_dtor(struct nvkm_subdev *subdev)
 {
        struct nvkm_gsp *gsp = nvkm_gsp(subdev);
+
+       if (gsp->func && gsp->func->dtor)
+               gsp->func->dtor(gsp);
+
        nvkm_falcon_dtor(&gsp->falcon);
        return gsp;
 }
@@ -36,6 +99,9 @@ nvkm_gsp_dtor(struct nvkm_subdev *subdev)
 static const struct nvkm_subdev_func
 nvkm_gsp = {
        .dtor = nvkm_gsp_dtor,
+       .oneinit = nvkm_gsp_oneinit,
+       .init = nvkm_gsp_init,
+       .fini = nvkm_gsp_fini,
 };
 
 int
@@ -54,6 +120,8 @@ nvkm_gsp_new_(const struct nvkm_gsp_fwif *fwif, struct nvkm_device *device,
                return PTR_ERR(fwif);
 
        gsp->func = fwif->func;
+       gsp->rm = gsp->func->rm;
 
-       return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0, &gsp->falcon);
+       return nvkm_falcon_ctor(gsp->func->flcn, &gsp->subdev, gsp->subdev.name, 0x110000,
+                               &gsp->falcon);
 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
new file mode 100644 (file)
index 0000000..330d72b
--- /dev/null
@@ -0,0 +1,359 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/bios.h>
+#include <subdev/bios/pmu.h>
+
+#include <nvfw/fw.h>
+
+union nvfw_falcon_appif_hdr {
+       struct nvfw_falcon_appif_hdr_v1 {
+               u8 ver;
+               u8 hdr;
+               u8 len;
+               u8 cnt;
+       } v1;
+};
+
+union nvfw_falcon_appif {
+       struct nvfw_falcon_appif_v1 {
+#define NVFW_FALCON_APPIF_ID_DMEMMAPPER 0x00000004
+               u32 id;
+               u32 dmem_base;
+       } v1;
+};
+
+union nvfw_falcon_appif_dmemmapper {
+       struct {
+               u32 signature;
+               u16 version;
+               u16 size;
+               u32 cmd_in_buffer_offset;
+               u32 cmd_in_buffer_size;
+               u32 cmd_out_buffer_offset;
+               u32 cmd_out_buffer_size;
+               u32 nvf_img_data_buffer_offset;
+               u32 nvf_img_data_buffer_size;
+               u32 printf_buffer_hdr;
+               u32 ucode_build_time_stamp;
+               u32 ucode_signature;
+#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS 0x00000015
+#define NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB   0x00000019
+               u32 init_cmd;
+               u32 ucode_feature;
+               u32 ucode_cmd_mask0;
+               u32 ucode_cmd_mask1;
+               u32 multi_tgt_tbl;
+       } v3;
+};
+
+struct nvfw_fwsec_frts_cmd {
+       struct {
+           u32 ver;
+           u32 hdr;
+           u64 addr;
+           u32 size;
+           u32 flags;
+       } read_vbios;
+       struct {
+           u32 ver;
+           u32 hdr;
+           u32 addr;
+           u32 size;
+#define NVFW_FRTS_CMD_REGION_TYPE_FB 0x00000002
+           u32 type;
+       } frts_region;
+};
+
+static int
+nvkm_gsp_fwsec_patch(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw, u32 if_offset, u32 init_cmd)
+{
+       union nvfw_falcon_appif_hdr *hdr = (void *)(fw->fw.img + fw->dmem_base_img + if_offset);
+       const u8 *dmem = fw->fw.img + fw->dmem_base_img;
+       int i;
+
+       if (WARN_ON(hdr->v1.ver != 1))
+               return -EINVAL;
+
+       for (i = 0; i < hdr->v1.cnt; i++) {
+               union nvfw_falcon_appif *app = (void *)((u8 *)hdr + hdr->v1.hdr + i * hdr->v1.len);
+               union nvfw_falcon_appif_dmemmapper *dmemmap;
+               struct nvfw_fwsec_frts_cmd *frtscmd;
+
+               if (app->v1.id != NVFW_FALCON_APPIF_ID_DMEMMAPPER)
+                       continue;
+
+               dmemmap = (void *)(dmem + app->v1.dmem_base);
+               dmemmap->v3.init_cmd = init_cmd;
+
+               frtscmd = (void *)(dmem + dmemmap->v3.cmd_in_buffer_offset);
+
+               frtscmd->read_vbios.ver = 1;
+               frtscmd->read_vbios.hdr = sizeof(frtscmd->read_vbios);
+               frtscmd->read_vbios.addr = 0;
+               frtscmd->read_vbios.size = 0;
+               frtscmd->read_vbios.flags = 2;
+
+               if (init_cmd == NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS) {
+                       frtscmd->frts_region.ver = 1;
+                       frtscmd->frts_region.hdr = sizeof(frtscmd->frts_region);
+                       frtscmd->frts_region.addr = gsp->fb.wpr2.frts.addr >> 12;
+                       frtscmd->frts_region.size = gsp->fb.wpr2.frts.size >> 12;
+                       frtscmd->frts_region.type = NVFW_FRTS_CMD_REGION_TYPE_FB;
+               }
+
+               break;
+       }
+
+       if (WARN_ON(i == hdr->v1.cnt))
+               return -EINVAL;
+
+       return 0;
+}
+
+union nvfw_falcon_ucode_desc {
+       struct nvkm_falcon_ucode_desc_v2 {
+               u32 Hdr;
+               u32 StoredSize;
+               u32 UncompressedSize;
+               u32 VirtualEntry;
+               u32 InterfaceOffset;
+               u32 IMEMPhysBase;
+               u32 IMEMLoadSize;
+               u32 IMEMVirtBase;
+               u32 IMEMSecBase;
+               u32 IMEMSecSize;
+               u32 DMEMOffset;
+               u32 DMEMPhysBase;
+               u32 DMEMLoadSize;
+               u32 altIMEMLoadSize;
+               u32 altDMEMLoadSize;
+       } v2;
+
+       struct nvkm_falcon_ucode_desc_v3 {
+               u32 Hdr;
+               u32 StoredSize;
+               u32 PKCDataOffset;
+               u32 InterfaceOffset;
+               u32 IMEMPhysBase;
+               u32 IMEMLoadSize;
+               u32 IMEMVirtBase;
+               u32 DMEMPhysBase;
+               u32 DMEMLoadSize;
+               u16 EngineIdMask;
+               u8  UcodeId;
+               u8  SignatureCount;
+               u16 SignatureVersions;
+               u16 Reserved;
+       } v3;
+};
+
+static int
+nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name,
+                 const struct nvkm_falcon_ucode_desc_v2 *desc, u32 size, u32 init_cmd,
+                 struct nvkm_falcon_fw *fw)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       const struct firmware *bl;
+       const struct nvfw_bin_hdr *hdr;
+       const struct nvfw_bl_desc *bld;
+       int ret;
+
+       /* Build ucode. */
+       ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, subdev->device, true,
+                                 (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize,
+                                 &gsp->falcon, fw);
+       if (WARN_ON(ret))
+               return ret;
+
+       fw->nmem_base_img = 0;
+       fw->nmem_base = desc->IMEMPhysBase;
+       fw->nmem_size = desc->IMEMLoadSize - desc->IMEMSecSize;
+
+       fw->imem_base_img = 0;
+       fw->imem_base = desc->IMEMSecBase;
+       fw->imem_size = desc->IMEMSecSize;
+
+       fw->dmem_base_img = desc->DMEMOffset;
+       fw->dmem_base = desc->DMEMPhysBase;
+       fw->dmem_size = desc->DMEMLoadSize;
+
+       /* Bootloader. */
+       ret = nvkm_firmware_get(subdev, "acr/bl", 0, &bl);
+       if (ret)
+               return ret;
+
+       hdr = nvfw_bin_hdr(subdev, bl->data);
+       bld = nvfw_bl_desc(subdev, bl->data + hdr->header_offset);
+
+       fw->boot_addr = bld->start_tag << 8;
+       fw->boot_size = bld->code_size;
+       fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL);
+       if (!fw->boot)
+               ret = -ENOMEM;
+
+       nvkm_firmware_put(bl);
+
+       /* Patch in interface data. */
+       return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
+}
+
+static int
+nvkm_gsp_fwsec_v3(struct nvkm_gsp *gsp, const char *name,
+                 const struct nvkm_falcon_ucode_desc_v3 *desc, u32 size, u32 init_cmd,
+                 struct nvkm_falcon_fw *fw)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       struct nvkm_bios *bios = device->bios;
+       int ret;
+
+       /* Build ucode. */
+       ret = nvkm_falcon_fw_ctor(gsp->func->fwsec, name, device, true,
+                                 (u8 *)desc + size, desc->IMEMLoadSize + desc->DMEMLoadSize,
+                                 &gsp->falcon, fw);
+       if (WARN_ON(ret))
+               return ret;
+
+       fw->imem_base_img = 0;
+       fw->imem_base = desc->IMEMPhysBase;
+       fw->imem_size = desc->IMEMLoadSize;
+       fw->dmem_base_img = desc->IMEMLoadSize;
+       fw->dmem_base = desc->DMEMPhysBase;
+       fw->dmem_size = ALIGN(desc->DMEMLoadSize, 256);
+       fw->dmem_sign = desc->PKCDataOffset;
+       fw->boot_addr = 0;
+       fw->fuse_ver = desc->SignatureVersions;
+       fw->ucode_id = desc->UcodeId;
+       fw->engine_id = desc->EngineIdMask;
+
+       /* Patch in signature. */
+       ret = nvkm_falcon_fw_sign(fw, fw->dmem_base_img + desc->PKCDataOffset, 96 * 4,
+                                 nvbios_pointer(bios, 0), desc->SignatureCount,
+                                 (u8 *)desc + 0x2c - (u8 *)nvbios_pointer(bios, 0), 0, 0);
+       if (WARN_ON(ret))
+               return ret;
+
+       /* Patch in interface data. */
+       return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd);
+}
+
+static int
+nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvkm_bios *bios = device->bios;
+       const union nvfw_falcon_ucode_desc *desc;
+       struct nvbios_pmuE flcn_ucode;
+       u8 idx, ver, hdr;
+       u32 data;
+       u16 size, vers;
+       struct nvkm_falcon_fw fw = {};
+       u32 mbox0 = 0;
+       int ret;
+
+       /* Lookup in VBIOS. */
+       for (idx = 0; (data = nvbios_pmuEp(bios, idx, &ver, &hdr, &flcn_ucode)); idx++) {
+               if (flcn_ucode.type == 0x85)
+                       break;
+       }
+
+       if (WARN_ON(!data))
+               return -EINVAL;
+
+       /* Determine version. */
+       desc = nvbios_pointer(bios, flcn_ucode.data);
+       if (WARN_ON(!(desc->v2.Hdr & 0x00000001)))
+               return -EINVAL;
+
+       size = (desc->v2.Hdr & 0xffff0000) >> 16;
+       vers = (desc->v2.Hdr & 0x0000ff00) >> 8;
+
+       switch (vers) {
+       case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, &fw); break;
+       case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, &fw); break;
+       default:
+               nvkm_error(subdev, "%s(v%d): version unknown\n", name, vers);
+               return -EINVAL;
+       }
+
+       if (ret) {
+               nvkm_error(subdev, "%s(v%d): %d\n", name, vers, ret);
+               return ret;
+       }
+
+       /* Boot. */
+       ret = nvkm_falcon_fw_boot(&fw, subdev, true, &mbox0, NULL, 0, 0);
+       nvkm_falcon_fw_dtor(&fw);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+int
+nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_device *device = subdev->device;
+       int ret;
+       u32 err;
+
+       ret = nvkm_gsp_fwsec(gsp, "fwsec-sb", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
+       if (ret)
+               return ret;
+
+       /* Verify. */
+       err = nvkm_rd32(device, 0x001400 + (0xf * 4)) & 0x0000ffff;
+       if (err) {
+               nvkm_error(subdev, "fwsec-sb: 0x%04x\n", err);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+int
+nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_device *device = subdev->device;
+       int ret;
+       u32 err, wpr2_lo, wpr2_hi;
+
+       ret = nvkm_gsp_fwsec(gsp, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS);
+       if (ret)
+               return ret;
+
+       /* Verify. */
+       err = nvkm_rd32(device, 0x001400 + (0xe * 4)) >> 16;
+       if (err) {
+               nvkm_error(subdev, "fwsec-frts: 0x%04x\n", err);
+               return -EIO;
+       }
+
+       wpr2_lo = nvkm_rd32(device, 0x1fa824);
+       wpr2_hi = nvkm_rd32(device, 0x1fa828);
+       nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi);
+       return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/ga100.c
new file mode 100644 (file)
index 0000000..223f68b
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
/* Falcon description for the GSP core on GA100: mixes GM200/GP102/TU102-era
 * helpers with the GA100 interrupt-retrigger hook. */
static const struct nvkm_falcon_func
ga100_gsp_flcn = {
	.disable = gm200_flcn_disable,
	.enable = gm200_flcn_enable,
	.addr2 = 0x1000,
	.riscv_irqmask = 0x2b4,
	.reset_eng = gp102_flcn_reset_eng,
	.reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
	.bind_inst = gm200_flcn_bind_inst,
	.bind_stat = gm200_flcn_bind_stat,
	.bind_intr = true,
	.imem_pio = &gm200_flcn_imem_pio,
	.dmem_pio = &gm200_flcn_dmem_pio,
	.riscv_active = tu102_flcn_riscv_active,
	.intr_retrigger = ga100_flcn_intr_retrigger,
};
+
/* GSP-RM 535.113.01 support on GA100: TU102 boot path (booter + FWSEC)
 * with GA100-specific signature section and WPR heap sizing. */
static const struct nvkm_gsp_func
ga100_gsp_r535_113_01 = {
	.flcn = &ga100_gsp_flcn,
	.fwsec = &tu102_gsp_fwsec,

	.sig_section = ".fwsignature_ga100",

	.wpr_heap.base_size = 8 << 20,
	.wpr_heap.min_size = 64 << 20,

	.booter.ctor = tu102_gsp_booter_ctor,

	.dtor = r535_gsp_dtor,
	.oneinit = tu102_gsp_oneinit,
	.init = r535_gsp_init,
	.fini = r535_gsp_fini,
	.reset = tu102_gsp_reset,

	.rm = &r535_gsp_rm,
};
+
/* Firmware probe table: prefer GSP-RM 535.113.01, otherwise fall back to
 * running without GSP firmware (gv100 no-firmware path). */
static struct nvkm_gsp_fwif
ga100_gsps[] = {
	{  0,  r535_gsp_load, &ga100_gsp_r535_113_01, "535.113.01" },
	{ -1, gv100_gsp_nofw, &gv100_gsp },
	{}
};
+
/* Create the GA100 GSP subdevice, selecting firmware via ga100_gsps[]. */
int
ga100_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_gsp **pgsp)
{
	return nvkm_gsp_new_(ga100_gsps, device, type, inst, pgsp);
}
index a3996ceca99552de6415056d4b821285918279be..4c4b4168a266bada5327b4e2d2ecfc0a79756175 100644 (file)
  */
 #include "priv.h"
 
-static const struct nvkm_falcon_func
+#include <nvfw/flcn.h>
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+int
+ga102_gsp_reset(struct nvkm_gsp *gsp)
+{
+       int ret;
+
+       ret = gsp->falcon.func->reset_eng(&gsp->falcon);
+       if (ret)
+               return ret;
+
+       nvkm_falcon_mask(&gsp->falcon, 0x1668, 0x00000111, 0x00000111);
+       return 0;
+}
+
/* Construct a falcon firmware object for the GA102 "booter" HS image.
 *
 * Parses the v2 heavy-secure headers out of the raw firmware @blob, attaches
 * the signature set, and fills in the IMEM/DMEM layout and BROM metadata
 * (fuse version, engine id, ucode id) needed to boot the image.
 *
 * On success the caller owns @fw and must destroy it with
 * nvkm_falcon_fw_dtor(); on failure it is destroyed here.
 */
int
ga102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
		      struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	const struct nvkm_falcon_fw_func *func = &ga102_flcn_fw;
	const struct nvfw_bin_hdr *hdr;
	const struct nvfw_hs_header_v2 *hshdr;
	const struct nvfw_hs_load_header_v2 *lhdr;
	u32 loc, sig, cnt, *meta;
	int ret;

	/* Locate headers, signature-patch parameters and BROM metadata words
	 * at the offsets the HS header records inside the blob. */
	hdr = nvfw_bin_hdr(subdev, blob->data);
	hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset);
	meta = (u32 *)(blob->data + hshdr->meta_data_offset);
	loc = *(u32 *)(blob->data + hshdr->patch_loc);
	sig = *(u32 *)(blob->data + hshdr->patch_sig);
	cnt = *(u32 *)(blob->data + hshdr->num_sig);

	ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true,
				  blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
	if (ret)
		goto done;

	/* Attach one of @cnt signatures of equal size, selected at boot. */
	ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data,
				  cnt, hshdr->sig_prod_offset + sig, 0, 0);
	if (ret)
		goto done;

	lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset);

	/* Code (IMEM) comes from the first app entry; boot there too. */
	fw->imem_base_img = lhdr->app[0].offset;
	fw->imem_base = 0;
	fw->imem_size = lhdr->app[0].size;

	/* Data (DMEM); dmem_sign is the patch location relative to DMEM. */
	fw->dmem_base_img = lhdr->os_data_offset;
	fw->dmem_base = 0;
	fw->dmem_size = lhdr->os_data_size;
	fw->dmem_sign = loc - lhdr->os_data_offset;

	fw->boot_addr = lhdr->app[0].offset;

	/* BROM metadata words: fuse version, engine id, ucode id. */
	fw->fuse_ver = meta[0];
	fw->engine_id = meta[1];
	fw->ucode_id = meta[2];

done:
	if (ret)
		nvkm_falcon_fw_dtor(fw);

	return ret;
}
+
/* Select which of the firmware's bundled signatures matches this chip's
 * version fuse.
 *
 * Returns the index of the matching signature, -ENOSYS if the firmware's
 * engine id is not the one handled here, or -EINVAL when no signature
 * covers the fuse value.
 *
 * NOTE(review): @src_base_src is unused; presumably kept to match the
 * .signature callback prototype - confirm. */
static int
ga102_gsp_fwsec_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src)
{
	struct nvkm_falcon *falcon = fw->falcon;
	struct nvkm_device *device = falcon->owner->device;
	u32 sig_fuse_version = fw->fuse_ver;
	u32 reg_fuse_version;
	int idx = 0;

	FLCN_DBG(falcon, "brom: %08x %08x", fw->engine_id, fw->ucode_id);
	FLCN_DBG(falcon, "sig_fuse_version: %08x", sig_fuse_version);

	if (fw->engine_id & 0x00000400) {
		/* Per-ucode version fuse register, indexed by ucode id. */
		reg_fuse_version = nvkm_rd32(device, 0x8241c0 + (fw->ucode_id - 1) * 4);
	} else {
		WARN_ON(1);
		return -ENOSYS;
	}

	FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version);
	/* Reduce the raw fuse value to a single bit: its highest set bit. */
	reg_fuse_version = BIT(fls(reg_fuse_version));
	FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version);
	if (!(reg_fuse_version & fw->fuse_ver))
		return -EINVAL;

	/* Shift both masks down to the common set bit; idx counts how many
	 * signatures (set bits in sig_fuse_version) are skipped on the way. */
	while (!(reg_fuse_version & sig_fuse_version & 1)) {
		idx += (sig_fuse_version & 1);
		reg_fuse_version >>= 1;
		sig_fuse_version >>= 1;
	}

	return idx;
}
+
/* FWSEC loader ops for GA102: GA102 DMA load/boot with the fuse-based
 * signature selection above. */
const struct nvkm_falcon_fw_func
ga102_gsp_fwsec = {
	.signature = ga102_gsp_fwsec_signature,
	.reset = gm200_flcn_fw_reset,
	.load = ga102_flcn_fw_load,
	.boot = ga102_flcn_fw_boot,
};
+
+const struct nvkm_falcon_func
 ga102_gsp_flcn = {
        .disable = gm200_flcn_disable,
        .enable = gm200_flcn_enable,
        .select = ga102_flcn_select,
        .addr2 = 0x1000,
+       .riscv_irqmask = 0x528,
        .reset_eng = gp102_flcn_reset_eng,
        .reset_prep = ga102_flcn_reset_prep,
        .reset_wait_mem_scrubbing = ga102_flcn_reset_wait_mem_scrubbing,
        .imem_dma = &ga102_flcn_dma,
        .dmem_dma = &ga102_flcn_dma,
+       .riscv_active = ga102_flcn_riscv_active,
+       .intr_retrigger = ga100_flcn_intr_retrigger,
 };
 
 static const struct nvkm_gsp_func
-ga102_gsp = {
+ga102_gsp_r535_113_01 = {
        .flcn = &ga102_gsp_flcn,
+       .fwsec = &ga102_gsp_fwsec,
+
+       .sig_section = ".fwsignature_ga10x",
+
+       .wpr_heap.os_carveout_size = 20 << 20,
+       .wpr_heap.base_size = 8 << 20,
+       .wpr_heap.min_size = 84 << 20,
+
+       .booter.ctor = ga102_gsp_booter_ctor,
+
+       .dtor = r535_gsp_dtor,
+       .oneinit = tu102_gsp_oneinit,
+       .init = r535_gsp_init,
+       .fini = r535_gsp_fini,
+       .reset = ga102_gsp_reset,
+
+       .rm = &r535_gsp_rm,
 };
 
-static int
-ga102_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
-{
-       return 0;
-}
+static const struct nvkm_gsp_func
+ga102_gsp = {
+       .flcn = &ga102_gsp_flcn,
+};
 
 static struct nvkm_gsp_fwif
 ga102_gsps[] = {
-       { -1, ga102_gsp_nofw, &ga102_gsp },
+       {  0,  r535_gsp_load, &ga102_gsp_r535_113_01, "535.113.01" },
+       { -1, gv100_gsp_nofw, &ga102_gsp },
        {}
 };
 
index da6a809cd31731dca04ca2ea623c1be5415f6bbd..62d9289bcaa5f971cd5aef3a10ddb50808eaa0c7 100644 (file)
@@ -34,12 +34,12 @@ gv100_gsp_flcn = {
        .dmem_pio = &gm200_flcn_dmem_pio,
 };
 
-static const struct nvkm_gsp_func
+const struct nvkm_gsp_func
 gv100_gsp = {
        .flcn = &gv100_gsp_flcn,
 };
 
-static int
+int
 gv100_gsp_nofw(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
 {
        return 0;
index 89749a40203c46b95f78eda42fa1d29745bed8fa..9f4a62375a278a079aaba528fd99db05589b8e02 100644 (file)
@@ -4,16 +4,67 @@
 #include <subdev/gsp.h>
 enum nvkm_acr_lsf_id;
 
-struct nvkm_gsp_func {
-       const struct nvkm_falcon_func *flcn;
-};
+int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
+int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
 
 struct nvkm_gsp_fwif {
        int version;
        int (*load)(struct nvkm_gsp *, int ver, const struct nvkm_gsp_fwif *);
        const struct nvkm_gsp_func *func;
+       const char *ver;
+       bool enable;
 };
 
+int gv100_gsp_nofw(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+int  r535_gsp_load(struct nvkm_gsp *, int, const struct nvkm_gsp_fwif *);
+
/* Per-chipset GSP operations and parameters. */
struct nvkm_gsp_func {
	const struct nvkm_falcon_func *flcn;		/* GSP falcon description */
	const struct nvkm_falcon_fw_func *fwsec;	/* FWSEC firmware ops */

	char *sig_section;	/* ELF section holding this chip's RM signatures */

	/* WPR heap sizing parameters for GSP-RM. */
	struct {
		u32 os_carveout_size;
		u32 base_size;
		u64 min_size;
	} wpr_heap;

	/* Constructor for the "booter" HS firmware images. */
	struct {
		int (*ctor)(struct nvkm_gsp *, const char *name, const struct firmware *,
			    struct nvkm_falcon *, struct nvkm_falcon_fw *);
	} booter;

	/* Subdev lifecycle hooks. */
	void (*dtor)(struct nvkm_gsp *);
	int (*oneinit)(struct nvkm_gsp *);
	int (*init)(struct nvkm_gsp *);
	int (*fini)(struct nvkm_gsp *, bool suspend);
	int (*reset)(struct nvkm_gsp *);

	const struct nvkm_gsp_rm *rm;	/* RM API implementation (RPC-based) */
};
+
+extern const struct nvkm_falcon_func tu102_gsp_flcn;
+extern const struct nvkm_falcon_fw_func tu102_gsp_fwsec;
+int tu102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
+                         struct nvkm_falcon *, struct nvkm_falcon_fw *);
+int tu102_gsp_oneinit(struct nvkm_gsp *);
+int tu102_gsp_reset(struct nvkm_gsp *);
+
+extern const struct nvkm_falcon_func ga102_gsp_flcn;
+extern const struct nvkm_falcon_fw_func ga102_gsp_fwsec;
+int ga102_gsp_booter_ctor(struct nvkm_gsp *, const char *, const struct firmware *,
+                         struct nvkm_falcon *, struct nvkm_falcon_fw *);
+int ga102_gsp_reset(struct nvkm_gsp *);
+
+void r535_gsp_dtor(struct nvkm_gsp *);
+int r535_gsp_oneinit(struct nvkm_gsp *);
+int r535_gsp_init(struct nvkm_gsp *);
+int r535_gsp_fini(struct nvkm_gsp *, bool suspend);
+extern const struct nvkm_gsp_rm r535_gsp_rm;
+
 int nvkm_gsp_new_(const struct nvkm_gsp_fwif *, struct nvkm_device *, enum nvkm_subdev_type, int,
                  struct nvkm_gsp **);
+
+extern const struct nvkm_gsp_func gv100_gsp;
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
new file mode 100644 (file)
index 0000000..e31f964
--- /dev/null
@@ -0,0 +1,2236 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/pci.h>
+#include <subdev/timer.h>
+#include <subdev/vfn.h>
+#include <engine/fifo/chan.h>
+#include <engine/sec2.h>
+
+#include <nvfw/fw.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0000.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0005.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl0080.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl2080.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080event.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080gpu.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/535.113.01/common/shared/msgq/inc/msgq/msgq_priv.h>
+#include <nvrm/535.113.01/common/uproc/os/common/include/libos_init_args.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_sr_meta.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/gsp/gsp_fw_wpr_meta.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmRiscvUcode.h>
+#include <nvrm/535.113.01/nvidia/arch/nvalloc/common/inc/rmgspseq.h>
+#include <nvrm/535.113.01/nvidia/generated/g_allclasses.h>
+#include <nvrm/535.113.01/nvidia/generated/g_os_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_fw_heap.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_init_args.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/gsp/gsp_static_config.h>
+#include <nvrm/535.113.01/nvidia/inc/kernel/gpu/intr/engine_idx.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+
+#include <linux/acpi.h>
+
/* Minimum size of a message-queue element: one GSP page. */
#define GSP_MSG_MIN_SIZE GSP_PAGE_SIZE
/* Maximum size of a single message: 16 pages.  Bug fix: the expansion was
 * "GSP_PAGE_MIN_SIZE * 16", referencing a macro that is defined nowhere
 * (it only compiled because GSP_MSG_MAX_SIZE was never expanded - the code
 * below hard-codes 16 * 0x1000 instead); also parenthesize the expansion. */
#define GSP_MSG_MAX_SIZE (GSP_MSG_MIN_SIZE * 16)
+
/* Header prepended to every element of the shared CPU<->GSP queues.
 * NOTE(review): layout is fixed by the GSP-RM msgq ABI; field meanings
 * beyond checksum/sequence/elem_count are inferred from their use in this
 * file - confirm against msgq_priv.h. */
struct r535_gsp_msg {
	u8 auth_tag_buffer[16];
	u8 aad_buffer[16];
	u32 checksum;		/* XOR-folded checksum; cmdq_get() stashes the payload size here */
	u32 sequence;		/* per-queue sequence number */
	u32 elem_count;		/* number of queue pages this message spans */
	u32 pad;
	u8  data[];		/* message payload (flexible array member) */
};

/* Size of the header alone, i.e. byte offset of the payload. */
#define GSP_MSG_HDR_SIZE offsetof(struct r535_gsp_msg, data)
+
/* Wait until a message with at least @repc payload bytes is available in the
 * GSP->CPU message queue.
 *
 * If @prepc is non-NULL, return a pointer directly into the queue along with
 * the contiguous byte count readable there (*prepc); the read pointer is NOT
 * advanced.  Otherwise copy the message (handling ring wrap-around) into a
 * kvmalloc'd buffer, advance the read pointer, and return the buffer; the
 * caller must kvfree() it.
 *
 * @ptime is an in/out poll budget, decremented once per 1-2us sleep.
 * Returns ERR_PTR(-EINVAL) for bad sizes, ERR_PTR(-ETIMEDOUT) when the
 * budget expires, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static void *
r535_gsp_msgq_wait(struct nvkm_gsp *gsp, u32 repc, u32 *prepc, int *ptime)
{
	struct r535_gsp_msg *mqe;
	u32 size, rptr = *gsp->msgq.rptr;
	int used;
	u8 *msg;
	u32 len;

	/* Size in whole queue pages, including the r535_gsp_msg header. */
	size = DIV_ROUND_UP(GSP_MSG_HDR_SIZE + repc, GSP_PAGE_SIZE);
	if (WARN_ON(!size || size >= gsp->msgq.cnt))
		return ERR_PTR(-EINVAL);

	/* Poll the GSP-owned write pointer until enough pages are queued. */
	do {
		u32 wptr = *gsp->msgq.wptr;

		used = wptr + gsp->msgq.cnt - rptr;
		if (used >= gsp->msgq.cnt)
			used -= gsp->msgq.cnt;
		if (used >= size)
			break;

		usleep_range(1, 2);
	} while (--(*ptime));

	if (WARN_ON(!*ptime))
		return ERR_PTR(-ETIMEDOUT);

	/* Queue slots are one page each.  NOTE(review): the +0x1000 base
	 * presumably skips the rptr/wptr control page - confirm. */
	mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + rptr * 0x1000);

	if (prepc) {
		*prepc = (used * GSP_PAGE_SIZE) - sizeof(*mqe);
		return mqe->data;
	}

	msg = kvmalloc(repc, GFP_KERNEL);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	/* Copy up to the end of the ring first... */
	len = ((gsp->msgq.cnt - rptr) * GSP_PAGE_SIZE) - sizeof(*mqe);
	len = min_t(u32, repc, len);
	memcpy(msg, mqe->data, len);

	rptr += DIV_ROUND_UP(len, GSP_PAGE_SIZE);
	if (rptr == gsp->msgq.cnt)
		rptr = 0;

	repc -= len;

	/* ...then wrap around to slot 0 for any remainder. */
	if (repc) {
		mqe = (void *)((u8 *)gsp->shm.msgq.ptr + 0x1000 + 0 * 0x1000);
		memcpy(msg + len, mqe, repc);

		rptr += DIV_ROUND_UP(repc, GSP_PAGE_SIZE);
	}

	/* Make sure the copies above complete before publishing rptr. */
	mb();
	(*gsp->msgq.rptr) = rptr;
	return msg;
}
+
/* Copy the next message (@repc payload bytes) out of the message queue;
 * thin wrapper over r535_gsp_msgq_wait() without the peek pointer. */
static void *
r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 repc, int *ptime)
{
	return r535_gsp_msgq_wait(gsp, repc, NULL, ptime);
}
+
/* Finalise a command built via r535_gsp_cmdq_get() and push it onto the
 * CPU->GSP command queue, then notify the GSP.
 *
 * Consumes @argv: the backing allocation is kvfree'd on all return paths.
 * Returns 0, or -ETIMEDOUT if queue space never frees up.
 */
static int
r535_gsp_cmdq_push(struct nvkm_gsp *gsp, void *argv)
{
	struct r535_gsp_msg *cmd = container_of(argv, typeof(*cmd), data);
	struct r535_gsp_msg *cqe;
	u32 argc = cmd->checksum;	/* cmdq_get() stashed the payload size here */
	u64 *ptr = (void *)cmd;
	u64 *end;
	u64 csum = 0;
	int free, time = 1000000;
	u32 wptr, size;
	u32 off = 0;

	/* Total size including header, rounded up to whole queue pages. */
	argc = ALIGN(GSP_MSG_HDR_SIZE + argc, GSP_PAGE_SIZE);

	end = (u64 *)((char *)ptr + argc);
	cmd->pad = 0;
	cmd->checksum = 0;
	cmd->sequence = gsp->cmdq.seq++;
	cmd->elem_count = DIV_ROUND_UP(argc, 0x1000);

	/* XOR-fold the whole message (checksum field zeroed) into 32 bits. */
	while (ptr < end)
		csum ^= *ptr++;

	cmd->checksum = upper_32_bits(csum) ^ lower_32_bits(csum);

	wptr = *gsp->cmdq.wptr;
	do {
		/* Wait for at least one free slot (GSP advances rptr). */
		do {
			free = *gsp->cmdq.rptr + gsp->cmdq.cnt - wptr - 1;
			if (free >= gsp->cmdq.cnt)
				free -= gsp->cmdq.cnt;
			if (free >= 1)
				break;

			usleep_range(1, 2);
		} while(--time);

		if (WARN_ON(!time)) {
			kvfree(cmd);
			return -ETIMEDOUT;
		}

		/* Copy as much as fits before the ring wraps, then loop. */
		cqe = (void *)((u8 *)gsp->shm.cmdq.ptr + 0x1000 + wptr * 0x1000);
		size = min_t(u32, argc, (gsp->cmdq.cnt - wptr) * GSP_PAGE_SIZE);
		memcpy(cqe, (u8 *)cmd + off, size);

		wptr += DIV_ROUND_UP(size, 0x1000);
		if (wptr == gsp->cmdq.cnt)
			wptr = 0;

		off  += size;
		argc -= size;
	} while(argc);

	nvkm_trace(&gsp->subdev, "cmdq: wptr %d\n", wptr);
	/* Publish the data before the wptr, and the wptr before the write
	 * that notifies the GSP below. */
	wmb();
	(*gsp->cmdq.wptr) = wptr;
	mb();

	/* NOTE(review): presumably rings the GSP command-queue doorbell -
	 * confirm the register's meaning. */
	nvkm_falcon_wr32(&gsp->falcon, 0xc00, 0x00000000);

	kvfree(cmd);
	return 0;
}
+
/* Allocate a zeroed command buffer with room for @argc payload bytes,
 * rounded up to whole queue pages.  The payload size is stashed in the
 * checksum field for r535_gsp_cmdq_push() to recover.  Returns a pointer
 * to the payload area, or ERR_PTR(-ENOMEM). */
static void *
r535_gsp_cmdq_get(struct nvkm_gsp *gsp, u32 argc)
{
	struct r535_gsp_msg *cmd;
	u32 size = GSP_MSG_HDR_SIZE + argc;

	size = ALIGN(size, GSP_MSG_MIN_SIZE);
	cmd = kvzalloc(size, GFP_KERNEL);
	if (!cmd)
		return ERR_PTR(-ENOMEM);

	cmd->checksum = argc;	/* temporary stash; replaced by cmdq_push() */
	return cmd->data;
}
+
/* RPC header carried inside a queue message's payload.  NOTE(review):
 * layout is fixed by the GSP-RM RPC ABI - confirm against the rpc
 * structure headers. */
struct nvfw_gsp_rpc {
	u32 header_version;	/* 0x03000000 for this ABI */
	u32 signature;		/* 'GSPR' magic (see r535_gsp_rpc_get()) */
	u32 length;		/* total length including this header */
	u32 function;		/* NV_VGPU_MSG_FUNCTION_* code */
	u32 rpc_result;		/* status returned by GSP-RM */
	u32 rpc_result_private;
	u32 sequence;
	union {
		u32 spare;
		u32 cpuRmGfid;
	};
	u8  data[];		/* function-specific payload */
};
+
/* Release a message previously returned by r535_gsp_msg_recv(). */
static void
r535_gsp_msg_done(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg)
{
	kvfree(msg);
}
+
/* Log an RPC message header and hex-dump its payload, gated on the
 * subdev debug level @lvl. */
static void
r535_gsp_msg_dump(struct nvkm_gsp *gsp, struct nvfw_gsp_rpc *msg, int lvl)
{
	if (gsp->subdev.debug >= lvl) {
		nvkm_printk__(&gsp->subdev, lvl, info,
			      "msg fn:%d len:0x%x/0x%zx res:0x%x resp:0x%x\n",
			      msg->function, msg->length, msg->length - sizeof(*msg),
			      msg->rpc_result, msg->rpc_result_private);
		print_hex_dump(KERN_INFO, "msg: ", DUMP_PREFIX_OFFSET, 16, 1,
			       msg->data, msg->length - sizeof(*msg), true);
	}
}
+
/* Receive messages from the GSP until one matching function code @fn with at
 * least @repc payload bytes arrives.  Messages with other function codes are
 * dispatched to a registered notify handler (if any) and freed.
 *
 * With @fn == 0 this instead drains the queue, dispatching every message.
 *
 * Returns the matching message (free with r535_gsp_msg_done()), NULL when
 * @fn matched but no payload was requested (or the queue drained), or an
 * ERR_PTR on timeout/RPC error/short reply.
 */
static struct nvfw_gsp_rpc *
r535_gsp_msg_recv(struct nvkm_gsp *gsp, int fn, u32 repc)
{
	struct nvkm_subdev *subdev = &gsp->subdev;
	struct nvfw_gsp_rpc *msg;
	int time = 4000000, i;
	u32 size;

retry:
	/* Peek at the header in-queue to learn the full length, then copy
	 * the whole message out. */
	msg = r535_gsp_msgq_wait(gsp, sizeof(*msg), &size, &time);
	if (IS_ERR_OR_NULL(msg))
		return msg;

	msg = r535_gsp_msgq_recv(gsp, msg->length, &time);
	if (IS_ERR_OR_NULL(msg))
		return msg;

	if (msg->rpc_result) {
		r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
		r535_gsp_msg_done(gsp, msg);
		return ERR_PTR(-EINVAL);
	}

	r535_gsp_msg_dump(gsp, msg, NV_DBG_TRACE);

	if (fn && msg->function == fn) {
		if (repc) {
			/* Caller expects at least @repc bytes of payload. */
			if (msg->length < sizeof(*msg) + repc) {
				nvkm_error(subdev, "msg len %d < %zd\n",
					   msg->length, sizeof(*msg) + repc);
				r535_gsp_msg_dump(gsp, msg, NV_DBG_ERROR);
				r535_gsp_msg_done(gsp, msg);
				return ERR_PTR(-EIO);
			}

			return msg;
		}

		r535_gsp_msg_done(gsp, msg);
		return NULL;
	}

	/* Not the reply we wanted: hand it to a registered handler. */
	for (i = 0; i < gsp->msgq.ntfy_nr; i++) {
		struct nvkm_gsp_msgq_ntfy *ntfy = &gsp->msgq.ntfy[i];

		if (ntfy->fn == msg->function) {
			ntfy->func(ntfy->priv, ntfy->fn, msg->data, msg->length - sizeof(*msg));
			break;
		}
	}

	if (i == gsp->msgq.ntfy_nr)
		r535_gsp_msg_dump(gsp, msg, NV_DBG_WARN);

	r535_gsp_msg_done(gsp, msg);
	if (fn)
		goto retry;

	/* Draining: keep going while the queue is non-empty. */
	if (*gsp->msgq.rptr != *gsp->msgq.wptr)
		goto retry;

	return NULL;
}
+
+static int
+r535_gsp_msg_ntfy_add(struct nvkm_gsp *gsp, u32 fn, nvkm_gsp_msg_ntfy_func func, void *priv)
+{
+       int ret = 0;
+
+       mutex_lock(&gsp->msgq.mutex);
+       if (WARN_ON(gsp->msgq.ntfy_nr >= ARRAY_SIZE(gsp->msgq.ntfy))) {
+               ret = -ENOSPC;
+       } else {
+               gsp->msgq.ntfy[gsp->msgq.ntfy_nr].fn = fn;
+               gsp->msgq.ntfy[gsp->msgq.ntfy_nr].func = func;
+               gsp->msgq.ntfy[gsp->msgq.ntfy_nr].priv = priv;
+               gsp->msgq.ntfy_nr++;
+       }
+       mutex_unlock(&gsp->msgq.mutex);
+       return ret;
+}
+
+static int
+r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn)
+{
+       void *repv;
+
+       mutex_lock(&gsp->cmdq.mutex);
+       repv = r535_gsp_msg_recv(gsp, fn, 0);
+       mutex_unlock(&gsp->cmdq.mutex);
+       if (IS_ERR(repv))
+               return PTR_ERR(repv);
+
+       return 0;
+}
+
/* Push a built RPC onto the command queue and, if @wait, receive the reply
 * for the same function code with at least @repc payload bytes.
 *
 * Returns the reply payload (free via r535_gsp_rpc_done()), NULL when not
 * waiting or no payload was requested, or an ERR_PTR.
 */
static void *
r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
{
	struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
	struct nvfw_gsp_rpc *msg;
	u32 fn = rpc->function;
	void *repv = NULL;
	int ret;

	if (gsp->subdev.debug >= NV_DBG_TRACE) {
		nvkm_trace(&gsp->subdev, "rpc fn:%d len:0x%x/0x%zx\n", rpc->function,
			   rpc->length, rpc->length - sizeof(*rpc));
		print_hex_dump(KERN_INFO, "rpc: ", DUMP_PREFIX_OFFSET, 16, 1,
			       rpc->data, rpc->length - sizeof(*rpc), true);
	}

	ret = r535_gsp_cmdq_push(gsp, rpc);
	if (ret) {
		/* NOTE(review): cmdq.mutex is never taken in this function -
		 * it appears to be held by the caller.  Confirm the caller
		 * does not also unlock on this error path (double-unlock
		 * risk), and that the success path's lock ownership is
		 * consistent. */
		mutex_unlock(&gsp->cmdq.mutex);
		return ERR_PTR(ret);
	}

	if (wait) {
		msg = r535_gsp_msg_recv(gsp, fn, repc);
		if (!IS_ERR_OR_NULL(msg))
			repv = msg->data;
		else
			repv = msg;
	}

	return repv;
}
+
/* Tear down a GSP event: unlink it from its client's event list under the
 * client_id mutex (which also guards the handler pointer), then free the
 * RM object backing it. */
static void
r535_gsp_event_dtor(struct nvkm_gsp_event *event)
{
	struct nvkm_gsp_device *device = event->device;
	struct nvkm_gsp_client *client = device->object.client;
	struct nvkm_gsp *gsp = client->gsp;

	mutex_lock(&gsp->client_id.mutex);
	if (event->func) {
		list_del(&event->head);
		event->func = NULL;
	}
	mutex_unlock(&gsp->client_id.mutex);

	nvkm_gsp_rm_free(&event->object);
	event->device = NULL;
}
+
/* Arm @event for repeated notification delivery via the device's
 * subdevice EVENT_SET_NOTIFICATION control. */
static int
r535_gsp_device_event_get(struct nvkm_gsp_event *event)
{
	struct nvkm_gsp_device *device = event->device;
	NV2080_CTRL_EVENT_SET_NOTIFICATION_PARAMS *ctrl;

	ctrl = nvkm_gsp_rm_ctrl_get(&device->subdevice,
				    NV2080_CTRL_CMD_EVENT_SET_NOTIFICATION, sizeof(*ctrl));
	if (IS_ERR(ctrl))
		return PTR_ERR(ctrl);

	ctrl->event = event->id;
	ctrl->action = NV2080_CTRL_EVENT_SET_NOTIFICATION_ACTION_REPEAT;
	return nvkm_gsp_rm_ctrl_wr(&device->subdevice, ctrl);
}
+
/* Create a GSP event object on @device.
 *
 * Allocates an NV01_EVENT_KERNEL_CALLBACK_EX child with RM handle @handle
 * for notification index @id, arms it for repeated delivery, and registers
 * @func (under the client_id mutex) on the client's event list so incoming
 * notifications can be routed to it.  On failure after allocation the event
 * is destroyed again.
 */
static int
r535_gsp_device_event_ctor(struct nvkm_gsp_device *device, u32 handle, u32 id,
			   nvkm_gsp_event_func func, struct nvkm_gsp_event *event)
{
	struct nvkm_gsp_client *client = device->object.client;
	struct nvkm_gsp *gsp = client->gsp;
	NV0005_ALLOC_PARAMETERS *args;
	int ret;

	args = nvkm_gsp_rm_alloc_get(&device->subdevice, handle,
				     NV01_EVENT_KERNEL_CALLBACK_EX, sizeof(*args),
				     &event->object);
	if (IS_ERR(args))
		return PTR_ERR(args);

	args->hParentClient = client->object.handle;
	args->hSrcResource = 0;
	args->hClass = NV01_EVENT_KERNEL_CALLBACK_EX;
	args->notifyIndex = NV01_EVENT_CLIENT_RM | id;
	args->data = NULL;

	ret = nvkm_gsp_rm_alloc_wr(&event->object, args);
	if (ret)
		return ret;

	event->device = device;
	event->id = id;

	ret = r535_gsp_device_event_get(event);
	if (ret) {
		nvkm_gsp_event_dtor(event);
		return ret;
	}

	/* Publish the handler only once the event is fully armed. */
	mutex_lock(&gsp->client_id.mutex);
	event->func = func;
	list_add(&event->head, &client->events);
	mutex_unlock(&gsp->client_id.mutex);
	return 0;
}
+
/* Destroy a GSP RM device: free the subdevice child first, then the
 * device object itself. */
static void
r535_gsp_device_dtor(struct nvkm_gsp_device *device)
{
	nvkm_gsp_rm_free(&device->subdevice);
	nvkm_gsp_rm_free(&device->object);
}
+
/* Allocate the NV20_SUBDEVICE_0 child of @device with a fixed RM handle;
 * @args is only used for its sizeof (parameters are zero-filled). */
static int
r535_gsp_subdevice_ctor(struct nvkm_gsp_device *device)
{
	NV2080_ALLOC_PARAMETERS *args;

	return nvkm_gsp_rm_alloc(&device->object, 0x5d1d0000, NV20_SUBDEVICE_0, sizeof(*args),
				 &device->subdevice);
}
+
/* Allocate an NV01_DEVICE_0 object for @client (fixed RM handle) and its
 * NV20_SUBDEVICE_0 child.  On subdevice failure the device object is freed
 * again so the caller sees all-or-nothing. */
static int
r535_gsp_device_ctor(struct nvkm_gsp_client *client, struct nvkm_gsp_device *device)
{
	NV0080_ALLOC_PARAMETERS *args;
	int ret;

	args = nvkm_gsp_rm_alloc_get(&client->object, 0xde1d0000, NV01_DEVICE_0, sizeof(*args),
				     &device->object);
	if (IS_ERR(args))
		return PTR_ERR(args);

	args->hClientShare = client->object.handle;

	ret = nvkm_gsp_rm_alloc_wr(&device->object, args);
	if (ret)
		return ret;

	ret = r535_gsp_subdevice_ctor(device);
	if (ret)
		nvkm_gsp_rm_free(&device->object);

	return ret;
}
+
/* Destroy a GSP RM client: free its root object and release the idr id
 * (the low 16 bits of the client handle). */
static void
r535_gsp_client_dtor(struct nvkm_gsp_client *client)
{
	struct nvkm_gsp *gsp = client->gsp;

	nvkm_gsp_rm_free(&client->object);

	mutex_lock(&gsp->client_id.mutex);
	idr_remove(&gsp->client_id.idr, client->object.handle & 0xffff);
	mutex_unlock(&gsp->client_id.mutex);

	client->gsp = NULL;
}
+
+static int
+r535_gsp_client_ctor(struct nvkm_gsp *gsp, struct nvkm_gsp_client *client)
+{
+       NV0000_ALLOC_PARAMETERS *args;
+       int ret;
+
+       mutex_lock(&gsp->client_id.mutex);
+       ret = idr_alloc(&gsp->client_id.idr, client, 0, 0xffff + 1, GFP_KERNEL);
+       mutex_unlock(&gsp->client_id.mutex);
+       if (ret < 0)
+               return ret;
+
+       client->gsp = gsp;
+       client->object.client = client;
+       INIT_LIST_HEAD(&client->events);
+
+       args = nvkm_gsp_rm_alloc_get(&client->object, 0xc1d00000 | ret, NV01_ROOT, sizeof(*args),
+                                    &client->object);
+       if (IS_ERR(args)) {
+               r535_gsp_client_dtor(client);
+               return ret;
+       }
+
+       args->hClient = client->object.handle;
+       args->processID = ~0;
+
+       ret = nvkm_gsp_rm_alloc_wr(&client->object, args);
+       if (ret) {
+               r535_gsp_client_dtor(client);
+               return ret;
+       }
+
+       return 0;
+}
+
/* Free an RM object on the GSP side via the FREE RPC.  Returns 0, -EIO if
 * the RPC buffer could not be built, or a negative errno from the RPC. */
static int
r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object)
{
	struct nvkm_gsp_client *client = object->client;
	struct nvkm_gsp *gsp = client->gsp;
	rpc_free_v03_00 *rpc;

	nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x free\n",
		   client->object.handle, object->handle);

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_FREE, sizeof(*rpc));
	if (WARN_ON(IS_ERR_OR_NULL(rpc)))
		return -EIO;

	rpc->params.hRoot = client->object.handle;
	rpc->params.hObjectParent = 0;
	rpc->params.hObjectOld = object->handle;
	return nvkm_gsp_rpc_wr(gsp, rpc, true);
}
+
/* Release an RM_ALLOC reply; @repv points at the params inside the RPC. */
static void
r535_gsp_rpc_rm_alloc_done(struct nvkm_gsp_object *object, void *repv)
{
	rpc_gsp_rm_alloc_v03_00 *rpc = container_of(repv, typeof(*rpc), params);

	nvkm_gsp_rpc_done(object->client->gsp, rpc);
}
+
/* Send an RM_ALLOC RPC built by r535_gsp_rpc_rm_alloc_get() and wait for
 * the reply.  A non-zero GSP status becomes -EINVAL.  Returns the reply
 * params (release via r535_gsp_rpc_rm_alloc_done()) when @repc is non-zero,
 * NULL on success without a reply, or an ERR_PTR. */
static void *
r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
	rpc_gsp_rm_alloc_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
	struct nvkm_gsp *gsp = object->client->gsp;
	void *ret;

	rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc) + repc);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	if (rpc->status) {
		nvkm_error(&gsp->subdev, "RM_ALLOC: 0x%x\n", rpc->status);
		ret = ERR_PTR(-EINVAL);
	} else {
		ret = repc ? rpc->params : NULL;
	}

	/* If the caller doesn't get the reply back, free it here. */
	if (IS_ERR_OR_NULL(ret))
		nvkm_gsp_rpc_done(gsp, rpc);

	return ret;
}
+
/* Build an RM_ALLOC RPC for creating @object (class @oclass) with @argc
 * bytes of class-specific parameters.  Returns a pointer to the params
 * area for the caller to fill, or an ERR_PTR. */
static void *
r535_gsp_rpc_rm_alloc_get(struct nvkm_gsp_object *object, u32 oclass, u32 argc)
{
	struct nvkm_gsp_client *client = object->client;
	struct nvkm_gsp *gsp = client->gsp;
	rpc_gsp_rm_alloc_v03_00 *rpc;

	nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x new obj:0x%08x cls:0x%08x argc:%d\n",
		   client->object.handle, object->parent->handle, object->handle, oclass, argc);

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_ALLOC, sizeof(*rpc) + argc);
	if (IS_ERR(rpc))
		return rpc;

	rpc->hClient = client->object.handle;
	rpc->hParent = object->parent->handle;
	rpc->hObject = object->handle;
	rpc->hClass = oclass;
	rpc->status = 0;
	rpc->paramsSize = argc;
	return rpc->params;
}
+
/* Release an RM_CONTROL reply; @repv points at the params inside the RPC. */
static void
r535_gsp_rpc_rm_ctrl_done(struct nvkm_gsp_object *object, void *repv)
{
	rpc_gsp_rm_control_v03_00 *rpc = container_of(repv, typeof(*rpc), params);

	nvkm_gsp_rpc_done(object->client->gsp, rpc);
}
+
/* Send an RM_CONTROL RPC built by r535_gsp_rpc_rm_ctrl_get() and wait for
 * the reply.  A non-zero GSP status becomes -EINVAL.  Returns the reply
 * params (release via r535_gsp_rpc_rm_ctrl_done()) when @repc is non-zero,
 * NULL on success without a reply, or an ERR_PTR. */
static void *
r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void *argv, u32 repc)
{
	rpc_gsp_rm_control_v03_00 *rpc = container_of(argv, typeof(*rpc), params);
	struct nvkm_gsp *gsp = object->client->gsp;
	void *ret;

	rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc);
	if (IS_ERR_OR_NULL(rpc))
		return rpc;

	if (rpc->status) {
		nvkm_error(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x failed: 0x%08x\n",
			   object->client->object.handle, object->handle, rpc->cmd, rpc->status);
		ret = ERR_PTR(-EINVAL);
	} else {
		ret = repc ? rpc->params : NULL;
	}

	/* If the caller doesn't get the reply back, free it here. */
	if (IS_ERR_OR_NULL(ret))
		nvkm_gsp_rpc_done(gsp, rpc);

	return ret;
}
+
/* Build an RM_CONTROL RPC for control command @cmd on @object with @argc
 * bytes of command parameters.  Returns a pointer to the params area for
 * the caller to fill, or an ERR_PTR. */
static void *
r535_gsp_rpc_rm_ctrl_get(struct nvkm_gsp_object *object, u32 cmd, u32 argc)
{
	struct nvkm_gsp_client *client = object->client;
	struct nvkm_gsp *gsp = client->gsp;
	rpc_gsp_rm_control_v03_00 *rpc;

	nvkm_debug(&gsp->subdev, "cli:0x%08x obj:0x%08x ctrl cmd:0x%08x argc:%d\n",
		   client->object.handle, object->handle, cmd, argc);

	rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_RM_CONTROL, sizeof(*rpc) + argc);
	if (IS_ERR(rpc))
		return rpc;

	rpc->hClient    = client->object.handle;
	rpc->hObject    = object->handle;
	rpc->cmd        = cmd;
	rpc->status     = 0;
	rpc->paramsSize = argc;
	return rpc->params;
}
+
+/* Release a raw RPC reply buffer.
+ *
+ * @repv points at the data[] member of an nvfw_gsp_rpc header; recover
+ * the header and release the underlying message.
+ */
+static void
+r535_gsp_rpc_done(struct nvkm_gsp *gsp, void *repv)
+{
+       struct nvfw_gsp_rpc *rpc = container_of(repv, typeof(*rpc), data);
+
+       r535_gsp_msg_done(gsp, rpc);
+}
+
+/* Reserve command-queue space for RPC function @fn with an @argc-byte
+ * payload, and fill in the common nvfw_gsp_rpc header.
+ *
+ * Returns a pointer to the caller-writable data[] payload area, or NULL
+ * when queue space could not be obtained.
+ */
+static void *
+r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
+{
+       struct nvfw_gsp_rpc *rpc;
+
+       /* Queue entries are u64-aligned. */
+       rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64)));
+       if (!rpc)
+               return NULL;
+
+       rpc->header_version = 0x03000000;
+       /* 'VRPC' magic, stored little-endian. */
+       rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
+       rpc->function = fn;
+       rpc->rpc_result = 0xffffffff;
+       rpc->rpc_result_private = 0xffffffff;
+       rpc->length = sizeof(*rpc) + argc;
+       return rpc->data;
+}
+
+/* Submit an RPC, splitting it into CONTINUATION_RECORD chunks when it
+ * exceeds the maximum single-message size.
+ *
+ * @argv points at the data[] payload returned by r535_gsp_rpc_get().
+ * When @wait is set, blocks for the reply to the original function and
+ * returns a pointer to its payload (or ERR_PTR/NULL per r535_gsp_msg_recv);
+ * otherwise returns NULL on successful submission.
+ *
+ * The whole multi-chunk sequence is serialised under cmdq.mutex so that
+ * continuation records from different RPCs cannot interleave.
+ */
+static void *
+r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
+{
+       struct nvfw_gsp_rpc *rpc = container_of(argv, typeof(*rpc), data);
+       struct r535_gsp_msg *cmd = container_of((void *)rpc, typeof(*cmd), data);
+       /* Largest single message: 16 pages minus the queue-entry header. */
+       const u32 max_msg_size = (16 * 0x1000) - sizeof(struct r535_gsp_msg);
+       const u32 max_rpc_size = max_msg_size - sizeof(*rpc);
+       u32 rpc_size = rpc->length - sizeof(*rpc);
+       void *repv;
+
+       mutex_lock(&gsp->cmdq.mutex);
+       if (rpc_size > max_rpc_size) {
+               const u32 fn = rpc->function;
+
+               /* Adjust length, and send initial RPC. */
+               rpc->length = sizeof(*rpc) + max_rpc_size;
+               /* NOTE(review): checksum is loaded with the truncated length
+                * here - presumably consumed/replaced by the send path; confirm. */
+               cmd->checksum = rpc->length;
+
+               repv = r535_gsp_rpc_send(gsp, argv, false, 0);
+               if (IS_ERR(repv))
+                       goto done;
+
+               argv += max_rpc_size;
+               rpc_size -= max_rpc_size;
+
+               /* Remaining chunks sent as CONTINUATION_RECORD RPCs. */
+               while (rpc_size) {
+                       u32 size = min(rpc_size, max_rpc_size);
+                       void *next;
+
+                       next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size);
+                       if (IS_ERR(next)) {
+                               repv = next;
+                               goto done;
+                       }
+
+                       memcpy(next, argv, size);
+
+                       repv = r535_gsp_rpc_send(gsp, next, false, 0);
+                       if (IS_ERR(repv))
+                               goto done;
+
+                       argv += size;
+                       rpc_size -= size;
+               }
+
+               /* Wait for reply. */
+               if (wait) {
+                       /* Reply is matched against the original function, not
+                        * the continuation records. */
+                       rpc = r535_gsp_msg_recv(gsp, fn, repc);
+                       if (!IS_ERR_OR_NULL(rpc))
+                               repv = rpc->data;
+                       else
+                               repv = rpc;
+               } else {
+                       repv = NULL;
+               }
+       } else {
+               /* Fits in one message - send directly. */
+               repv = r535_gsp_rpc_send(gsp, argv, wait, repc);
+       }
+
+done:
+       mutex_unlock(&gsp->cmdq.mutex);
+       return repv;
+}
+
+/* RM API vtable for r535 GSP firmware: raw RPC, RM control, RM alloc/free,
+ * and client/device/event object management entry points used by the rest
+ * of the driver via the nvkm_gsp_rm_* wrappers.
+ */
+const struct nvkm_gsp_rm
+r535_gsp_rm = {
+       .rpc_get = r535_gsp_rpc_get,
+       .rpc_push = r535_gsp_rpc_push,
+       .rpc_done = r535_gsp_rpc_done,
+
+       .rm_ctrl_get = r535_gsp_rpc_rm_ctrl_get,
+       .rm_ctrl_push = r535_gsp_rpc_rm_ctrl_push,
+       .rm_ctrl_done = r535_gsp_rpc_rm_ctrl_done,
+
+       .rm_alloc_get = r535_gsp_rpc_rm_alloc_get,
+       .rm_alloc_push = r535_gsp_rpc_rm_alloc_push,
+       .rm_alloc_done = r535_gsp_rpc_rm_alloc_done,
+
+       .rm_free = r535_gsp_rpc_rm_free,
+
+       .client_ctor = r535_gsp_client_ctor,
+       .client_dtor = r535_gsp_client_dtor,
+
+       .device_ctor = r535_gsp_device_ctor,
+       .device_dtor = r535_gsp_device_dtor,
+
+       .event_ctor = r535_gsp_device_event_ctor,
+       .event_dtor = r535_gsp_event_dtor,
+};
+
+/* Deferred message-queue drain, scheduled from the GSP interrupt handler.
+ *
+ * Drains pending GSP->host messages under cmdq.mutex; fn==0/repc==0 asks
+ * r535_gsp_msg_recv to process whatever is queued rather than wait for a
+ * specific reply.
+ */
+static void
+r535_gsp_msgq_work(struct work_struct *work)
+{
+       struct nvkm_gsp *gsp = container_of(work, typeof(*gsp), msgq.work);
+
+       mutex_lock(&gsp->cmdq.mutex);
+       /* Only process if the read pointer lags the write pointer. */
+       if (*gsp->msgq.rptr != *gsp->msgq.wptr)
+               r535_gsp_msg_recv(gsp, 0, 0);
+       mutex_unlock(&gsp->cmdq.mutex);
+}
+
+/* GSP falcon interrupt handler.
+ *
+ * Reads pending (0x0008) and enabled (riscv_irqmask) interrupt bits, acks
+ * the msgq-notify bit (0x40) and defers queue processing to msgq.work.
+ * Any other asserted bit is logged and acked/disabled.  Always retriggers
+ * the falcon interrupt line before returning.
+ */
+static irqreturn_t
+r535_gsp_intr(struct nvkm_inth *inth)
+{
+       struct nvkm_gsp *gsp = container_of(inth, typeof(*gsp), subdev.inth);
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       u32 intr = nvkm_falcon_rd32(&gsp->falcon, 0x0008);
+       u32 inte = nvkm_falcon_rd32(&gsp->falcon, gsp->falcon.func->addr2 +
+                                                 gsp->falcon.func->riscv_irqmask);
+       u32 stat = intr & inte;
+
+       if (!stat) {
+               nvkm_debug(subdev, "inte %08x %08x\n", intr, inte);
+               return IRQ_NONE;
+       }
+
+       /* Bit 0x40: GSP posted a message - ack and schedule the drain work. */
+       if (stat & 0x00000040) {
+               nvkm_falcon_wr32(&gsp->falcon, 0x004, 0x00000040);
+               schedule_work(&gsp->msgq.work);
+               stat &= ~0x00000040;
+       }
+
+       /* Unexpected sources: log, then mask (0x014) and ack (0x004). */
+       if (stat) {
+               nvkm_error(subdev, "intr %08x\n", stat);
+               nvkm_falcon_wr32(&gsp->falcon, 0x014, stat);
+               nvkm_falcon_wr32(&gsp->falcon, 0x004, stat);
+       }
+
+       nvkm_falcon_intr_retrigger(&gsp->falcon);
+       return IRQ_HANDLED;
+}
+
+/* Fetch the interrupt routing table from GSP-RM and translate the engine
+ * indices it reports into nvkm subdev/engine types, recording the stall
+ * and non-stall vectors in gsp->intr[].
+ *
+ * Engine indices with no nvkm equivalent are skipped.  Returns 0 on
+ * success, -ENOSPC if the table overflows gsp->intr[], or a negative
+ * error from the underlying RM control.
+ */
+static int
+r535_gsp_intr_get_table(struct nvkm_gsp *gsp)
+{
+       NV2080_CTRL_INTERNAL_INTR_GET_KERNEL_TABLE_PARAMS *ctrl;
+       int ret = 0;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_INTR_GET_KERNEL_TABLE, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       /* On error the push path has already freed the buffer. */
+       ctrl = nvkm_gsp_rm_ctrl_push(&gsp->internal.device.subdevice, ctrl, sizeof(*ctrl));
+       if (WARN_ON(IS_ERR(ctrl)))
+               return PTR_ERR(ctrl);
+
+       for (unsigned i = 0; i < ctrl->tableLen; i++) {
+               enum nvkm_subdev_type type;
+               int inst;
+
+               nvkm_debug(&gsp->subdev,
+                          "%2d: engineIdx %3d pmcIntrMask %08x stall %08x nonStall %08x\n", i,
+                          ctrl->table[i].engineIdx, ctrl->table[i].pmcIntrMask,
+                          ctrl->table[i].vectorStall, ctrl->table[i].vectorNonStall);
+
+               /* Map RM engine index ranges onto nvkm (type, instance). */
+               switch (ctrl->table[i].engineIdx) {
+               case MC_ENGINE_IDX_GSP:
+                       type = NVKM_SUBDEV_GSP;
+                       inst = 0;
+                       break;
+               case MC_ENGINE_IDX_DISP:
+                       type = NVKM_ENGINE_DISP;
+                       inst = 0;
+                       break;
+               case MC_ENGINE_IDX_CE0 ... MC_ENGINE_IDX_CE9:
+                       type = NVKM_ENGINE_CE;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_CE0;
+                       break;
+               case MC_ENGINE_IDX_GR0:
+                       type = NVKM_ENGINE_GR;
+                       inst = 0;
+                       break;
+               case MC_ENGINE_IDX_NVDEC0 ... MC_ENGINE_IDX_NVDEC7:
+                       type = NVKM_ENGINE_NVDEC;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVDEC0;
+                       break;
+               case MC_ENGINE_IDX_MSENC ... MC_ENGINE_IDX_MSENC2:
+                       type = NVKM_ENGINE_NVENC;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_MSENC;
+                       break;
+               case MC_ENGINE_IDX_NVJPEG0 ... MC_ENGINE_IDX_NVJPEG7:
+                       type = NVKM_ENGINE_NVJPG;
+                       inst = ctrl->table[i].engineIdx - MC_ENGINE_IDX_NVJPEG0;
+                       break;
+               case MC_ENGINE_IDX_OFA0:
+                       type = NVKM_ENGINE_OFA;
+                       inst = 0;
+                       break;
+               default:
+                       continue;
+               }
+
+               if (WARN_ON(gsp->intr_nr == ARRAY_SIZE(gsp->intr))) {
+                       ret = -ENOSPC;
+                       break;
+               }
+
+               gsp->intr[gsp->intr_nr].type = type;
+               gsp->intr[gsp->intr_nr].inst = inst;
+               gsp->intr[gsp->intr_nr].stall = ctrl->table[i].vectorStall;
+               gsp->intr[gsp->intr_nr].nonstall = ctrl->table[i].vectorNonStall;
+               gsp->intr_nr++;
+       }
+
+       nvkm_gsp_rm_ctrl_done(&gsp->internal.device.subdevice, ctrl);
+       return ret;
+}
+
+/* Read GSP-RM's static configuration and mirror the parts nvkm needs:
+ * internal client/device/subdevice handles, BAR page-directory bases,
+ * usable FB regions, the reserved-memory size below the WPR heap, and
+ * GPC/TPC counts for GR.
+ *
+ * Returns 0 on success or a negative error from the RPC layer.
+ */
+static int
+r535_gsp_rpc_get_gsp_static_info(struct nvkm_gsp *gsp)
+{
+       GspStaticConfigInfo *rpc;
+       int last_usable = -1;
+
+       rpc = nvkm_gsp_rpc_rd(gsp, NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO, sizeof(*rpc));
+       if (IS_ERR(rpc))
+               return PTR_ERR(rpc);
+
+       /* Wire up the pre-existing RM-internal object handles so the
+        * nvkm_gsp_rm_ctrl_* helpers can target them. */
+       gsp->internal.client.object.client = &gsp->internal.client;
+       gsp->internal.client.object.parent = NULL;
+       gsp->internal.client.object.handle = rpc->hInternalClient;
+       gsp->internal.client.gsp = gsp;
+
+       gsp->internal.device.object.client = &gsp->internal.client;
+       gsp->internal.device.object.parent = &gsp->internal.client.object;
+       gsp->internal.device.object.handle = rpc->hInternalDevice;
+
+       gsp->internal.device.subdevice.client = &gsp->internal.client;
+       gsp->internal.device.subdevice.parent = &gsp->internal.device.object;
+       gsp->internal.device.subdevice.handle = rpc->hInternalSubdevice;
+
+       gsp->bar.rm_bar1_pdb = rpc->bar1PdeBase;
+       gsp->bar.rm_bar2_pdb = rpc->bar2PdeBase;
+
+       for (int i = 0; i < rpc->fbRegionInfoParams.numFBRegions; i++) {
+               NV2080_CTRL_CMD_FB_GET_FB_REGION_FB_REGION_INFO *reg =
+                       &rpc->fbRegionInfoParams.fbRegion[i];
+
+               nvkm_debug(&gsp->subdev, "fb region %d: "
+                          "%016llx-%016llx rsvd:%016llx perf:%08x comp:%d iso:%d prot:%d\n", i,
+                          reg->base, reg->limit, reg->reserved, reg->performance,
+                          reg->supportCompressed, reg->supportISO, reg->bProtected);
+
+               /* Only unreserved, unprotected regions are usable by nvkm. */
+               if (!reg->reserved && !reg->bProtected) {
+                       if (reg->supportCompressed && reg->supportISO &&
+                           !WARN_ON_ONCE(gsp->fb.region_nr >= ARRAY_SIZE(gsp->fb.region))) {
+                                       const u64 size = (reg->limit + 1) - reg->base;
+
+                                       gsp->fb.region[gsp->fb.region_nr].addr = reg->base;
+                                       gsp->fb.region[gsp->fb.region_nr].size = size;
+                                       gsp->fb.region_nr++;
+                       }
+
+                       last_usable = i;
+               }
+       }
+
+       if (last_usable >= 0) {
+               /* NOTE(review): rsvd_base is u32 while region limits are u64
+                * elsewhere - presumably fits below 4GiB here; confirm. */
+               u32 rsvd_base = rpc->fbRegionInfoParams.fbRegion[last_usable].limit + 1;
+
+               gsp->fb.rsvd_size = gsp->fb.heap.addr - rsvd_base;
+       }
+
+       for (int gpc = 0; gpc < ARRAY_SIZE(rpc->tpcInfo); gpc++) {
+               if (rpc->gpcInfo.gpcMask & BIT(gpc)) {
+                       gsp->gr.tpcs += hweight32(rpc->tpcInfo[gpc].tpcMask);
+                       gsp->gr.gpcs++;
+               }
+       }
+
+       nvkm_gsp_rpc_done(gsp, rpc);
+       return 0;
+}
+
+/* Post-boot initialisation once GSP-RM is running: pull static info,
+ * set up the msgq work item, fetch the interrupt table, install and
+ * enable the GSP stall-interrupt handler.
+ *
+ * Returns 0 on success or the first failing step's error code.
+ */
+static int
+r535_gsp_postinit(struct nvkm_gsp *gsp)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       int ret;
+
+       ret = r535_gsp_rpc_get_gsp_static_info(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       INIT_WORK(&gsp->msgq.work, r535_gsp_msgq_work);
+
+       ret = r535_gsp_intr_get_table(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       /* On success, ret holds the GSP stall interrupt vector. */
+       ret = nvkm_gsp_intr_stall(gsp, gsp->subdev.type, gsp->subdev.inst);
+       if (WARN_ON(ret < 0))
+               return ret;
+
+       ret = nvkm_inth_add(&device->vfn->intr, ret, NVKM_INTR_PRIO_NORMAL, &gsp->subdev,
+                           r535_gsp_intr, &gsp->subdev.inth);
+       if (WARN_ON(ret))
+               return ret;
+
+       nvkm_inth_allow(&gsp->subdev.inth);
+       nvkm_wr32(device, 0x110004, 0x00000040);
+       return ret;
+}
+
+/* Tell GSP-RM the guest driver is unloading, either for suspend
+ * (power level 3, in PM transition) or a full teardown (level 0).
+ *
+ * Blocks until GSP-RM acknowledges the RPC; returns 0 or a negative
+ * error from the RPC layer.
+ */
+static int
+r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend)
+{
+       rpc_unloading_guest_driver_v1F_07 *rpc;
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_UNLOADING_GUEST_DRIVER, sizeof(*rpc));
+       if (IS_ERR(rpc))
+               return PTR_ERR(rpc);
+
+       if (suspend) {
+               rpc->bInPMTransition = 1;
+               rpc->bGc6Entering = 0;
+               rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+       } else {
+               rpc->bInPMTransition = 0;
+               rpc->bGc6Entering = 0;
+               rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0;
+       }
+
+       return nvkm_gsp_rpc_wr(gsp, rpc, true);
+}
+
+/* A single GSP-RM registry key; values are dword (u32) only. */
+struct nv_gsp_registry_entries {
+       const char *name;
+       u32 value;
+};
+
+/* Registry keys pushed to GSP-RM at init by r535_gsp_rpc_set_registry(). */
+static const struct nv_gsp_registry_entries r535_registry_entries[] = {
+       { "RMSecBusResetEnable", 1 },
+       { "RMForcePcieConfigSave", 1 },
+};
+#define NV_GSP_REG_NUM_ENTRIES ARRAY_SIZE(r535_registry_entries)
+
+/* Serialise r535_registry_entries[] into a PACKED_REGISTRY_TABLE RPC.
+ *
+ * Layout: the fixed header and entry array are followed by the
+ * NUL-terminated key-name strings; each entry's nameOffset is the byte
+ * offset of its name from the start of the table.  Sent fire-and-forget
+ * (no wait).  Returns 0 or a negative error from the RPC layer.
+ */
+static int
+r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
+{
+       PACKED_REGISTRY_TABLE *rpc;
+       char *strings;
+       int str_offset;
+       int i;
+       size_t rpc_size = sizeof(*rpc) + sizeof(rpc->entries[0]) * NV_GSP_REG_NUM_ENTRIES;
+
+       /* add strings + null terminator */
+       for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++)
+               rpc_size += strlen(r535_registry_entries[i].name) + 1;
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_SET_REGISTRY, rpc_size);
+       if (IS_ERR(rpc))
+               return PTR_ERR(rpc);
+
+       rpc->size = sizeof(*rpc);
+       rpc->numEntries = NV_GSP_REG_NUM_ENTRIES;
+
+       /* Strings start immediately after the entry array. */
+       str_offset = offsetof(typeof(*rpc), entries[NV_GSP_REG_NUM_ENTRIES]);
+       strings = (char *)&rpc->entries[NV_GSP_REG_NUM_ENTRIES];
+       for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++) {
+               int name_len = strlen(r535_registry_entries[i].name) + 1;
+
+               rpc->entries[i].nameOffset = str_offset;
+               rpc->entries[i].type = 1;
+               rpc->entries[i].data = r535_registry_entries[i].value;
+               rpc->entries[i].length = 4;
+               memcpy(strings, r535_registry_entries[i].name, name_len);
+               strings += name_len;
+               str_offset += name_len;
+       }
+
+       return nvkm_gsp_rpc_wr(gsp, rpc, false);
+}
+
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+/* Query the Optimus (NVOP) _DSM for platform capability flags, filling
+ * *caps for the GSP_SET_SYSTEM_INFO RPC.
+ *
+ * caps->status stays 0xffff (unsupported) unless the _DSM returns a
+ * valid 4-byte buffer, in which case status is 0 and optimusCaps holds
+ * the reported capability word.
+ *
+ * Fix: the early-return paths (missing _DSM, malformed result) leaked
+ * the kmalloc'd argv4 scratch buffer and/or the evaluated ACPI object;
+ * all exits now funnel through a single cleanup tail.  ACPI_FREE() and
+ * kfree() both accept NULL, so obj is initialised to NULL.
+ */
+static void
+r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps)
+{
+       const guid_t NVOP_DSM_GUID =
+               GUID_INIT(0xA486D8F8, 0x0BDA, 0x471B,
+                         0xA7, 0x2B, 0x60, 0x42, 0xA6, 0xB5, 0xBE, 0xE0);
+       u64 NVOP_DSM_REV = 0x00000100;
+       union acpi_object argv4 = {
+               .buffer.type    = ACPI_TYPE_BUFFER,
+               .buffer.length  = 4,
+               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+       }, *obj = NULL;
+
+       caps->status = 0xffff;
+
+       if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a)))
+               goto done;
+
+       obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4);
+       if (!obj)
+               goto done;
+
+       printk(KERN_ERR "nvop: obj type %d\n", obj->type);
+       printk(KERN_ERR "nvop: obj len %d\n", obj->buffer.length);
+
+       if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+           WARN_ON(obj->buffer.length != 4))
+               goto done;
+
+       caps->status = 0;
+       caps->optimusCaps = *(u32 *)obj->buffer.pointer;
+       printk(KERN_ERR "nvop: caps %08x\n", caps->optimusCaps);
+
+done:
+       ACPI_FREE(obj);
+       kfree(argv4.buffer.pointer);
+}
+
+/* Query the JT _DSM (hybrid-graphics capabilities) and fill *jt for the
+ * GSP_SET_SYSTEM_INFO RPC.
+ *
+ * jt->status stays 0xffff unless the _DSM returns a valid 4-byte buffer;
+ * on success status is 0, jtCaps holds the capability word and jtRevId
+ * its revision field (bits 31:20).
+ *
+ * Fix: the malformed-result early return leaked both the evaluated ACPI
+ * object and the argv4 scratch buffer; all exits now go through a single
+ * cleanup tail (ACPI_FREE()/kfree() accept NULL).
+ */
+static void
+r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt)
+{
+       const guid_t JT_DSM_GUID =
+               GUID_INIT(0xCBECA351L, 0x067B, 0x4924,
+                         0x9C, 0xBD, 0xB4, 0x6B, 0x00, 0xB8, 0x6F, 0x34);
+       u64 JT_DSM_REV = 0x00000103;
+       u32 caps;
+       union acpi_object argv4 = {
+               .buffer.type    = ACPI_TYPE_BUFFER,
+               .buffer.length  = sizeof(caps),
+               .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL),
+       }, *obj = NULL;
+
+       jt->status = 0xffff;
+
+       obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4);
+       if (!obj)
+               goto done;
+
+       printk(KERN_ERR "jt: obj type %d\n", obj->type);
+       printk(KERN_ERR "jt: obj len %d\n", obj->buffer.length);
+
+       if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+           WARN_ON(obj->buffer.length != 4))
+               goto done;
+
+       jt->status = 0;
+       jt->jtCaps = *(u32 *)obj->buffer.pointer;
+       jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20;
+       jt->bSBIOSCaps = 0;
+       printk(KERN_ERR "jt: caps %08x rev:%04x\n", jt->jtCaps, jt->jtRevId);
+
+done:
+       ACPI_FREE(obj);
+       kfree(argv4.buffer.pointer);
+}
+
+/* Look up the child ACPI device whose _ADR matches @id and evaluate its
+ * MXDM (mux mode) and MXDS (mux part/state) methods into *mode / *part.
+ *
+ * Both statuses stay 0xffff when no matching device or method is found.
+ *
+ * Fix: handle_mux was read uninitialized - the `while (!handle_mux)`
+ * condition is evaluated on the `continue` path before any assignment,
+ * which is undefined behavior.  Initialise it to NULL.
+ */
+static void
+r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
+                                                MUX_METHOD_DATA_ELEMENT *part)
+{
+       acpi_handle iter = NULL, handle_mux = NULL;
+       acpi_status status;
+       unsigned long long value;
+
+       mode->status = 0xffff;
+       part->status = 0xffff;
+
+       /* Walk child devices until one's _ADR matches @id. */
+       do {
+               status = acpi_get_next_object(ACPI_TYPE_DEVICE, handle, iter, &iter);
+               if (ACPI_FAILURE(status) || !iter)
+                       return;
+
+               status = acpi_evaluate_integer(iter, "_ADR", NULL, &value);
+               if (ACPI_FAILURE(status) || value != id)
+                       continue;
+
+               handle_mux = iter;
+       } while (!handle_mux);
+
+       if (!handle_mux)
+               return;
+
+       status = acpi_evaluate_integer(handle_mux, "MXDM", NULL, &value);
+       if (ACPI_SUCCESS(status)) {
+               mode->acpiId = id;
+               mode->mode   = value;
+               mode->status = 0;
+       }
+
+       status = acpi_evaluate_integer(handle_mux, "MXDS", NULL, &value);
+       if (ACPI_SUCCESS(status)) {
+               part->acpiId = id;
+               part->mode   = value;
+               part->status = 0;
+       }
+}
+
+/* For each display ACPI id reported by _DOD, collect the mux mode and
+ * part state tables via r535_gsp_acpi_mux_id().
+ */
+static void
+r535_gsp_acpi_mux(acpi_handle handle, DOD_METHOD_DATA *dod, MUX_METHOD_DATA *mux)
+{
+       mux->tableLen = dod->acpiIdListLen / sizeof(dod->acpiIdList[0]);
+
+       for (int i = 0; i < mux->tableLen; i++) {
+               r535_gsp_acpi_mux_id(handle, dod->acpiIdList[i], &mux->acpiIdMuxModeTable[i],
+                                                                &mux->acpiIdMuxPartTable[i]);
+       }
+}
+
+/* Evaluate the _DOD (Display Output Devices) method and copy the
+ * returned ACPI ids into dod->acpiIdList.
+ *
+ * dod->status stays 0xffff unless _DOD returns a well-formed package of
+ * integers that fits in acpiIdList.
+ *
+ * Fix: the ACPI_ALLOCATE_BUFFER result (output.pointer) was never freed
+ * on any path, including success; release it through a common tail.
+ */
+static void
+r535_gsp_acpi_dod(acpi_handle handle, DOD_METHOD_DATA *dod)
+{
+       acpi_status status;
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *_DOD;
+
+       dod->status = 0xffff;
+
+       status = acpi_evaluate_object(handle, "_DOD", NULL, &output);
+       if (ACPI_FAILURE(status))
+               return;
+
+       _DOD = output.pointer;
+
+       if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) ||
+           WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList)))
+               goto done;
+
+       for (int i = 0; i < _DOD->package.count; i++) {
+               if (WARN_ON(_DOD->package.elements[i].type != ACPI_TYPE_INTEGER))
+                       goto done;
+
+               dod->acpiIdList[i] = _DOD->package.elements[i].integer.value;
+               dod->acpiIdListLen += sizeof(dod->acpiIdList[0]);
+       }
+
+       printk(KERN_ERR "_DOD: ok! len:%d\n", dod->acpiIdListLen);
+       dod->status = 0;
+done:
+       kfree(output.pointer);
+}
+#endif
+
+/* Gather platform ACPI data (_DOD display ids, mux state, JT and NVOP
+ * _DSM capabilities) for the GSP_SET_SYSTEM_INFO RPC.  No-op (acpi left
+ * zeroed) on non-x86/non-ACPI builds or when the device has no ACPI
+ * handle.
+ */
+static void
+r535_gsp_acpi_info(struct nvkm_gsp *gsp, ACPI_METHOD_DATA *acpi)
+{
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+       acpi_handle handle = ACPI_HANDLE(gsp->subdev.device->dev);
+
+       if (!handle)
+               return;
+
+       acpi->bValid = 1;
+
+       r535_gsp_acpi_dod(handle, &acpi->dodMethodData);
+       /* Mux info is only meaningful when _DOD succeeded. */
+       if (acpi->dodMethodData.status == 0)
+               r535_gsp_acpi_mux(handle, &acpi->dodMethodData, &acpi->muxMethodData);
+
+       r535_gsp_acpi_jt(handle, &acpi->jtMethodData);
+       r535_gsp_acpi_caps(handle, &acpi->capsMethodData);
+#endif
+}
+
+/* Send the GSP_SET_SYSTEM_INFO RPC describing the host platform: BAR
+ * physical addresses, PCI B/D/F, user VA limit, the PCI config-space
+ * mirror window, and ACPI method data.  PCI devices only (Tegra is
+ * rejected, since the pdev container_of below assumes a PCI parent).
+ *
+ * Sent without waiting for a reply; returns 0 or a negative error.
+ */
+static int
+r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       struct nvkm_device_pci *pdev = container_of(device, typeof(*pdev), device);
+       GspSystemInfo *info;
+
+       if (WARN_ON(device->type == NVKM_DEVICE_TEGRA))
+               return -ENOSYS;
+
+       info = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_GSP_SET_SYSTEM_INFO, sizeof(*info));
+       if (IS_ERR(info))
+               return PTR_ERR(info);
+
+       /* BAR0 = MMIO, BAR1 = FB aperture, BAR3 = instance memory. */
+       info->gpuPhysAddr = device->func->resource_addr(device, 0);
+       info->gpuPhysFbAddr = device->func->resource_addr(device, 1);
+       info->gpuPhysInstAddr = device->func->resource_addr(device, 3);
+       info->nvDomainBusDeviceFunc = pci_dev_id(pdev->pdev);
+       info->maxUserVa = TASK_SIZE;
+       info->pciConfigMirrorBase = 0x088000;
+       info->pciConfigMirrorSize = 0x001000;
+       r535_gsp_acpi_info(gsp, &info->acpiMethodData);
+
+       return nvkm_gsp_rpc_wr(gsp, info, false);
+}
+
+/* Message handler: log an OS_ERROR_LOG (Xid) event reported by GSP-RM.
+ *
+ * Returns 0, or -EINVAL if the payload is shorter than the message
+ * structure.
+ */
+static int
+r535_gsp_msg_os_error_log(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       rpc_os_error_log_v17_00 *msg = repv;
+
+       if (WARN_ON(repc < sizeof(*msg)))
+               return -EINVAL;
+
+       nvkm_error(subdev, "Xid:%d %s\n", msg->exceptType, msg->errString);
+       return 0;
+}
+
+/* Message handler: GSP-RM reported a robust-channel (RC) error.  Log it,
+ * then look up the affected channel and mark it errored.
+ *
+ * Returns 0 (even when the channel is not found), or -EINVAL for a
+ * short payload.
+ */
+static int
+r535_gsp_msg_rc_triggered(void *priv, u32 fn, void *repv, u32 repc)
+{
+       rpc_rc_triggered_v17_02 *msg = repv;
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_chan *chan;
+       unsigned long flags;
+
+       if (WARN_ON(repc < sizeof(*msg)))
+               return -EINVAL;
+
+       nvkm_error(subdev, "rc engn:%08x chid:%d type:%d scope:%d part:%d\n",
+                  msg->nv2080EngineType, msg->chid, msg->exceptType, msg->scope,
+                  msg->partitionAttributionId);
+
+       /* NOTE(review): RM chid is divided by 8 to get the nvkm chid -
+        * presumably an RM sub-channel encoding; confirm against fifo code. */
+       chan = nvkm_chan_get_chid(&subdev->device->fifo->engine, msg->chid / 8, &flags);
+       if (!chan) {
+               nvkm_error(subdev, "rc chid:%d not found!\n", msg->chid);
+               return 0;
+       }
+
+       nvkm_chan_error(chan, false);
+       nvkm_chan_put(&chan, flags);
+       return 0;
+}
+
+/* Message handler: GSP-RM queued an MMU fault.  The event carries no
+ * payload (repc expected to be 0); just log it.
+ */
+static int
+r535_gsp_msg_mmu_fault_queued(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+
+       WARN_ON(repc != 0);
+
+       nvkm_error(subdev, "mmu fault queued\n");
+       return 0;
+}
+
+/* Message handler: dispatch a POST_EVENT notification from GSP-RM to the
+ * matching registered nvkm_gsp_event(s) of the target client.
+ *
+ * The client is found via the low 16 bits of hClient in the client_id
+ * idr; every event whose object handle matches hEvent gets its callback
+ * invoked with the variable-length event data.  Unmatched clients or
+ * events are logged.  Returns 0, or -EINVAL for a malformed payload.
+ */
+static int
+r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_gsp_client *client;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       rpc_post_event_v17_00 *msg = repv;
+
+       /* Header must fit, and the payload size must match eventDataSize. */
+       if (WARN_ON(repc < sizeof(*msg)))
+               return -EINVAL;
+       if (WARN_ON(repc != sizeof(*msg) + msg->eventDataSize))
+               return -EINVAL;
+
+       nvkm_debug(subdev, "event: %08x %08x %d %08x %08x %d %d\n",
+                  msg->hClient, msg->hEvent, msg->notifyIndex, msg->data,
+                  msg->status, msg->eventDataSize, msg->bNotifyList);
+
+       mutex_lock(&gsp->client_id.mutex);
+       client = idr_find(&gsp->client_id.idr, msg->hClient & 0xffff);
+       if (client) {
+               struct nvkm_gsp_event *event;
+               bool handled = false;
+
+               list_for_each_entry(event, &client->events, head) {
+                       if (event->object.handle == msg->hEvent) {
+                               event->func(event, msg->eventData, msg->eventDataSize);
+                               handled = true;
+                       }
+               }
+
+               if (!handled) {
+                       nvkm_error(subdev, "event: cid 0x%08x event 0x%08x not found!\n",
+                                  msg->hClient, msg->hEvent);
+               }
+       } else {
+               nvkm_error(subdev, "event: cid 0x%08x not found!\n", msg->hClient);
+       }
+       mutex_unlock(&gsp->client_id.mutex);
+       return 0;
+}
+
+/* Message handler: execute a CPU sequencer script supplied by GSP-RM.
+ *
+ * The script is a packed stream of GSP_SEQUENCER_BUFFER_CMDs; each opcode
+ * directs the host CPU to perform register writes/polls, delays, core
+ * reset/start/resume, etc., on the firmware's behalf.  Commands must be
+ * executed in order.  Returns 0 on completion, or a negative error on an
+ * unknown opcode or a failed resume step.
+ */
+static int
+r535_gsp_msg_run_cpu_sequencer(void *priv, u32 fn, void *repv, u32 repc)
+{
+       struct nvkm_gsp *gsp = priv;
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_device *device = subdev->device;
+       rpc_run_cpu_sequencer_v17_00 *seq = repv;
+       int ptr = 0, ret;
+
+       nvkm_debug(subdev, "seq: %08x %08x\n", seq->bufferSizeDWord, seq->cmdIndex);
+
+       while (ptr < seq->cmdIndex) {
+               GSP_SEQUENCER_BUFFER_CMD *cmd = (void *)&seq->commandBuffer[ptr];
+
+               /* Advance past the opcode dword plus its payload dwords. */
+               ptr += 1;
+               ptr += GSP_SEQUENCER_PAYLOAD_SIZE_DWORDS(cmd->opCode);
+
+               switch (cmd->opCode) {
+               case GSP_SEQ_BUF_OPCODE_REG_WRITE: {
+                       u32 addr = cmd->payload.regWrite.addr;
+                       u32 data = cmd->payload.regWrite.val;
+
+                       nvkm_trace(subdev, "seq wr32 %06x %08x\n", addr, data);
+                       nvkm_wr32(device, addr, data);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_REG_MODIFY: {
+                       u32 addr = cmd->payload.regModify.addr;
+                       u32 mask = cmd->payload.regModify.mask;
+                       u32 data = cmd->payload.regModify.val;
+
+                       nvkm_trace(subdev, "seq mask %06x %08x %08x\n", addr, mask, data);
+                       nvkm_mask(device, addr, mask, data);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_REG_POLL: {
+                       u32 addr = cmd->payload.regPoll.addr;
+                       u32 mask = cmd->payload.regPoll.mask;
+                       u32 data = cmd->payload.regPoll.val;
+                       /* Default timeout 4s when the script gives none. */
+                       u32 usec = cmd->payload.regPoll.timeout ?: 4000000;
+                       //u32 error = cmd->payload.regPoll.error;
+
+                       nvkm_trace(subdev, "seq poll %06x %08x %08x %d\n", addr, mask, data, usec);
+                       nvkm_rd32(device, addr);
+                       nvkm_usec(device, usec,
+                               if ((nvkm_rd32(device, addr) & mask) == data)
+                                       break;
+                       );
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_DELAY_US: {
+                       u32 usec = cmd->payload.delayUs.val;
+
+                       nvkm_trace(subdev, "seq usec %d\n", usec);
+                       udelay(usec);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_REG_STORE: {
+                       u32 addr = cmd->payload.regStore.addr;
+                       u32 slot = cmd->payload.regStore.index;
+
+                       seq->regSaveArea[slot] = nvkm_rd32(device, addr);
+                       nvkm_trace(subdev, "seq save %08x -> %d: %08x\n", addr, slot,
+                                  seq->regSaveArea[slot]);
+               }
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_RESET:
+                       nvkm_trace(subdev, "seq core reset\n");
+                       nvkm_falcon_reset(&gsp->falcon);
+                       nvkm_falcon_mask(&gsp->falcon, 0x624, 0x00000080, 0x00000080);
+                       nvkm_falcon_wr32(&gsp->falcon, 0x10c, 0x00000000);
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_START:
+                       nvkm_trace(subdev, "seq core start\n");
+                       if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000040)
+                               nvkm_falcon_wr32(&gsp->falcon, 0x130, 0x00000002);
+                       else
+                               nvkm_falcon_wr32(&gsp->falcon, 0x100, 0x00000002);
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_WAIT_FOR_HALT:
+                       nvkm_trace(subdev, "seq core wait halt\n");
+                       nvkm_msec(device, 2000,
+                               if (nvkm_falcon_rd32(&gsp->falcon, 0x100) & 0x00000010)
+                                       break;
+                       );
+                       break;
+               case GSP_SEQ_BUF_OPCODE_CORE_RESUME: {
+                       struct nvkm_sec2 *sec2 = device->sec2;
+                       u32 mbox0;
+
+                       nvkm_trace(subdev, "seq core resume\n");
+
+                       /* Re-boot GSP via sec2, point it at the libos args,
+                        * and verify the RISC-V core came back up. */
+                       ret = gsp->func->reset(gsp);
+                       if (WARN_ON(ret))
+                               return ret;
+
+                       nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+                       nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+
+                       nvkm_falcon_start(&sec2->falcon);
+
+                       if (nvkm_msec(device, 2000,
+                               if (nvkm_rd32(device, 0x1180f8) & 0x04000000)
+                                       break;
+                       ) < 0)
+                               return -ETIMEDOUT;
+
+                       mbox0 = nvkm_falcon_rd32(&sec2->falcon, 0x040);
+                       if (WARN_ON(mbox0)) {
+                               nvkm_error(&gsp->subdev, "seq core resume sec2: 0x%x\n", mbox0);
+                               return -EIO;
+                       }
+
+                       nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
+
+                       if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+                               return -EIO;
+               }
+                       break;
+               default:
+                       nvkm_error(subdev, "unknown sequencer opcode %08x\n", cmd->opCode);
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+/* Free a DMA-coherent buffer allocated by nvkm_gsp_mem_ctor().
+ * Safe to call on an unallocated/already-freed mem; data is NULLed to
+ * make repeat calls a no-op.
+ */
+static void
+nvkm_gsp_mem_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_mem *mem)
+{
+       if (mem->data) {
+               dma_free_coherent(gsp->subdev.device->dev, mem->size, mem->data, mem->addr);
+               mem->data = NULL;
+       }
+}
+
+/* Allocate a @size-byte DMA-coherent buffer for host<->GSP shared data,
+ * recording the CPU pointer and DMA address in *mem.
+ *
+ * Returns 0 on success or -ENOMEM.  Pair with nvkm_gsp_mem_dtor().
+ */
+static int
+nvkm_gsp_mem_ctor(struct nvkm_gsp *gsp, u32 size, struct nvkm_gsp_mem *mem)
+{
+       mem->size = size;
+       mem->data = dma_alloc_coherent(gsp->subdev.device->dev, size, &mem->addr, GFP_KERNEL);
+       if (WARN_ON(!mem->data))
+               return -ENOMEM;
+
+       return 0;
+}
+
+
+/* Run the booter-unload firmware to tear down WPR2.
+ *
+ * Skipped (returns 0) when the WPR2-high register reads zero, i.e. WPR2
+ * was never established.  After a successful run the same register must
+ * read zero again, else -EIO.
+ */
+static int
+r535_gsp_booter_unload(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       struct nvkm_device *device = subdev->device;
+       u32 wpr2_hi;
+       int ret;
+
+       wpr2_hi = nvkm_rd32(device, 0x1fa828);
+       if (!wpr2_hi) {
+               nvkm_debug(subdev, "WPR2 not set - skipping booter unload\n");
+               return 0;
+       }
+
+       ret = nvkm_falcon_fw_boot(&gsp->booter.unload, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+       if (WARN_ON(ret))
+               return ret;
+
+       /* Verify booter-unload actually cleared WPR2. */
+       wpr2_hi = nvkm_rd32(device, 0x1fa828);
+       if (WARN_ON(wpr2_hi))
+               return -EIO;
+
+       return 0;
+}
+
+static int
+r535_gsp_booter_load(struct nvkm_gsp *gsp, u32 mbox0, u32 mbox1)
+{
+       int ret;
+
+       ret = nvkm_falcon_fw_boot(&gsp->booter.load, &gsp->subdev, true, &mbox0, &mbox1, 0, 0);
+       if (ret)
+               return ret;
+
+       nvkm_falcon_wr32(&gsp->falcon, 0x080, gsp->boot.app_version);
+
+       if (WARN_ON(!nvkm_falcon_riscv_active(&gsp->falcon)))
+               return -EIO;
+
+       return 0;
+}
+
+static int
+r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
+{
+       GspFwWprMeta *meta;
+       int ret;
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->wpr_meta);
+       if (ret)
+               return ret;
+
+       meta = gsp->wpr_meta.data;
+
+       meta->magic = GSP_FW_WPR_META_MAGIC;
+       meta->revision = GSP_FW_WPR_META_REVISION;
+
+       meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr;
+       meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
+
+       meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
+       meta->sizeOfBootloader = gsp->boot.fw.size;
+       meta->bootloaderCodeOffset = gsp->boot.code_offset;
+       meta->bootloaderDataOffset = gsp->boot.data_offset;
+       meta->bootloaderManifestOffset = gsp->boot.manifest_offset;
+
+       meta->sysmemAddrOfSignature = gsp->sig.addr;
+       meta->sizeOfSignature = gsp->sig.size;
+
+       meta->gspFwRsvdStart = gsp->fb.heap.addr;
+       meta->nonWprHeapOffset = gsp->fb.heap.addr;
+       meta->nonWprHeapSize = gsp->fb.heap.size;
+       meta->gspFwWprStart = gsp->fb.wpr2.addr;
+       meta->gspFwHeapOffset = gsp->fb.wpr2.heap.addr;
+       meta->gspFwHeapSize = gsp->fb.wpr2.heap.size;
+       meta->gspFwOffset = gsp->fb.wpr2.elf.addr;
+       meta->bootBinOffset = gsp->fb.wpr2.boot.addr;
+       meta->frtsOffset = gsp->fb.wpr2.frts.addr;
+       meta->frtsSize = gsp->fb.wpr2.frts.size;
+       meta->gspFwWprEnd = ALIGN_DOWN(gsp->fb.bios.vga_workspace.addr, 0x20000);
+       meta->fbSize = gsp->fb.size;
+       meta->vgaWorkspaceOffset = gsp->fb.bios.vga_workspace.addr;
+       meta->vgaWorkspaceSize = gsp->fb.bios.vga_workspace.size;
+       meta->bootCount = 0;
+       meta->partitionRpcAddr = 0;
+       meta->partitionRpcRequestOffset = 0;
+       meta->partitionRpcReplyOffset = 0;
+       meta->verified = 0;
+       return 0;
+}
+
+static int
+r535_gsp_shared_init(struct nvkm_gsp *gsp)
+{
+       struct {
+               msgqTxHeader tx;
+               msgqRxHeader rx;
+       } *cmdq, *msgq;
+       int ret, i;
+
+       gsp->shm.cmdq.size = 0x40000;
+       gsp->shm.msgq.size = 0x40000;
+
+       gsp->shm.ptes.nr  = (gsp->shm.cmdq.size + gsp->shm.msgq.size) >> GSP_PAGE_SHIFT;
+       gsp->shm.ptes.nr += DIV_ROUND_UP(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
+       gsp->shm.ptes.size = ALIGN(gsp->shm.ptes.nr * sizeof(u64), GSP_PAGE_SIZE);
+
+       ret = nvkm_gsp_mem_ctor(gsp, gsp->shm.ptes.size +
+                                    gsp->shm.cmdq.size +
+                                    gsp->shm.msgq.size,
+                               &gsp->shm.mem);
+       if (ret)
+               return ret;
+
+       gsp->shm.ptes.ptr = gsp->shm.mem.data;
+       gsp->shm.cmdq.ptr = (u8 *)gsp->shm.ptes.ptr + gsp->shm.ptes.size;
+       gsp->shm.msgq.ptr = (u8 *)gsp->shm.cmdq.ptr + gsp->shm.cmdq.size;
+
+       for (i = 0; i < gsp->shm.ptes.nr; i++)
+               gsp->shm.ptes.ptr[i] = gsp->shm.mem.addr + (i << GSP_PAGE_SHIFT);
+
+       cmdq = gsp->shm.cmdq.ptr;
+       cmdq->tx.version = 0;
+       cmdq->tx.size = gsp->shm.cmdq.size;
+       cmdq->tx.entryOff = GSP_PAGE_SIZE;
+       cmdq->tx.msgSize = GSP_PAGE_SIZE;
+       cmdq->tx.msgCount = (cmdq->tx.size - cmdq->tx.entryOff) / cmdq->tx.msgSize;
+       cmdq->tx.writePtr = 0;
+       cmdq->tx.flags = 1;
+       cmdq->tx.rxHdrOff = offsetof(typeof(*cmdq), rx.readPtr);
+
+       msgq = gsp->shm.msgq.ptr;
+
+       gsp->cmdq.cnt = cmdq->tx.msgCount;
+       gsp->cmdq.wptr = &cmdq->tx.writePtr;
+       gsp->cmdq.rptr = &msgq->rx.readPtr;
+       gsp->msgq.cnt = cmdq->tx.msgCount;
+       gsp->msgq.wptr = &msgq->tx.writePtr;
+       gsp->msgq.rptr = &cmdq->rx.readPtr;
+       return 0;
+}
+
+static int
+r535_gsp_rmargs_init(struct nvkm_gsp *gsp, bool resume)
+{
+       GSP_ARGUMENTS_CACHED *args;
+       int ret;
+
+       if (!resume) {
+               ret = r535_gsp_shared_init(gsp);
+               if (ret)
+                       return ret;
+
+               ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->rmargs);
+               if (ret)
+                       return ret;
+       }
+
+       args = gsp->rmargs.data;
+       args->messageQueueInitArguments.sharedMemPhysAddr = gsp->shm.mem.addr;
+       args->messageQueueInitArguments.pageTableEntryCount = gsp->shm.ptes.nr;
+       args->messageQueueInitArguments.cmdQueueOffset =
+               (u8 *)gsp->shm.cmdq.ptr - (u8 *)gsp->shm.mem.data;
+       args->messageQueueInitArguments.statQueueOffset =
+               (u8 *)gsp->shm.msgq.ptr - (u8 *)gsp->shm.mem.data;
+
+       if (!resume) {
+               args->srInitArguments.oldLevel = 0;
+               args->srInitArguments.flags = 0;
+               args->srInitArguments.bInPMTransition = 0;
+       } else {
+               args->srInitArguments.oldLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_3;
+               args->srInitArguments.flags = 0;
+               args->srInitArguments.bInPMTransition = 1;
+       }
+
+       return 0;
+}
+
+static inline u64
+r535_gsp_libos_id8(const char *name)
+{
+       u64 id = 0;
+
+       for (int i = 0; i < sizeof(id) && *name; i++, name++)
+               id = (id << 8) | *name;
+
+       return id;
+}
+
+static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
+{
+       unsigned int num_pages = DIV_ROUND_UP_ULL(size, GSP_PAGE_SIZE);
+       unsigned int i;
+
+       for (i = 0; i < num_pages; i++)
+               ptes[i] = (u64)addr + (i << GSP_PAGE_SHIFT);
+}
+
+static int
+r535_gsp_libos_init(struct nvkm_gsp *gsp)
+{
+       LibosMemoryRegionInitArgument *args;
+       int ret;
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x1000, &gsp->libos);
+       if (ret)
+               return ret;
+
+       args = gsp->libos.data;
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->loginit);
+       if (ret)
+               return ret;
+
+       args[0].id8  = r535_gsp_libos_id8("LOGINIT");
+       args[0].pa   = gsp->loginit.addr;
+       args[0].size = gsp->loginit.size;
+       args[0].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[0].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+       create_pte_array(gsp->loginit.data + sizeof(u64), gsp->loginit.addr, gsp->loginit.size);
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logintr);
+       if (ret)
+               return ret;
+
+       args[1].id8  = r535_gsp_libos_id8("LOGINTR");
+       args[1].pa   = gsp->logintr.addr;
+       args[1].size = gsp->logintr.size;
+       args[1].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[1].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+       create_pte_array(gsp->logintr.data + sizeof(u64), gsp->logintr.addr, gsp->logintr.size);
+
+       ret = nvkm_gsp_mem_ctor(gsp, 0x10000, &gsp->logrm);
+       if (ret)
+               return ret;
+
+       args[2].id8  = r535_gsp_libos_id8("LOGRM");
+       args[2].pa   = gsp->logrm.addr;
+       args[2].size = gsp->logrm.size;
+       args[2].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[2].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+       create_pte_array(gsp->logrm.data + sizeof(u64), gsp->logrm.addr, gsp->logrm.size);
+
+       ret = r535_gsp_rmargs_init(gsp, false);
+       if (ret)
+               return ret;
+
+       args[3].id8  = r535_gsp_libos_id8("RMARGS");
+       args[3].pa   = gsp->rmargs.addr;
+       args[3].size = gsp->rmargs.size;
+       args[3].kind = LIBOS_MEMORY_REGION_CONTIGUOUS;
+       args[3].loc  = LIBOS_MEMORY_REGION_LOC_SYSMEM;
+       return 0;
+}
+
+void
+nvkm_gsp_sg_free(struct nvkm_device *device, struct sg_table *sgt)
+{
+       struct scatterlist *sgl;
+       int i;
+
+       dma_unmap_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
+
+       for_each_sgtable_sg(sgt, sgl, i) {
+               struct page *page = sg_page(sgl);
+
+               __free_page(page);
+       }
+
+       sg_free_table(sgt);
+}
+
+int
+nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
+{
+       const u64 pages = DIV_ROUND_UP(size, PAGE_SIZE);
+       struct scatterlist *sgl;
+       int ret, i;
+
+       ret = sg_alloc_table(sgt, pages, GFP_KERNEL);
+       if (ret)
+               return ret;
+
+       for_each_sgtable_sg(sgt, sgl, i) {
+               struct page *page = alloc_page(GFP_KERNEL);
+
+               if (!page) {
+                       nvkm_gsp_sg_free(device, sgt);
+                       return -ENOMEM;
+               }
+
+               sg_set_page(sgl, page, PAGE_SIZE, 0);
+       }
+
+       ret = dma_map_sgtable(device->dev, sgt, DMA_BIDIRECTIONAL, 0);
+       if (ret)
+               nvkm_gsp_sg_free(device, sgt);
+
+       return ret;
+}
+
+static void
+nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
+{
+       for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--)
+               nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
+}
+
+static int
+nvkm_gsp_radix3_sg(struct nvkm_device *device, struct sg_table *sgt, u64 size,
+                  struct nvkm_gsp_radix3 *rx3)
+{
+       u64 addr;
+
+       for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
+               u64 *ptes;
+               int idx;
+
+               rx3->mem[i].size = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+               rx3->mem[i].data = dma_alloc_coherent(device->dev, rx3->mem[i].size,
+                                                     &rx3->mem[i].addr, GFP_KERNEL);
+               if (WARN_ON(!rx3->mem[i].data))
+                       return -ENOMEM;
+
+               ptes = rx3->mem[i].data;
+               if (i == 2) {
+                       struct scatterlist *sgl;
+
+                       for_each_sgtable_dma_sg(sgt, sgl, idx) {
+                               for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++)
+                                       *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j);
+                       }
+               } else {
+                       for (int j = 0; j < size / GSP_PAGE_SIZE; j++)
+                               *ptes++ = addr + GSP_PAGE_SIZE * j;
+               }
+
+               size = rx3->mem[i].size;
+               addr = rx3->mem[i].addr;
+       }
+
+       return 0;
+}
+
+int
+r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
+{
+       u32 mbox0 = 0xff, mbox1 = 0xff;
+       int ret;
+
+       if (!gsp->running)
+               return 0;
+
+       if (suspend) {
+               GspFwWprMeta *meta = gsp->wpr_meta.data;
+               u64 len = meta->gspFwWprEnd - meta->gspFwWprStart;
+               GspFwSRMeta *sr;
+
+               ret = nvkm_gsp_sg(gsp->subdev.device, len, &gsp->sr.sgt);
+               if (ret)
+                       return ret;
+
+               ret = nvkm_gsp_radix3_sg(gsp->subdev.device, &gsp->sr.sgt, len, &gsp->sr.radix3);
+               if (ret)
+                       return ret;
+
+               ret = nvkm_gsp_mem_ctor(gsp, sizeof(*sr), &gsp->sr.meta);
+               if (ret)
+                       return ret;
+
+               sr = gsp->sr.meta.data;
+               sr->magic = GSP_FW_SR_META_MAGIC;
+               sr->revision = GSP_FW_SR_META_REVISION;
+               sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr;
+               sr->sizeOfSuspendResumeData = len;
+
+               mbox0 = lower_32_bits(gsp->sr.meta.addr);
+               mbox1 = upper_32_bits(gsp->sr.meta.addr);
+       }
+
+       ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend);
+       if (WARN_ON(ret))
+               return ret;
+
+       nvkm_msec(gsp->subdev.device, 2000,
+               if (nvkm_falcon_rd32(&gsp->falcon, 0x040) & 0x80000000)
+                       break;
+       );
+
+       nvkm_falcon_reset(&gsp->falcon);
+
+       ret = nvkm_gsp_fwsec_sb(gsp);
+       WARN_ON(ret);
+
+       ret = r535_gsp_booter_unload(gsp, mbox0, mbox1);
+       WARN_ON(ret);
+
+       gsp->running = false;
+       return 0;
+}
+
+int
+r535_gsp_init(struct nvkm_gsp *gsp)
+{
+       u32 mbox0, mbox1;
+       int ret;
+
+       if (!gsp->sr.meta.data) {
+               mbox0 = lower_32_bits(gsp->wpr_meta.addr);
+               mbox1 = upper_32_bits(gsp->wpr_meta.addr);
+       } else {
+               r535_gsp_rmargs_init(gsp, true);
+
+               mbox0 = lower_32_bits(gsp->sr.meta.addr);
+               mbox1 = upper_32_bits(gsp->sr.meta.addr);
+       }
+
+       /* Execute booter to handle (eventually...) booting GSP-RM. */
+       ret = r535_gsp_booter_load(gsp, mbox0, mbox1);
+       if (WARN_ON(ret))
+               goto done;
+
+       ret = r535_gsp_rpc_poll(gsp, NV_VGPU_MSG_EVENT_GSP_INIT_DONE);
+       if (ret)
+               goto done;
+
+       gsp->running = true;
+
+done:
+       if (gsp->sr.meta.data) {
+               nvkm_gsp_mem_dtor(gsp, &gsp->sr.meta);
+               nvkm_gsp_radix3_dtor(gsp, &gsp->sr.radix3);
+               nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt);
+               return ret;
+       }
+
+       if (ret == 0)
+               ret = r535_gsp_postinit(gsp);
+
+       return ret;
+}
+
+static int
+r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
+{
+       const struct firmware *fw = gsp->fws.bl;
+       const struct nvfw_bin_hdr *hdr;
+       RM_RISCV_UCODE_DESC *desc;
+       int ret;
+
+       hdr = nvfw_bin_hdr(&gsp->subdev, fw->data);
+       desc = (void *)fw->data + hdr->header_offset;
+
+       ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw);
+       if (ret)
+               return ret;
+
+       memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size);
+
+       gsp->boot.code_offset = desc->monitorCodeOffset;
+       gsp->boot.data_offset = desc->monitorDataOffset;
+       gsp->boot.manifest_offset = desc->manifestOffset;
+       gsp->boot.app_version = desc->appVersion;
+       return 0;
+}
+
+static const struct nvkm_firmware_func
+r535_gsp_fw = {
+       .type = NVKM_FIRMWARE_IMG_SGT,
+};
+
+static int
+r535_gsp_elf_section(struct nvkm_gsp *gsp, const char *name, const u8 **pdata, u64 *psize)
+{
+       const u8 *img = gsp->fws.rm->data;
+       const struct elf64_hdr *ehdr = (const struct elf64_hdr *)img;
+       const struct elf64_shdr *shdr = (const struct elf64_shdr *)&img[ehdr->e_shoff];
+       const char *names = &img[shdr[ehdr->e_shstrndx].sh_offset];
+
+       for (int i = 0; i < ehdr->e_shnum; i++, shdr++) {
+               if (!strcmp(&names[shdr->sh_name], name)) {
+                       *pdata = &img[shdr->sh_offset];
+                       *psize = shdr->sh_size;
+                       return 0;
+               }
+       }
+
+       nvkm_error(&gsp->subdev, "section '%s' not found\n", name);
+       return -ENOENT;
+}
+
+static void
+r535_gsp_dtor_fws(struct nvkm_gsp *gsp)
+{
+       nvkm_firmware_put(gsp->fws.bl);
+       gsp->fws.bl = NULL;
+       nvkm_firmware_put(gsp->fws.booter.unload);
+       gsp->fws.booter.unload = NULL;
+       nvkm_firmware_put(gsp->fws.booter.load);
+       gsp->fws.booter.load = NULL;
+       nvkm_firmware_put(gsp->fws.rm);
+       gsp->fws.rm = NULL;
+}
+
+void
+r535_gsp_dtor(struct nvkm_gsp *gsp)
+{
+       idr_destroy(&gsp->client_id.idr);
+       mutex_destroy(&gsp->client_id.mutex);
+
+       nvkm_gsp_radix3_dtor(gsp, &gsp->radix3);
+       nvkm_gsp_mem_dtor(gsp, &gsp->sig);
+       nvkm_firmware_dtor(&gsp->fw);
+
+       nvkm_falcon_fw_dtor(&gsp->booter.unload);
+       nvkm_falcon_fw_dtor(&gsp->booter.load);
+
+       mutex_destroy(&gsp->msgq.mutex);
+       mutex_destroy(&gsp->cmdq.mutex);
+
+       r535_gsp_dtor_fws(gsp);
+}
+
+int
+r535_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       const u8 *data;
+       u64 size;
+       int ret;
+
+       mutex_init(&gsp->cmdq.mutex);
+       mutex_init(&gsp->msgq.mutex);
+
+       ret = gsp->func->booter.ctor(gsp, "booter-load", gsp->fws.booter.load,
+                                    &device->sec2->falcon, &gsp->booter.load);
+       if (ret)
+               return ret;
+
+       ret = gsp->func->booter.ctor(gsp, "booter-unload", gsp->fws.booter.unload,
+                                    &device->sec2->falcon, &gsp->booter.unload);
+       if (ret)
+               return ret;
+
+       /* Load GSP firmware from ELF image into DMA-accessible memory. */
+       ret = r535_gsp_elf_section(gsp, ".fwimage", &data, &size);
+       if (ret)
+               return ret;
+
+       ret = nvkm_firmware_ctor(&r535_gsp_fw, "gsp-rm", device, data, size, &gsp->fw);
+       if (ret)
+               return ret;
+
+       /* Load relevant signature from ELF image. */
+       ret = r535_gsp_elf_section(gsp, gsp->func->sig_section, &data, &size);
+       if (ret)
+               return ret;
+
+       ret = nvkm_gsp_mem_ctor(gsp, ALIGN(size, 256), &gsp->sig);
+       if (ret)
+               return ret;
+
+       memcpy(gsp->sig.data, data, size);
+
+       /* Build radix3 page table for ELF image. */
+       ret = nvkm_gsp_radix3_sg(device, &gsp->fw.mem.sgt, gsp->fw.len, &gsp->radix3);
+       if (ret)
+               return ret;
+
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_GSP_RUN_CPU_SEQUENCER,
+                             r535_gsp_msg_run_cpu_sequencer, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_POST_EVENT, r535_gsp_msg_post_event, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_RC_TRIGGERED,
+                             r535_gsp_msg_rc_triggered, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_MMU_FAULT_QUEUED,
+                             r535_gsp_msg_mmu_fault_queued, gsp);
+       r535_gsp_msg_ntfy_add(gsp, NV_VGPU_MSG_EVENT_OS_ERROR_LOG, r535_gsp_msg_os_error_log, gsp);
+
+       ret = r535_gsp_rm_boot_ctor(gsp);
+       if (ret)
+               return ret;
+
+       /* Release FW images - we've copied them to DMA buffers now. */
+       r535_gsp_dtor_fws(gsp);
+
+       /* Calculate FB layout. */
+       gsp->fb.wpr2.frts.size = 0x100000;
+       gsp->fb.wpr2.frts.addr = ALIGN_DOWN(gsp->fb.bios.addr, 0x20000) - gsp->fb.wpr2.frts.size;
+
+       gsp->fb.wpr2.boot.size = gsp->boot.fw.size;
+       gsp->fb.wpr2.boot.addr = ALIGN_DOWN(gsp->fb.wpr2.frts.addr - gsp->fb.wpr2.boot.size, 0x1000);
+
+       gsp->fb.wpr2.elf.size = gsp->fw.len;
+       gsp->fb.wpr2.elf.addr = ALIGN_DOWN(gsp->fb.wpr2.boot.addr - gsp->fb.wpr2.elf.size, 0x10000);
+
+       {
+               u32 fb_size_gb = DIV_ROUND_UP_ULL(gsp->fb.size, 1 << 30);
+
+               gsp->fb.wpr2.heap.size =
+                       gsp->func->wpr_heap.os_carveout_size +
+                       gsp->func->wpr_heap.base_size +
+                       ALIGN(GSP_FW_HEAP_PARAM_SIZE_PER_GB_FB * fb_size_gb, 1 << 20) +
+                       ALIGN(GSP_FW_HEAP_PARAM_CLIENT_ALLOC_SIZE, 1 << 20);
+
+               gsp->fb.wpr2.heap.size = max(gsp->fb.wpr2.heap.size, gsp->func->wpr_heap.min_size);
+       }
+
+       gsp->fb.wpr2.heap.addr = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.size, 0x100000);
+       gsp->fb.wpr2.heap.size = ALIGN_DOWN(gsp->fb.wpr2.elf.addr - gsp->fb.wpr2.heap.addr, 0x100000);
+
+       gsp->fb.wpr2.addr = ALIGN_DOWN(gsp->fb.wpr2.heap.addr - sizeof(GspFwWprMeta), 0x100000);
+       gsp->fb.wpr2.size = gsp->fb.wpr2.frts.addr + gsp->fb.wpr2.frts.size - gsp->fb.wpr2.addr;
+
+       gsp->fb.heap.size = 0x100000;
+       gsp->fb.heap.addr = gsp->fb.wpr2.addr - gsp->fb.heap.size;
+
+       ret = nvkm_gsp_fwsec_frts(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_libos_init(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_wpr_meta_init(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_rpc_set_system_info(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       ret = r535_gsp_rpc_set_registry(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       /* Reset GSP into RISC-V mode. */
+       ret = gsp->func->reset(gsp);
+       if (WARN_ON(ret))
+               return ret;
+
+       nvkm_falcon_wr32(&gsp->falcon, 0x040, lower_32_bits(gsp->libos.addr));
+       nvkm_falcon_wr32(&gsp->falcon, 0x044, upper_32_bits(gsp->libos.addr));
+
+       mutex_init(&gsp->client_id.mutex);
+       idr_init(&gsp->client_id.idr);
+       return 0;
+}
+
+static int
+r535_gsp_load_fw(struct nvkm_gsp *gsp, const char *name, const char *ver,
+                const struct firmware **pfw)
+{
+       char fwname[64];
+
+       snprintf(fwname, sizeof(fwname), "gsp/%s-%s", name, ver);
+       return nvkm_firmware_get(&gsp->subdev, fwname, 0, pfw);
+}
+
+int
+r535_gsp_load(struct nvkm_gsp *gsp, int ver, const struct nvkm_gsp_fwif *fwif)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       int ret;
+
+       if (!nvkm_boolopt(subdev->device->cfgopt, "NvGspRm", fwif->enable))
+               return -EINVAL;
+
+       if ((ret = r535_gsp_load_fw(gsp, "gsp", fwif->ver, &gsp->fws.rm)) ||
+           (ret = r535_gsp_load_fw(gsp, "booter_load", fwif->ver, &gsp->fws.booter.load)) ||
+           (ret = r535_gsp_load_fw(gsp, "booter_unload", fwif->ver, &gsp->fws.booter.unload)) ||
+           (ret = r535_gsp_load_fw(gsp, "bootloader", fwif->ver, &gsp->fws.bl))) {
+               r535_gsp_dtor_fws(gsp);
+               return ret;
+       }
+
+       return 0;
+}
+
+#define NVKM_GSP_FIRMWARE(chip)                                  \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_load-535.113.01.bin");   \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/booter_unload-535.113.01.bin"); \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/bootloader-535.113.01.bin");    \
+MODULE_FIRMWARE("nvidia/"#chip"/gsp/gsp-535.113.01.bin")
+
+NVKM_GSP_FIRMWARE(tu102);
+NVKM_GSP_FIRMWARE(tu104);
+NVKM_GSP_FIRMWARE(tu106);
+
+NVKM_GSP_FIRMWARE(tu116);
+NVKM_GSP_FIRMWARE(tu117);
+
+NVKM_GSP_FIRMWARE(ga100);
+
+NVKM_GSP_FIRMWARE(ga102);
+NVKM_GSP_FIRMWARE(ga103);
+NVKM_GSP_FIRMWARE(ga104);
+NVKM_GSP_FIRMWARE(ga106);
+NVKM_GSP_FIRMWARE(ga107);
+
+NVKM_GSP_FIRMWARE(ad102);
+NVKM_GSP_FIRMWARE(ad103);
+NVKM_GSP_FIRMWARE(ad104);
+NVKM_GSP_FIRMWARE(ad106);
+NVKM_GSP_FIRMWARE(ad107);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu102.c
new file mode 100644 (file)
index 0000000..59c5f2b
--- /dev/null
@@ -0,0 +1,198 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/fb.h>
+
+#include <nvfw/flcn.h>
+#include <nvfw/fw.h>
+#include <nvfw/hs.h>
+
+int
+tu102_gsp_booter_ctor(struct nvkm_gsp *gsp, const char *name, const struct firmware *blob,
+                     struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw)
+{
+       struct nvkm_subdev *subdev = &gsp->subdev;
+       const struct nvkm_falcon_fw_func *func = &gm200_flcn_fw;
+       const struct nvfw_bin_hdr *hdr;
+       const struct nvfw_hs_header_v2 *hshdr;
+       const struct nvfw_hs_load_header_v2 *lhdr;
+       u32 loc, sig, cnt;
+       int ret;
+
+       hdr = nvfw_bin_hdr(subdev, blob->data);
+       hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset);
+       loc = *(u32 *)(blob->data + hshdr->patch_loc);
+       sig = *(u32 *)(blob->data + hshdr->patch_sig);
+       cnt = *(u32 *)(blob->data + hshdr->num_sig);
+
+       ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true,
+                                 blob->data + hdr->data_offset, hdr->data_size, falcon, fw);
+       if (ret)
+               goto done;
+
+       ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data,
+                                 cnt, hshdr->sig_prod_offset + sig, 0, 0);
+       if (ret)
+               goto done;
+
+       lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset);
+
+       fw->nmem_base_img = 0;
+       fw->nmem_base = lhdr->os_code_offset;
+       fw->nmem_size = lhdr->os_code_size;
+       fw->imem_base_img = fw->nmem_size;
+       fw->imem_base = lhdr->app[0].offset;
+       fw->imem_size = lhdr->app[0].size;
+       fw->dmem_base_img = lhdr->os_data_offset;
+       fw->dmem_base = 0;
+       fw->dmem_size = lhdr->os_data_size;
+       fw->dmem_sign = loc - fw->dmem_base_img;
+       fw->boot_addr = lhdr->os_code_offset;
+
+done:
+       if (ret)
+               nvkm_falcon_fw_dtor(fw);
+
+       return ret;
+}
+
+static int
+tu102_gsp_fwsec_load_bld(struct nvkm_falcon_fw *fw)
+{
+       struct flcn_bl_dmem_desc_v2 desc = {
+               .ctx_dma = FALCON_DMAIDX_PHYS_SYS_NCOH,
+               .code_dma_base = fw->fw.phys,
+               .non_sec_code_off = fw->nmem_base,
+               .non_sec_code_size = fw->nmem_size,
+               .sec_code_off = fw->imem_base,
+               .sec_code_size = fw->imem_size,
+               .code_entry_point = 0,
+               .data_dma_base = fw->fw.phys + fw->dmem_base_img,
+               .data_size = fw->dmem_size,
+               .argc = 0,
+               .argv = 0,
+       };
+
+       flcn_bl_dmem_desc_v2_dump(fw->falcon->user, &desc);
+
+       nvkm_falcon_mask(fw->falcon, 0x600 + desc.ctx_dma * 4, 0x00000007, 0x00000005);
+
+       return nvkm_falcon_pio_wr(fw->falcon, (u8 *)&desc, 0, 0, DMEM, 0, sizeof(desc), 0, 0);
+}
+
+const struct nvkm_falcon_fw_func
+tu102_gsp_fwsec = {
+       .reset = gm200_flcn_fw_reset,
+       .load = gm200_flcn_fw_load,
+       .load_bld = tu102_gsp_fwsec_load_bld,
+       .boot = gm200_flcn_fw_boot,
+};
+
+int
+tu102_gsp_reset(struct nvkm_gsp *gsp)
+{
+       return gsp->falcon.func->reset_eng(&gsp->falcon);
+}
+
+static u64
+tu102_gsp_vga_workspace_addr(struct nvkm_gsp *gsp, u64 fb_size)
+{
+       struct nvkm_device *device = gsp->subdev.device;
+       const u64 base = fb_size - 0x100000;
+       u64 addr = 0;
+
+       if (device->disp)
+               addr = nvkm_rd32(gsp->subdev.device, 0x625f04);
+       if (!(addr & 0x00000008))
+               return base;
+
+       addr = (addr & 0xffffff00) << 8;
+       if (addr < base)
+               return fb_size - 0x20000;
+
+       return addr;
+}
+
+int
+tu102_gsp_oneinit(struct nvkm_gsp *gsp)
+{
+       gsp->fb.size = nvkm_fb_vidmem_size(gsp->subdev.device);
+
+       gsp->fb.bios.vga_workspace.addr = tu102_gsp_vga_workspace_addr(gsp, gsp->fb.size);
+       gsp->fb.bios.vga_workspace.size = gsp->fb.size - gsp->fb.bios.vga_workspace.addr;
+       gsp->fb.bios.addr = gsp->fb.bios.vga_workspace.addr;
+       gsp->fb.bios.size = gsp->fb.bios.vga_workspace.size;
+
+       return r535_gsp_oneinit(gsp);
+}
+
+const struct nvkm_falcon_func
+tu102_gsp_flcn = {
+       .disable = gm200_flcn_disable,
+       .enable = gm200_flcn_enable,
+       .addr2 = 0x1000,
+       .riscv_irqmask = 0x2b4,
+       .reset_eng = gp102_flcn_reset_eng,
+       .reset_wait_mem_scrubbing = gm200_flcn_reset_wait_mem_scrubbing,
+       .bind_inst = gm200_flcn_bind_inst,
+       .bind_stat = gm200_flcn_bind_stat,
+       .bind_intr = true,
+       .imem_pio = &gm200_flcn_imem_pio,
+       .dmem_pio = &gm200_flcn_dmem_pio,
+       .riscv_active = tu102_flcn_riscv_active,
+};
+
+static const struct nvkm_gsp_func
+tu102_gsp_r535_113_01 = {
+       .flcn = &tu102_gsp_flcn,
+       .fwsec = &tu102_gsp_fwsec,
+
+       .sig_section = ".fwsignature_tu10x",
+
+       .wpr_heap.base_size = 8 << 20,
+       .wpr_heap.min_size = 64 << 20,
+
+       .booter.ctor = tu102_gsp_booter_ctor,
+
+       .dtor = r535_gsp_dtor,
+       .oneinit = tu102_gsp_oneinit,
+       .init = r535_gsp_init,
+       .fini = r535_gsp_fini,
+       .reset = tu102_gsp_reset,
+
+       .rm = &r535_gsp_rm,
+};
+
+static struct nvkm_gsp_fwif
+tu102_gsps[] = {
+       {  0,  r535_gsp_load, &tu102_gsp_r535_113_01, "535.113.01" },
+       { -1, gv100_gsp_nofw, &gv100_gsp },
+       {}
+};
+
+int
+tu102_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_gsp **pgsp)
+{
+       return nvkm_gsp_new_(tu102_gsps, device, type, inst, pgsp);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/tu116.c
new file mode 100644 (file)
index 0000000..04fbd9e
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2022 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static const struct nvkm_gsp_func
+tu116_gsp_r535_113_01 = {
+       .flcn = &tu102_gsp_flcn,
+       .fwsec = &tu102_gsp_fwsec,
+
+       .sig_section = ".fwsignature_tu11x",
+
+       .wpr_heap.base_size = 8 << 20,
+       .wpr_heap.min_size = 64 << 20,
+
+       .booter.ctor = tu102_gsp_booter_ctor,
+
+       .dtor = r535_gsp_dtor,
+       .oneinit = tu102_gsp_oneinit,
+       .init = r535_gsp_init,
+       .fini = r535_gsp_fini,
+       .reset = tu102_gsp_reset,
+
+       .rm = &r535_gsp_rm,
+};
+
+static struct nvkm_gsp_fwif
+tu116_gsps[] = {
+       {  0,  r535_gsp_load, &tu116_gsp_r535_113_01, "535.113.01" },
+       { -1, gv100_gsp_nofw, &gv100_gsp },
+       {}
+};
+
+int
+tu116_gsp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+             struct nvkm_gsp **pgsp)
+{
+       return nvkm_gsp_new_(tu116_gsps, device, type, inst, pgsp);
+}
index 46917eb600f9dd3b929f53cf3feeb0b8c3684d47..0494775113128f3fb8981e202af94150e38e317a 100644 (file)
@@ -24,6 +24,8 @@
 #include "priv.h"
 #include "pad.h"
 
+#include <subdev/gsp.h>
+
 static void
 gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable)
 {
@@ -44,5 +46,8 @@ int
 gm200_i2c_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_i2c **pi2c)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_i2c_new_(&gm200_i2c, device, type, inst, pi2c);
 }
index 06cbe19ce3766e9f182be86268a6fea800b56ec5..553d540f27365a625828c519a2597de7a45083af 100644 (file)
@@ -4,3 +4,5 @@ nvkm-y += nvkm/subdev/instmem/nv04.o
 nvkm-y += nvkm/subdev/instmem/nv40.o
 nvkm-y += nvkm/subdev/instmem/nv50.o
 nvkm-y += nvkm/subdev/instmem/gk20a.o
+
+nvkm-y += nvkm/subdev/instmem/r535.o
index 24886eabe8dc3f2eaa655f39decb9e12d2b03f3f..a2cd3330efc66f418ae23c5ffb7ef3ee9afb57f6 100644 (file)
@@ -28,7 +28,7 @@
 /******************************************************************************
  * instmem object base implementation
  *****************************************************************************/
-static void
+void
 nvkm_instobj_load(struct nvkm_instobj *iobj)
 {
        struct nvkm_memory *memory = &iobj->memory;
@@ -48,7 +48,7 @@ nvkm_instobj_load(struct nvkm_instobj *iobj)
        iobj->suspend = NULL;
 }
 
-static int
+int
 nvkm_instobj_save(struct nvkm_instobj *iobj)
 {
        struct nvkm_memory *memory = &iobj->memory;
@@ -179,24 +179,14 @@ static int
 nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
 {
        struct nvkm_instmem *imem = nvkm_instmem(subdev);
-       struct nvkm_instobj *iobj;
+       int ret;
 
        if (suspend) {
-               list_for_each_entry(iobj, &imem->list, head) {
-                       if (iobj->preserve) {
-                               int ret = nvkm_instobj_save(iobj);
-                               if (ret)
-                                       return ret;
-                       }
-               }
-
-               nvkm_bar_bar2_fini(subdev->device);
+               ret = imem->func->suspend(imem);
+               if (ret)
+                       return ret;
 
-               list_for_each_entry(iobj, &imem->boot, head) {
-                       int ret = nvkm_instobj_save(iobj);
-                       if (ret)
-                               return ret;
-               }
+               imem->suspend = true;
        }
 
        if (imem->func->fini)
@@ -209,20 +199,16 @@ static int
 nvkm_instmem_init(struct nvkm_subdev *subdev)
 {
        struct nvkm_instmem *imem = nvkm_instmem(subdev);
-       struct nvkm_instobj *iobj;
 
-       list_for_each_entry(iobj, &imem->boot, head) {
-               if (iobj->suspend)
-                       nvkm_instobj_load(iobj);
-       }
+       if (imem->suspend) {
+               if (imem->func->resume)
+                       imem->func->resume(imem);
 
-       nvkm_bar_bar2_init(subdev->device);
-
-       list_for_each_entry(iobj, &imem->list, head) {
-               if (iobj->suspend)
-                       nvkm_instobj_load(iobj);
+               imem->suspend = false;
+               return 0;
        }
 
+       nvkm_bar_bar2_init(subdev->device);
        return 0;
 }
 
index a4ac94a2ab57fccc10e2c9e4956d823322edc265..1b811d6972a16df8c4335552b2655578510d38f8 100644 (file)
@@ -564,6 +564,8 @@ gk20a_instmem_dtor(struct nvkm_instmem *base)
 static const struct nvkm_instmem_func
 gk20a_instmem = {
        .dtor = gk20a_instmem_dtor,
+       .suspend = nv04_instmem_suspend,
+       .resume = nv04_instmem_resume,
        .memory_new = gk20a_instobj_new,
        .zero = false,
 };
index 25603b01d6f8421a1bb01d73559a1dfb87b0e2d7..e5320ef849bfc99cabf205bc68bb9faf07974176 100644 (file)
@@ -25,6 +25,7 @@
 #include "priv.h"
 
 #include <core/ramht.h>
+#include <subdev/bar.h>
 
 struct nv04_instmem {
        struct nvkm_instmem base;
@@ -154,6 +155,48 @@ nv04_instmem_wr32(struct nvkm_instmem *imem, u32 addr, u32 data)
        nvkm_wr32(imem->subdev.device, 0x700000 + addr, data);
 }
 
+void
+nv04_instmem_resume(struct nvkm_instmem *imem)
+{
+       struct nvkm_instobj *iobj;
+
+       list_for_each_entry(iobj, &imem->boot, head) {
+               if (iobj->suspend)
+                       nvkm_instobj_load(iobj);
+       }
+
+       nvkm_bar_bar2_init(imem->subdev.device);
+
+       list_for_each_entry(iobj, &imem->list, head) {
+               if (iobj->suspend)
+                       nvkm_instobj_load(iobj);
+       }
+}
+
+int
+nv04_instmem_suspend(struct nvkm_instmem *imem)
+{
+       struct nvkm_instobj *iobj;
+
+       list_for_each_entry(iobj, &imem->list, head) {
+               if (iobj->preserve) {
+                       int ret = nvkm_instobj_save(iobj);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       nvkm_bar_bar2_fini(imem->subdev.device);
+
+       list_for_each_entry(iobj, &imem->boot, head) {
+               int ret = nvkm_instobj_save(iobj);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+
 static int
 nv04_instmem_oneinit(struct nvkm_instmem *base)
 {
@@ -210,6 +253,8 @@ static const struct nvkm_instmem_func
 nv04_instmem = {
        .dtor = nv04_instmem_dtor,
        .oneinit = nv04_instmem_oneinit,
+       .suspend = nv04_instmem_suspend,
+       .resume = nv04_instmem_resume,
        .rd32 = nv04_instmem_rd32,
        .wr32 = nv04_instmem_wr32,
        .memory_new = nv04_instobj_new,
index 4b2d7465d22f75c3ca25cddf7f3a8730a2a51f74..a7f3fc342d87e03b031b5008d939c2eb46f49404 100644 (file)
@@ -27,6 +27,7 @@
 #include <core/memory.h>
 #include <subdev/bar.h>
 #include <subdev/fb.h>
+#include <subdev/gsp.h>
 #include <subdev/mmu.h>
 
 struct nv50_instmem {
@@ -394,24 +395,44 @@ nv50_instmem_fini(struct nvkm_instmem *base)
        nv50_instmem(base)->addr = ~0ULL;
 }
 
+static void *
+nv50_instmem_dtor(struct nvkm_instmem *base)
+{
+       return nv50_instmem(base);
+}
+
 static const struct nvkm_instmem_func
 nv50_instmem = {
+       .dtor = nv50_instmem_dtor,
        .fini = nv50_instmem_fini,
+       .suspend = nv04_instmem_suspend,
+       .resume = nv04_instmem_resume,
        .memory_new = nv50_instobj_new,
        .memory_wrap = nv50_instobj_wrap,
        .zero = false,
 };
 
 int
-nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
-                struct nvkm_instmem **pimem)
+nv50_instmem_new_(const struct nvkm_instmem_func *func,
+                 struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+                 struct nvkm_instmem **pimem)
 {
        struct nv50_instmem *imem;
 
        if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
                return -ENOMEM;
-       nvkm_instmem_ctor(&nv50_instmem, device, type, inst, &imem->base);
+       nvkm_instmem_ctor(func, device, type, inst, &imem->base);
        INIT_LIST_HEAD(&imem->lru);
        *pimem = &imem->base;
        return 0;
 }
+
+int
+nv50_instmem_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+                struct nvkm_instmem **pimem)
+{
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_instmem_new(&nv50_instmem, device, type, inst, pimem);
+
+       return nv50_instmem_new_(&nv50_instmem, device, type, inst, pimem);
+}
index 390ca00ab5678b3bac845eb00f0dc56b5920c3a4..4c14c96fb60a28cdf940784809c4050bda3dc24d 100644 (file)
@@ -7,6 +7,8 @@
 struct nvkm_instmem_func {
        void *(*dtor)(struct nvkm_instmem *);
        int (*oneinit)(struct nvkm_instmem *);
+       int (*suspend)(struct nvkm_instmem *);
+       void (*resume)(struct nvkm_instmem *);
        void (*fini)(struct nvkm_instmem *);
        u32  (*rd32)(struct nvkm_instmem *, u32 addr);
        void (*wr32)(struct nvkm_instmem *, u32 addr, u32 data);
@@ -16,10 +18,19 @@ struct nvkm_instmem_func {
        bool zero;
 };
 
+int nv50_instmem_new_(const struct nvkm_instmem_func *, struct nvkm_device *,
+                     enum nvkm_subdev_type, int, struct nvkm_instmem **);
+
 void nvkm_instmem_ctor(const struct nvkm_instmem_func *, struct nvkm_device *,
                       enum nvkm_subdev_type, int, struct nvkm_instmem *);
 void nvkm_instmem_boot(struct nvkm_instmem *);
 
+int nv04_instmem_suspend(struct nvkm_instmem *);
+void nv04_instmem_resume(struct nvkm_instmem *);
+
+int r535_instmem_new(const struct nvkm_instmem_func *,
+                    struct nvkm_device *, enum nvkm_subdev_type, int, struct nvkm_instmem **);
+
 #include <core/memory.h>
 
 struct nvkm_instobj {
@@ -32,4 +43,6 @@ struct nvkm_instobj {
 void nvkm_instobj_ctor(const struct nvkm_memory_func *func,
                       struct nvkm_instmem *, struct nvkm_instobj *);
 void nvkm_instobj_dtor(struct nvkm_instmem *, struct nvkm_instobj *);
+int nvkm_instobj_save(struct nvkm_instobj *);
+void nvkm_instobj_load(struct nvkm_instobj *);
 #endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c
new file mode 100644 (file)
index 0000000..5f3c9c0
--- /dev/null
@@ -0,0 +1,333 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <subdev/gsp.h>
+
+#include <nvhw/drf.h>
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl84a0.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl2080/ctrl2080internal.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+#include <nvrm/535.113.01/nvidia/generated/g_fbsr_nvoc.h>
+#include <nvrm/535.113.01/nvidia/generated/g_rpc-structures.h>
+#include <nvrm/535.113.01/nvidia/kernel/inc/vgpu/rpc_global_enums.h>
+
+struct fbsr_item {
+       const char *type;
+       u64 addr;
+       u64 size;
+
+       struct list_head head;
+};
+
+struct fbsr {
+       struct list_head items;
+
+       u64 size;
+       int regions;
+
+       struct nvkm_gsp_client client;
+       struct nvkm_gsp_device device;
+
+       u64 hmemory;
+       u64 sys_offset;
+};
+
+static int
+fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target aper,
+            u64 phys, u64 size, struct sg_table *sgt, struct nvkm_gsp_object *object)
+{
+       struct nvkm_gsp_client *client = device->object.client;
+       struct nvkm_gsp *gsp = client->gsp;
+       const u32 pages = size / GSP_PAGE_SIZE;
+       rpc_alloc_memory_v13_01 *rpc;
+       int ret;
+
+       rpc = nvkm_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY,
+                              sizeof(*rpc) + pages * sizeof(rpc->pteDesc.pte_pde[0]));
+       if (IS_ERR(rpc))
+               return PTR_ERR(rpc);
+
+       rpc->hClient = client->object.handle;
+       rpc->hDevice = device->object.handle;
+       rpc->hMemory = handle;
+       if (aper == NVKM_MEM_TARGET_HOST) {
+               rpc->hClass = NV01_MEMORY_LIST_SYSTEM;
+               rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, NONCONTIGUOUS) |
+                            NVDEF(NVOS02, FLAGS, LOCATION, PCI) |
+                            NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
+       } else {
+               rpc->hClass = NV01_MEMORY_LIST_FBMEM;
+               rpc->flags = NVDEF(NVOS02, FLAGS, PHYSICALITY, CONTIGUOUS) |
+                            NVDEF(NVOS02, FLAGS, LOCATION, VIDMEM) |
+                            NVDEF(NVOS02, FLAGS, MAPPING, NO_MAP);
+               rpc->format = 6; /* NV_MMU_PTE_KIND_GENERIC_MEMORY */
+       }
+       rpc->pteAdjust = 0;
+       rpc->length = size;
+       rpc->pageCount = pages;
+       rpc->pteDesc.idr = 0;
+       rpc->pteDesc.reserved1 = 0;
+       rpc->pteDesc.length = pages;
+
+       if (sgt) {
+               struct scatterlist *sgl;
+               int pte = 0, idx;
+
+               for_each_sgtable_dma_sg(sgt, sgl, idx) {
+                       for (int i = 0; i < sg_dma_len(sgl) / GSP_PAGE_SIZE; i++)
+                               rpc->pteDesc.pte_pde[pte++].pte = (sg_dma_address(sgl) >> 12) + i;
+
+               }
+       } else {
+               for (int i = 0; i < pages; i++)
+                       rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i;
+       }
+
+       ret = nvkm_gsp_rpc_wr(gsp, rpc, true);
+       if (ret)
+               return ret;
+
+       object->client = device->object.client;
+       object->parent = &device->object;
+       object->handle = handle;
+       return 0;
+}
+
+static int
+fbsr_send(struct fbsr *fbsr, struct fbsr_item *item)
+{
+       NV2080_CTRL_INTERNAL_FBSR_SEND_REGION_INFO_PARAMS *ctrl;
+       struct nvkm_gsp *gsp = fbsr->client.gsp;
+       struct nvkm_gsp_object memlist;
+       int ret;
+
+       ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_VRAM,
+                          item->addr, item->size, NULL, &memlist);
+       if (ret)
+               return ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_FBSR_SEND_REGION_INFO,
+                                   sizeof(*ctrl));
+       if (IS_ERR(ctrl)) {
+               ret = PTR_ERR(ctrl);
+               goto done;
+       }
+
+       ctrl->fbsrType = FBSR_TYPE_DMA;
+       ctrl->hClient = fbsr->client.object.handle;
+       ctrl->hVidMem = fbsr->hmemory++;
+       ctrl->vidOffset = 0;
+       ctrl->sysOffset = fbsr->sys_offset;
+       ctrl->size = item->size;
+
+       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+done:
+       nvkm_gsp_rm_free(&memlist);
+       if (ret)
+               return ret;
+
+       fbsr->sys_offset += item->size;
+       return 0;
+}
+
+static int
+fbsr_init(struct fbsr *fbsr, struct sg_table *sgt, u64 items_size)
+{
+       NV2080_CTRL_INTERNAL_FBSR_INIT_PARAMS *ctrl;
+       struct nvkm_gsp *gsp = fbsr->client.gsp;
+       struct nvkm_gsp_object memlist;
+       int ret;
+
+       ret = fbsr_memlist(&fbsr->device, fbsr->hmemory, NVKM_MEM_TARGET_HOST,
+                          0, fbsr->size, sgt, &memlist);
+       if (ret)
+               return ret;
+
+       ctrl = nvkm_gsp_rm_ctrl_get(&gsp->internal.device.subdevice,
+                                   NV2080_CTRL_CMD_INTERNAL_FBSR_INIT, sizeof(*ctrl));
+       if (IS_ERR(ctrl))
+               return PTR_ERR(ctrl);
+
+       ctrl->fbsrType = FBSR_TYPE_DMA;
+       ctrl->numRegions = fbsr->regions;
+       ctrl->hClient = fbsr->client.object.handle;
+       ctrl->hSysMem = fbsr->hmemory++;
+       ctrl->gspFbAllocsSysOffset = items_size;
+
+       ret = nvkm_gsp_rm_ctrl_wr(&gsp->internal.device.subdevice, ctrl);
+       if (ret)
+               return ret;
+
+       nvkm_gsp_rm_free(&memlist);
+       return 0;
+}
+
+static bool
+fbsr_vram(struct fbsr *fbsr, const char *type, u64 addr, u64 size)
+{
+       struct fbsr_item *item;
+
+       if (!(item = kzalloc(sizeof(*item), GFP_KERNEL)))
+               return false;
+
+       item->type = type;
+       item->addr = addr;
+       item->size = size;
+       list_add_tail(&item->head, &fbsr->items);
+       return true;
+}
+
+static bool
+fbsr_inst(struct fbsr *fbsr, const char *type, struct nvkm_memory *memory)
+{
+       return fbsr_vram(fbsr, type, nvkm_memory_addr(memory), nvkm_memory_size(memory));
+}
+
+static void
+r535_instmem_resume(struct nvkm_instmem *imem)
+{
+       /* RM has restored VRAM contents already, so just need to free the sysmem buffer. */
+       if (imem->rm.fbsr_valid) {
+               nvkm_gsp_sg_free(imem->subdev.device, &imem->rm.fbsr);
+               imem->rm.fbsr_valid = false;
+       }
+}
+
+static int
+r535_instmem_suspend(struct nvkm_instmem *imem)
+{
+       struct nvkm_subdev *subdev = &imem->subdev;
+       struct nvkm_device *device = subdev->device;
+       struct nvkm_gsp *gsp = device->gsp;
+       struct nvkm_instobj *iobj;
+       struct fbsr fbsr = {};
+       struct fbsr_item *item, *temp;
+       u64 items_size;
+       int ret;
+
+       INIT_LIST_HEAD(&fbsr.items);
+       fbsr.hmemory = 0xcaf00003;
+
+       /* Create a list of all regions we need RM to save during suspend. */
+       list_for_each_entry(iobj, &imem->list, head) {
+               if (iobj->preserve) {
+                       if (!fbsr_inst(&fbsr, "inst", &iobj->memory))
+                               return -ENOMEM;
+               }
+       }
+
+       list_for_each_entry(iobj, &imem->boot, head) {
+               if (!fbsr_inst(&fbsr, "boot", &iobj->memory))
+                       return -ENOMEM;
+       }
+
+       if (!fbsr_vram(&fbsr, "gsp-non-wpr", gsp->fb.heap.addr, gsp->fb.heap.size))
+               return -ENOMEM;
+
+       /* Determine memory requirements. */
+       list_for_each_entry(item, &fbsr.items, head) {
+               nvkm_debug(subdev, "fbsr: %016llx %016llx %s\n",
+                          item->addr, item->size, item->type);
+               fbsr.size += item->size;
+               fbsr.regions++;
+       }
+
+       items_size = fbsr.size;
+       nvkm_debug(subdev, "fbsr: %d regions (0x%llx bytes)\n", fbsr.regions, items_size);
+
+       fbsr.size += gsp->fb.rsvd_size;
+       fbsr.size += gsp->fb.bios.vga_workspace.size;
+       nvkm_debug(subdev, "fbsr: size: 0x%llx bytes\n", fbsr.size);
+
+       ret = nvkm_gsp_sg(gsp->subdev.device, fbsr.size, &imem->rm.fbsr);
+       if (ret)
+               goto done;
+
+       /* Tell RM about the sysmem which will hold VRAM contents across suspend. */
+       ret = nvkm_gsp_client_device_ctor(gsp, &fbsr.client, &fbsr.device);
+       if (ret)
+               goto done_sgt;
+
+       ret = fbsr_init(&fbsr, &imem->rm.fbsr, items_size);
+       if (WARN_ON(ret))
+               goto done_sgt;
+
+       /* Send VRAM regions that need saving. */
+       list_for_each_entry(item, &fbsr.items, head) {
+               ret = fbsr_send(&fbsr, item);
+               if (WARN_ON(ret))
+                       goto done_sgt;
+       }
+
+       imem->rm.fbsr_valid = true;
+
+       /* Cleanup everything except the sysmem backup, which will be removed after resume. */
+done_sgt:
+       if (ret) /* ... unless we failed already. */
+               nvkm_gsp_sg_free(device, &imem->rm.fbsr);
+done:
+       list_for_each_entry_safe(item, temp, &fbsr.items, head) {
+               list_del(&item->head);
+               kfree(item);
+       }
+
+       nvkm_gsp_device_dtor(&fbsr.device);
+       nvkm_gsp_client_dtor(&fbsr.client);
+       return ret;
+}
+
+static void *
+r535_instmem_dtor(struct nvkm_instmem *imem)
+{
+       kfree(imem->func);
+       return imem;
+}
+
+int
+r535_instmem_new(const struct nvkm_instmem_func *hw,
+                struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+                struct nvkm_instmem **pinstmem)
+{
+       struct nvkm_instmem_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_instmem_dtor;
+       rm->fini = hw->fini;
+       rm->suspend = r535_instmem_suspend;
+       rm->resume  = r535_instmem_resume;
+       rm->memory_new = hw->memory_new;
+       rm->memory_wrap = hw->memory_wrap;
+       rm->zero = false;
+
+       ret = nv50_instmem_new_(rm, device, type, inst, pinstmem);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 159d9f8c95f34f621cb73dc51ff0514e9790ece6..951f01e3032a122ca4a84ae0078fa590433f9e20 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static void
 ga102_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
 {
@@ -53,5 +55,8 @@ int
 ga102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_ltc **pltc)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_ltc_new_(&ga102_ltc, device, type, inst, pltc);
 }
index 265a05fd5f6bca4dcf164860d094155b4036ecfc..053302ecb0a5ef060a3f3c12a8f6bfb93685edac 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 void
 gp102_ltc_zbc_clear_stencil(struct nvkm_ltc *ltc, int i, const u32 stencil)
 {
@@ -49,5 +51,8 @@ int
 gp102_ltc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_ltc **pltc)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_ltc_new_(&gp102_ltc, device, type, inst, pltc);
 }
index 5d28d30d09d5c512d8bcef50f7300ff75d9b57aa..65e9f04972dce3b19ac5dfb659d1b95d4eb9a6ee 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static void
 ga100_mc_device_disable(struct nvkm_mc *mc, u32 mask)
 {
@@ -72,5 +74,8 @@ ga100_mc = {
 int
 ga100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_mc_new_(&ga100_mc, device, type, inst, pmc);
 }
index eb2ab03f43607353466e3bea0c5b616d21047cc9..05d2fa95e05eaca175aa090fa2513f8f16051dab 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 const struct nvkm_intr_data
 gp100_mc_intrs[] = {
        { NVKM_ENGINE_DISP    , 0, 0, 0x04000000, true },
@@ -98,5 +100,8 @@ gp100_mc = {
 int
 gp100_mc_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst, struct nvkm_mc **pmc)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_mc_new_(&gp100_mc, device, type, inst, pmc);
 }
index a602b0cb5b31d9089781cd5961b0afad294f1b09..7ba35ea59c06d001109e59408f167f84103a8003 100644 (file)
@@ -16,6 +16,8 @@ nvkm-y += nvkm/subdev/mmu/gp10b.o
 nvkm-y += nvkm/subdev/mmu/gv100.o
 nvkm-y += nvkm/subdev/mmu/tu102.o
 
+nvkm-y += nvkm/subdev/mmu/r535.o
+
 nvkm-y += nvkm/subdev/mmu/mem.o
 nvkm-y += nvkm/subdev/mmu/memnv04.o
 nvkm-y += nvkm/subdev/mmu/memnv50.o
index ad3b44a9e0e71583fd154b653e7f9e2f03e77f80..b67ace7ae93ce4b49316507d05f0a2283a8e7dd1 100644 (file)
@@ -403,6 +403,10 @@ nvkm_mmu_dtor(struct nvkm_subdev *subdev)
 
        nvkm_mmu_ptc_fini(mmu);
        mutex_destroy(&mmu->mutex);
+
+       if (mmu->func->dtor)
+               mmu->func->dtor(mmu);
+
        return mmu;
 }
 
index 5265bf4d8366c0133a0318b318f26071ac3262fa..e9ca6537778ce182e2313700187d4483600a2199 100644 (file)
@@ -4,12 +4,16 @@
 #define nvkm_mmu(p) container_of((p), struct nvkm_mmu, subdev)
 #include <subdev/mmu.h>
 
+int r535_mmu_new(const struct nvkm_mmu_func *hw, struct nvkm_device *, enum nvkm_subdev_type, int,
+                struct nvkm_mmu **);
+
 void nvkm_mmu_ctor(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
                   struct nvkm_mmu *);
 int nvkm_mmu_new_(const struct nvkm_mmu_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
                  struct nvkm_mmu **);
 
 struct nvkm_mmu_func {
+       void (*dtor)(struct nvkm_mmu *);
        void (*init)(struct nvkm_mmu *);
 
        u8  dma_bits;
@@ -37,6 +41,8 @@ struct nvkm_mmu_func {
 
        const u8 *(*kind)(struct nvkm_mmu *, int *count, u8 *invalid);
        bool kind_sys;
+
+       int (*promote_vmm)(struct nvkm_vmm *);
 };
 
 extern const struct nvkm_mmu_func nv04_mmu;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/r535.c
new file mode 100644 (file)
index 0000000..d3e9545
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "vmm.h"
+
+#include <nvrm/nvtypes.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/class/cl90f1.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/ctrl/ctrl90f1.h>
+#include <nvrm/535.113.01/common/sdk/nvidia/inc/nvos.h>
+
+static int
+r535_mmu_promote_vmm(struct nvkm_vmm *vmm)
+{
+       NV_VASPACE_ALLOCATION_PARAMETERS *args;
+       int ret;
+
+       ret = nvkm_gsp_client_device_ctor(vmm->mmu->subdev.device->gsp,
+                                         &vmm->rm.client, &vmm->rm.device);
+       if (ret)
+               return ret;
+
+       args = nvkm_gsp_rm_alloc_get(&vmm->rm.device.object, 0x90f10000, FERMI_VASPACE_A,
+                                    sizeof(*args), &vmm->rm.object);
+       if (IS_ERR(args))
+               return PTR_ERR(args);
+
+       args->index = NV_VASPACE_ALLOCATION_INDEX_GPU_NEW;
+
+       ret = nvkm_gsp_rm_alloc_wr(&vmm->rm.object, args);
+       if (ret)
+               return ret;
+
+       {
+               NV90F1_CTRL_VASPACE_COPY_SERVER_RESERVED_PDES_PARAMS *ctrl;
+
+               mutex_lock(&vmm->mutex.vmm);
+               ret = nvkm_vmm_get_locked(vmm, true, false, false, 0x1d, 32, 0x20000000,
+                                         &vmm->rm.rsvd);
+               mutex_unlock(&vmm->mutex.vmm);
+               if (ret)
+                       return ret;
+
+               ctrl = nvkm_gsp_rm_ctrl_get(&vmm->rm.object,
+                                           NV90F1_CTRL_CMD_VASPACE_COPY_SERVER_RESERVED_PDES,
+                                           sizeof(*ctrl));
+               if (IS_ERR(ctrl))
+                       return PTR_ERR(ctrl);
+
+               ctrl->pageSize = 0x20000000;
+               ctrl->virtAddrLo = vmm->rm.rsvd->addr;
+               ctrl->virtAddrHi = vmm->rm.rsvd->addr + vmm->rm.rsvd->size - 1;
+               ctrl->numLevelsToCopy = vmm->pd->pde[0]->pde[0] ? 3 : 2;
+               ctrl->levels[0].physAddress = vmm->pd->pt[0]->addr;
+               ctrl->levels[0].size = 0x20;
+               ctrl->levels[0].aperture = 1;
+               ctrl->levels[0].pageShift = 0x2f;
+               ctrl->levels[1].physAddress = vmm->pd->pde[0]->pt[0]->addr;
+               ctrl->levels[1].size = 0x1000;
+               ctrl->levels[1].aperture = 1;
+               ctrl->levels[1].pageShift = 0x26;
+               if (vmm->pd->pde[0]->pde[0]) {
+                       ctrl->levels[2].physAddress = vmm->pd->pde[0]->pde[0]->pt[0]->addr;
+                       ctrl->levels[2].size = 0x1000;
+                       ctrl->levels[2].aperture = 1;
+                       ctrl->levels[2].pageShift = 0x1d;
+               }
+
+               ret = nvkm_gsp_rm_ctrl_wr(&vmm->rm.object, ctrl);
+       }
+
+       return ret;
+}
+
+static void
+r535_mmu_dtor(struct nvkm_mmu *mmu)
+{
+       kfree(mmu->func);
+}
+
+int
+r535_mmu_new(const struct nvkm_mmu_func *hw,
+            struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
+            struct nvkm_mmu **pmmu)
+{
+       struct nvkm_mmu_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_mmu_dtor;
+       rm->dma_bits = hw->dma_bits;
+       rm->mmu = hw->mmu;
+       rm->mem = hw->mem;
+       rm->vmm = hw->vmm;
+       rm->kind = hw->kind;
+       rm->kind_sys = hw->kind_sys;
+       rm->promote_vmm = r535_mmu_promote_vmm;
+
+       ret = nvkm_mmu_new_(rm, device, type, inst, pmmu);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 8d060ce47f8657aeef67c7533f0f24f2d4e2930e..df662ce4a4b0173bc40cced588cf6dd71aa0de03 100644 (file)
@@ -24,6 +24,7 @@
 #include "vmm.h"
 
 #include <core/option.h>
+#include <subdev/gsp.h>
 
 #include <nvif/class.h>
 
@@ -54,5 +55,8 @@ int
 tu102_mmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_mmu **pmmu)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_mmu_new(&tu102_mmu, device, type, inst, pmmu);
+
        return nvkm_mmu_new_(&tu102_mmu, device, type, inst, pmmu);
 }
index 8e459d88ff8f89426e163e43069a1a0698b9c44f..cf490ff2b9f142be86ab9ff8944b0155268e0bd0 100644 (file)
@@ -572,6 +572,12 @@ nvkm_uvmm_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
        }
        uvmm->vmm->managed.raw = raw;
 
+       if (mmu->func->promote_vmm) {
+               ret = mmu->func->promote_vmm(uvmm->vmm);
+               if (ret)
+                       return ret;
+       }
+
        page = uvmm->vmm->func->page;
        args->v0.page_nr = 0;
        while (page && (page++)->shift)
index eb5fcadcb39aa66caaf27308eb36de602a483019..9c97800fe03777e27fe9926b1bc9c86041843a94 100644 (file)
@@ -1030,6 +1030,13 @@ nvkm_vmm_dtor(struct nvkm_vmm *vmm)
        struct nvkm_vma *vma;
        struct rb_node *node;
 
+       if (vmm->rm.client.gsp) {
+               nvkm_gsp_rm_free(&vmm->rm.object);
+               nvkm_gsp_device_dtor(&vmm->rm.device);
+               nvkm_gsp_client_dtor(&vmm->rm.client);
+               nvkm_vmm_put(vmm, &vmm->rm.rsvd);
+       }
+
        if (0)
                nvkm_vmm_dump(vmm);
 
index 0095d58d4d9a10ef88e173b176bf5d2fddb06bc1..e34bc60764010f8307a01ce4fe21808bb99e5e43 100644 (file)
@@ -35,9 +35,11 @@ tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
 
        mutex_lock(&vmm->mmu->mutex);
 
-       nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+       if (!vmm->rm.bar2_pdb)
+               nvkm_wr32(device, 0xb830a0, vmm->pd->pt[0]->addr >> 8);
+       else
+               nvkm_wr32(device, 0xb830a0, vmm->rm.bar2_pdb >> 8);
        nvkm_wr32(device, 0xb830a4, 0x00000000);
-       nvkm_wr32(device, 0x100e68, 0x00000000);
        nvkm_wr32(device, 0xb830b0, 0x80000000 | type);
 
        nvkm_msec(device, 2000,
index cd3148360996f24964f9f892524672ddacbc8168..da5b2b2190d3d6f741dc1f7eb06dc3ca3519bfcb 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static const struct nvkm_falcon_func
 gp102_pmu_flcn = {
        .disable = gm200_flcn_disable,
@@ -54,5 +56,8 @@ int
 gp102_pmu_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_pmu **ppmu)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_pmu_new_(gp102_pmu_fwif, device, type, inst, ppmu);
 }
index b4eaf6db36d728fda55143f85d5ab61b9212beb9..b4530073bfdc44ed8f9b3abe6935cf8b533b8026 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static const struct nvkm_subdev_func
 gm200_privring = {
        .intr = gk104_privring_intr,
@@ -32,5 +34,8 @@ int
 gm200_privring_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                   struct nvkm_subdev **pprivring)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_subdev_new_(&gm200_privring, device, type, inst, pprivring);
 }
index 44f021392b955d9f708bd1c77475b6680c22dd31..5392833d361483950259464673a8e7a3d464628f 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static int
 gp100_temp_get(struct nvkm_therm *therm)
 {
@@ -52,5 +54,8 @@ int
 gp100_therm_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
                struct nvkm_therm **ptherm)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_therm_new_(&gp100_therm, device, type, inst, ptherm);
 }
index 84790cf52b9029b97cc62da1e08d9f8ae1a96950..129eabb8b9e60802c820013e2dcfc159bab4e73e 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static int
 ga100_top_parse(struct nvkm_top *top)
 {
@@ -76,7 +78,7 @@ ga100_top_parse(struct nvkm_top *top)
                case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
                case 0x00000013: I_(NVKM_ENGINE_CE    , inst); break;
                case 0x00000014: O_(NVKM_SUBDEV_GSP   ,    0); break;
-               case 0x00000015: O_(NVKM_ENGINE_NVJPG ,    0); break;
+               case 0x00000015: I_(NVKM_ENGINE_NVJPG , inst); break;
                case 0x00000016: O_(NVKM_ENGINE_OFA   ,    0); break;
                case 0x00000017: O_(NVKM_SUBDEV_FLA   ,    0); break;
                        break;
@@ -104,5 +106,8 @@ int
 ga100_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_top **ptop)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_top_new_(&ga100_top, device, type, inst, ptop);
 }
index 2bbba8244cbf14354007984206b39ac417cd7123..da55dac8c286a4faf914068216a5b72db5c135bb 100644 (file)
@@ -23,6 +23,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 static int
 gk104_top_parse(struct nvkm_top *top)
 {
@@ -89,7 +91,7 @@ gk104_top_parse(struct nvkm_top *top)
                case 0x00000012: I_(NVKM_SUBDEV_IOCTRL, inst); break;
                case 0x00000013: I_(NVKM_ENGINE_CE    , inst); break;
                case 0x00000014: O_(NVKM_SUBDEV_GSP   ,    0); break;
-               case 0x00000015: O_(NVKM_ENGINE_NVJPG ,    0); break;
+               case 0x00000015: I_(NVKM_ENGINE_NVJPG , inst); break;
                default:
                        break;
                }
@@ -115,5 +117,8 @@ int
 gk104_top_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
              struct nvkm_top **ptop)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return -ENODEV;
+
        return nvkm_top_new_(&gk104_top, device, type, inst, ptop);
 }
index 23cd21b40a25e463c52480e6e859290898e86a72..23a85460615f267ccba7fb75e3b8a9c080d7f19e 100644 (file)
@@ -4,3 +4,5 @@ nvkm-y += nvkm/subdev/vfn/uvfn.o
 nvkm-y += nvkm/subdev/vfn/gv100.o
 nvkm-y += nvkm/subdev/vfn/tu102.o
 nvkm-y += nvkm/subdev/vfn/ga100.o
+
+nvkm-y += nvkm/subdev/vfn/r535.o
index fd5c6931322dc57c0b3d6fbd5d7ef0aee49d57f5..bb0bb6fda54b9d353ded9ebedb4dc81d73a2890a 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static const struct nvkm_intr_data
@@ -43,5 +45,8 @@ int
 ga100_vfn_new(struct nvkm_device *device,
              enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_vfn_new(&ga100_vfn, device, type, inst, 0xb80000, pvfn);
+
        return nvkm_vfn_new_(&ga100_vfn, device, type, inst, 0xb80000, pvfn);
 }
index 96d53c02041b582449854a3992136484e80cef13..3a09781ad0320452c27009be5244c7ca4edbb627 100644 (file)
@@ -5,16 +5,21 @@
 #include <subdev/vfn.h>
 
 struct nvkm_vfn_func {
+       void (*dtor)(struct nvkm_vfn *);
+
        const struct nvkm_intr_func *intr;
        const struct nvkm_intr_data *intrs;
 
        struct {
                u32 addr;
                u32 size;
-               const struct nvkm_sclass base;
+               struct nvkm_sclass base;
        } user;
 };
 
+int r535_vfn_new(const struct nvkm_vfn_func *hw, struct nvkm_device *, enum nvkm_subdev_type, int,
+                u32 addr, struct nvkm_vfn **);
+
 int nvkm_vfn_new_(const struct nvkm_vfn_func *, struct nvkm_device *, enum nvkm_subdev_type, int,
                  u32 addr, struct nvkm_vfn **);
 
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/vfn/r535.c
new file mode 100644 (file)
index 0000000..dce3373
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2023 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+static void
+r535_vfn_dtor(struct nvkm_vfn *vfn)
+{
+       kfree(vfn->func);
+}
+
+int
+r535_vfn_new(const struct nvkm_vfn_func *hw,
+            struct nvkm_device *device, enum nvkm_subdev_type type, int inst, u32 addr,
+            struct nvkm_vfn **pvfn)
+{
+       struct nvkm_vfn_func *rm;
+       int ret;
+
+       if (!(rm = kzalloc(sizeof(*rm), GFP_KERNEL)))
+               return -ENOMEM;
+
+       rm->dtor = r535_vfn_dtor;
+       rm->intr = hw->intr;
+       rm->user = hw->user;
+
+       ret = nvkm_vfn_new_(rm, device, type, inst, addr, pvfn);
+       if (ret)
+               kfree(rm);
+
+       return ret;
+}
index 3d063fb5e136612c2995933bf23ed13b65219f25..a3bf13c5c79b133aef3eb71f8434091ab602f2d7 100644 (file)
@@ -21,6 +21,8 @@
  */
 #include "priv.h"
 
+#include <subdev/gsp.h>
+
 #include <nvif/class.h>
 
 static void
@@ -104,5 +106,8 @@ int
 tu102_vfn_new(struct nvkm_device *device,
              enum nvkm_subdev_type type, int inst, struct nvkm_vfn **pvfn)
 {
+       if (nvkm_gsp_rm(device->gsp))
+               return r535_vfn_new(&tu102_vfn, device, type, inst, 0xb80000, pvfn);
+
        return nvkm_vfn_new_(&tu102_vfn, device, type, inst, 0xb80000, pvfn);
 }
index 6492a70e3c396a5b18590a5bf342c18b31560646..404b0483bb7cb91d98703f1cf13a404cb642d1d6 100644 (file)
@@ -1229,6 +1229,9 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev)
        if (!qdev->monitors_config_bo)
                return 0;
 
+       kfree(qdev->dumb_heads);
+       qdev->dumb_heads = NULL;
+
        qdev->monitors_config = NULL;
        qdev->ram_header->monitors_config = 0;
 
index 8a6621f1e82caa855acadb97a5daf31605445ce1..2db40789235cb15c654ebd206cc36430cb955bb4 100644 (file)
@@ -3893,7 +3893,7 @@ typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
 typedef struct _ATOM_GPIO_PIN_LUT
 {
   ATOM_COMMON_TABLE_HEADER  sHeader;
-  ATOM_GPIO_PIN_ASSIGNMENT     asGPIO_Pin[1];
+  ATOM_GPIO_PIN_ASSIGNMENT     asGPIO_Pin[];
 }ATOM_GPIO_PIN_LUT;
 
 /****************************************************************************/ 
@@ -4061,7 +4061,7 @@ typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT         //usSrcDstTableOffset
   UCHAR               ucNumberOfSrc;
   USHORT              usSrcObjectID[1];
   UCHAR               ucNumberOfDst;
-  USHORT              usDstObjectID[1];
+  USHORT              usDstObjectID[];
 }ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
 
 
@@ -4233,7 +4233,7 @@ typedef struct  _ATOM_CONNECTOR_DEVICE_TAG_RECORD
   ATOM_COMMON_RECORD_HEADER   sheader;
   UCHAR                       ucNumberOfDevice;
   UCHAR                       ucReserved;
-  ATOM_CONNECTOR_DEVICE_TAG   asDeviceTag[1];         //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
+  ATOM_CONNECTOR_DEVICE_TAG   asDeviceTag[];          //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
 }ATOM_CONNECTOR_DEVICE_TAG_RECORD;
 
 
@@ -4293,7 +4293,7 @@ typedef struct  _ATOM_OBJECT_GPIO_CNTL_RECORD
   ATOM_COMMON_RECORD_HEADER   sheader;
   UCHAR                       ucFlags;                // Future expnadibility
   UCHAR                       ucNumberOfPins;         // Number of GPIO pins used to control the object
-  ATOM_GPIO_PIN_CONTROL_PAIR  asGpio[1];              // the real gpio pin pair determined by number of pins ucNumberOfPins
+  ATOM_GPIO_PIN_CONTROL_PAIR  asGpio[];               // the real gpio pin pair determined by number of pins ucNumberOfPins
 }ATOM_OBJECT_GPIO_CNTL_RECORD;
 
 //Definitions for GPIO pin state 
@@ -4444,7 +4444,7 @@ typedef struct  _ATOM_BRACKET_LAYOUT_RECORD
   UCHAR                       ucWidth;
   UCHAR                       ucConnNum;
   UCHAR                       ucReserved;
-  ATOM_CONNECTOR_LAYOUT_INFO  asConnInfo[1];
+  ATOM_CONNECTOR_LAYOUT_INFO  asConnInfo[];
 }ATOM_BRACKET_LAYOUT_RECORD;
 
 /****************************************************************************/ 
@@ -4600,7 +4600,7 @@ typedef struct  _ATOM_I2C_VOLTAGE_OBJECT_V3
    UCHAR    ucVoltageControlAddress;
    UCHAR    ucVoltageControlOffset;            
    ULONG    ulReserved;
-   VOLTAGE_LUT_ENTRY asVolI2cLut[1];        // end with 0xff
+   VOLTAGE_LUT_ENTRY asVolI2cLut[];         // end with 0xff
 }ATOM_I2C_VOLTAGE_OBJECT_V3;
 
 // ATOM_I2C_VOLTAGE_OBJECT_V3.ucVoltageControlFlag
@@ -4625,7 +4625,7 @@ typedef struct  _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
    UCHAR    ucLeakageEntryNum;           // indicate the entry number of LeakageId/Voltage Lut table
    UCHAR    ucReserved[2];               
    ULONG    ulMaxVoltageLevel;
-   LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];   
+   LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[];
 }ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
 
 
@@ -4753,7 +4753,7 @@ typedef struct _ATOM_POWER_SOURCE_INFO
 {
                ATOM_COMMON_TABLE_HEADER                asHeader;
                UCHAR                                                                                           asPwrbehave[16];
-               ATOM_POWER_SOURCE_OBJECT                asPwrObj[1];
+               ATOM_POWER_SOURCE_OBJECT                asPwrObj[];
 }ATOM_POWER_SOURCE_INFO;
 
 
@@ -5440,7 +5440,7 @@ typedef struct _ATOM_FUSION_SYSTEM_INFO_V2
 typedef struct _ATOM_I2C_DATA_RECORD
 {
   UCHAR         ucNunberOfBytes;                                              //Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop"
-  UCHAR         ucI2CData[1];                                                 //I2C data in bytes, should be less than 16 bytes usually
+  UCHAR         ucI2CData[];                                                  //I2C data in bytes, should be less than 16 bytes usually
 }ATOM_I2C_DATA_RECORD;
 
 
@@ -5451,14 +5451,14 @@ typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
   UCHAR                                        ucSSChipID;             //SS chip being used
   UCHAR                                        ucSSChipSlaveAddr;      //Slave Address to set up this SS chip
   UCHAR                           ucNumOfI2CDataRecords;  //number of data block
-  ATOM_I2C_DATA_RECORD            asI2CData[1];  
+  ATOM_I2C_DATA_RECORD            asI2CData[];
 }ATOM_I2C_DEVICE_SETUP_INFO;
 
 //==========================================================================================
 typedef struct  _ATOM_ASIC_MVDD_INFO
 {
   ATOM_COMMON_TABLE_HEADER           sHeader; 
-  ATOM_I2C_DEVICE_SETUP_INFO      asI2CSetup[1];
+  ATOM_I2C_DEVICE_SETUP_INFO      asI2CSetup[];
 }ATOM_ASIC_MVDD_INFO;
 
 //==========================================================================================
@@ -5520,7 +5520,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
 typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
 {
   ATOM_COMMON_TABLE_HEADER           sHeader; 
-  ATOM_ASIC_SS_ASSIGNMENT_V2             asSpreadSpectrum[1];      //this is point only. 
+  ATOM_ASIC_SS_ASSIGNMENT_V2             asSpreadSpectrum[];       //this is point only.
 }ATOM_ASIC_INTERNAL_SS_INFO_V2;
 
 typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
@@ -5542,7 +5542,7 @@ typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
 typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
 {
   ATOM_COMMON_TABLE_HEADER           sHeader; 
-  ATOM_ASIC_SS_ASSIGNMENT_V3             asSpreadSpectrum[1];      //this is pointer only. 
+  ATOM_ASIC_SS_ASSIGNMENT_V3             asSpreadSpectrum[];       //this is pointer only.
 }ATOM_ASIC_INTERNAL_SS_INFO_V3;
 
 
@@ -6282,7 +6282,7 @@ typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
 
 typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
        ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS                    ulMemoryID;
-       ULONG                                                                                                                           aulMemData[1];
+       ULONG                                                                                                                           aulMemData[];
 }ATOM_MEMORY_SETTING_DATA_BLOCK;
 
 
@@ -7092,7 +7092,7 @@ typedef struct _ATOM_DISP_OUT_INFO_V3
   UCHAR  ucCoreRefClkSource;                    // value of CORE_REF_CLK_SOURCE
   UCHAR  ucDispCaps;
   UCHAR  ucReserved[2];
-  ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[1];     // for alligment only
+  ASIC_TRANSMITTER_INFO_V2  asTransmitterInfo[];      // for alligment only
 }ATOM_DISP_OUT_INFO_V3;
 
 //ucDispCaps
@@ -7324,12 +7324,12 @@ typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
   USHORT usMaxClockFreq;
   UCHAR  ucEncodeMode;
   UCHAR  ucPhySel;
-  ULONG  ulAnalogSetting[1];
+  ULONG  ulAnalogSetting[];
 }CLOCK_CONDITION_SETTING_ENTRY;
 
 typedef struct _CLOCK_CONDITION_SETTING_INFO{
   USHORT usEntrySize;
-  CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
+  CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[];
 }CLOCK_CONDITION_SETTING_INFO;
 
 typedef struct _PHY_CONDITION_REG_VAL{
@@ -7346,27 +7346,27 @@ typedef struct _PHY_CONDITION_REG_VAL_V2{
 typedef struct _PHY_CONDITION_REG_INFO{
   USHORT usRegIndex;
   USHORT usSize;
-  PHY_CONDITION_REG_VAL asRegVal[1];
+  PHY_CONDITION_REG_VAL asRegVal[];
 }PHY_CONDITION_REG_INFO;
 
 typedef struct _PHY_CONDITION_REG_INFO_V2{
   USHORT usRegIndex;
   USHORT usSize;
-  PHY_CONDITION_REG_VAL_V2 asRegVal[1];
+  PHY_CONDITION_REG_VAL_V2 asRegVal[];
 }PHY_CONDITION_REG_INFO_V2;
 
 typedef struct _PHY_ANALOG_SETTING_INFO{
   UCHAR  ucEncodeMode;
   UCHAR  ucPhySel;
   USHORT usSize;
-  PHY_CONDITION_REG_INFO  asAnalogSetting[1];
+  PHY_CONDITION_REG_INFO  asAnalogSetting[];
 }PHY_ANALOG_SETTING_INFO;
 
 typedef struct _PHY_ANALOG_SETTING_INFO_V2{
   UCHAR  ucEncodeMode;
   UCHAR  ucPhySel;
   USHORT usSize;
-  PHY_CONDITION_REG_INFO_V2  asAnalogSetting[1];
+  PHY_CONDITION_REG_INFO_V2  asAnalogSetting[];
 }PHY_ANALOG_SETTING_INFO_V2;
 
 typedef struct _GFX_HAVESTING_PARAMETERS {
index ad14112999ad8abaedf19f0e9c53685273fe7e8e..027220b8fe1c5fbd462a2763156d192b4e855feb 100644 (file)
@@ -1,11 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0
 config DRM_SHMOBILE
        tristate "DRM Support for SH Mobile"
-       depends on DRM
+       depends on DRM && PM
        depends on ARCH_RENESAS || ARCH_SHMOBILE || COMPILE_TEST
        select BACKLIGHT_CLASS_DEVICE
        select DRM_KMS_HELPER
        select DRM_GEM_DMA_HELPER
+       select VIDEOMODE_HELPERS
        help
          Choose this option if you have an SH Mobile chipset.
          If M is selected the module will be called shmob-drm.
index 861edafed8562c875b544715c554cdf3b787f269..2679555d61a702073ae9b2771f3c5d995cf564e0 100644 (file)
@@ -1,6 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
-shmob-drm-y := shmob_drm_backlight.o \
-              shmob_drm_crtc.o \
+shmob-drm-y := shmob_drm_crtc.o \
               shmob_drm_drv.o \
               shmob_drm_kms.o \
               shmob_drm_plane.o
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.c
deleted file mode 100644 (file)
index 794573b..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * shmob_drm_backlight.c  --  SH Mobile DRM Backlight
- *
- * Copyright (C) 2012 Renesas Electronics Corporation
- *
- * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- */
-
-#include <linux/backlight.h>
-
-#include "shmob_drm_backlight.h"
-#include "shmob_drm_crtc.h"
-#include "shmob_drm_drv.h"
-
-static int shmob_drm_backlight_update(struct backlight_device *bdev)
-{
-       struct shmob_drm_connector *scon = bl_get_data(bdev);
-       struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
-       const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
-       int brightness = backlight_get_brightness(bdev);
-
-       return bdata->set_brightness(brightness);
-}
-
-static int shmob_drm_backlight_get_brightness(struct backlight_device *bdev)
-{
-       struct shmob_drm_connector *scon = bl_get_data(bdev);
-       struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
-       const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
-
-       return bdata->get_brightness();
-}
-
-static const struct backlight_ops shmob_drm_backlight_ops = {
-       .options        = BL_CORE_SUSPENDRESUME,
-       .update_status  = shmob_drm_backlight_update,
-       .get_brightness = shmob_drm_backlight_get_brightness,
-};
-
-void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode)
-{
-       if (scon->backlight == NULL)
-               return;
-
-       scon->backlight->props.power = mode == DRM_MODE_DPMS_ON
-                                    ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
-       backlight_update_status(scon->backlight);
-}
-
-int shmob_drm_backlight_init(struct shmob_drm_connector *scon)
-{
-       struct shmob_drm_device *sdev = scon->connector.dev->dev_private;
-       const struct shmob_drm_backlight_data *bdata = &sdev->pdata->backlight;
-       struct drm_connector *connector = &scon->connector;
-       struct drm_device *dev = connector->dev;
-       struct backlight_device *backlight;
-
-       if (!bdata->max_brightness)
-               return 0;
-
-       backlight = backlight_device_register(bdata->name, dev->dev, scon,
-                                             &shmob_drm_backlight_ops, NULL);
-       if (IS_ERR(backlight)) {
-               dev_err(dev->dev, "unable to register backlight device: %ld\n",
-                       PTR_ERR(backlight));
-               return PTR_ERR(backlight);
-       }
-
-       backlight->props.max_brightness = bdata->max_brightness;
-       backlight->props.brightness = bdata->max_brightness;
-       backlight->props.power = FB_BLANK_POWERDOWN;
-       backlight_update_status(backlight);
-
-       scon->backlight = backlight;
-       return 0;
-}
-
-void shmob_drm_backlight_exit(struct shmob_drm_connector *scon)
-{
-       backlight_device_unregister(scon->backlight);
-}
diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.h b/drivers/gpu/drm/renesas/shmobile/shmob_drm_backlight.h
deleted file mode 100644 (file)
index d9abb7a..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * shmob_drm_backlight.h  --  SH Mobile DRM Backlight
- *
- * Copyright (C) 2012 Renesas Electronics Corporation
- *
- * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- */
-
-#ifndef __SHMOB_DRM_BACKLIGHT_H__
-#define __SHMOB_DRM_BACKLIGHT_H__
-
-struct shmob_drm_connector;
-
-void shmob_drm_backlight_dpms(struct shmob_drm_connector *scon, int mode);
-int shmob_drm_backlight_init(struct shmob_drm_connector *scon);
-void shmob_drm_backlight_exit(struct shmob_drm_connector *scon);
-
-#endif /* __SHMOB_DRM_BACKLIGHT_H__ */
index 11dd2bc803e7cb6236a5092b3db39a79defe0279..2e2f37b9d0a4bafae4b5cc857baccf0e3d8fc724 100644 (file)
@@ -7,9 +7,18 @@
  * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  */
 
-#include <linux/backlight.h>
 #include <linux/clk.h>
-
+#include <linux/media-bus-format.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fb_dma_helper.h>
 #include <drm/drm_gem_dma_helper.h>
 #include <drm/drm_modeset_helper.h>
 #include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_panel.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_simple_kms_helper.h>
 #include <drm/drm_vblank.h>
 
-#include "shmob_drm_backlight.h"
+#include <video/videomode.h>
+
 #include "shmob_drm_crtc.h"
 #include "shmob_drm_drv.h"
 #include "shmob_drm_kms.h"
 #include "shmob_drm_plane.h"
 #include "shmob_drm_regs.h"
 
-/*
- * TODO: panel support
- */
-
 /* -----------------------------------------------------------------------------
- * Clock management
+ * Page Flip
  */
 
-static int shmob_drm_clk_on(struct shmob_drm_device *sdev)
+void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
 {
-       int ret;
+       struct drm_pending_vblank_event *event;
+       struct drm_device *dev = scrtc->base.dev;
+       unsigned long flags;
 
-       if (sdev->clock) {
-               ret = clk_prepare_enable(sdev->clock);
-               if (ret < 0)
-                       return ret;
+       spin_lock_irqsave(&dev->event_lock, flags);
+       event = scrtc->event;
+       scrtc->event = NULL;
+       if (event) {
+               drm_crtc_send_vblank_event(&scrtc->base, event);
+               wake_up(&scrtc->flip_wait);
+               drm_crtc_vblank_put(&scrtc->base);
        }
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+}
 
-       return 0;
+static bool shmob_drm_crtc_page_flip_pending(struct shmob_drm_crtc *scrtc)
+{
+       struct drm_device *dev = scrtc->base.dev;
+       unsigned long flags;
+       bool pending;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       pending = scrtc->event != NULL;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       return pending;
 }
 
-static void shmob_drm_clk_off(struct shmob_drm_device *sdev)
+static void shmob_drm_crtc_wait_page_flip(struct shmob_drm_crtc *scrtc)
 {
-       if (sdev->clock)
-               clk_disable_unprepare(sdev->clock);
+       struct drm_crtc *crtc = &scrtc->base;
+       struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
+
+       if (wait_event_timeout(scrtc->flip_wait,
+                              !shmob_drm_crtc_page_flip_pending(scrtc),
+                              msecs_to_jiffies(50)))
+               return;
+
+       dev_warn(sdev->dev, "page flip timeout\n");
+
+       shmob_drm_crtc_finish_page_flip(scrtc);
 }
 
 /* -----------------------------------------------------------------------------
  * CRTC
  */
 
+static const struct {
+       u32 fmt;
+       u32 ldmt1r;
+} shmob_drm_bus_fmts[] = {
+       { MEDIA_BUS_FMT_RGB888_3X8,      LDMT1R_MIFTYP_RGB8 },
+       { MEDIA_BUS_FMT_RGB666_2X9_BE,   LDMT1R_MIFTYP_RGB9 },
+       { MEDIA_BUS_FMT_RGB888_2X12_BE,  LDMT1R_MIFTYP_RGB12A },
+       { MEDIA_BUS_FMT_RGB444_1X12,     LDMT1R_MIFTYP_RGB12B },
+       { MEDIA_BUS_FMT_RGB565_1X16,     LDMT1R_MIFTYP_RGB16 },
+       { MEDIA_BUS_FMT_RGB666_1X18,     LDMT1R_MIFTYP_RGB18 },
+       { MEDIA_BUS_FMT_RGB888_1X24,     LDMT1R_MIFTYP_RGB24 },
+       { MEDIA_BUS_FMT_UYVY8_1X16,      LDMT1R_MIFTYP_YCBCR },
+};
+
 static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
 {
-       struct drm_crtc *crtc = &scrtc->crtc;
-       struct shmob_drm_device *sdev = crtc->dev->dev_private;
-       const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
+       struct drm_crtc *crtc = &scrtc->base;
+       struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
+       const struct drm_display_info *info = &sdev->connector->display_info;
        const struct drm_display_mode *mode = &crtc->mode;
+       unsigned int i;
        u32 value;
 
-       value = sdev->ldmt1r
-             | ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : LDMT1R_VPOL)
-             | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : LDMT1R_HPOL)
-             | ((idata->flags & SHMOB_DRM_IFACE_FL_DWPOL) ? LDMT1R_DWPOL : 0)
-             | ((idata->flags & SHMOB_DRM_IFACE_FL_DIPOL) ? LDMT1R_DIPOL : 0)
-             | ((idata->flags & SHMOB_DRM_IFACE_FL_DAPOL) ? LDMT1R_DAPOL : 0)
-             | ((idata->flags & SHMOB_DRM_IFACE_FL_HSCNT) ? LDMT1R_HSCNT : 0)
-             | ((idata->flags & SHMOB_DRM_IFACE_FL_DWCNT) ? LDMT1R_DWCNT : 0);
-       lcdc_write(sdev, LDMT1R, value);
-
-       if (idata->interface >= SHMOB_DRM_IFACE_SYS8A &&
-           idata->interface <= SHMOB_DRM_IFACE_SYS24) {
-               /* Setup SYS bus. */
-               value = (idata->sys.cs_setup << LDMT2R_CSUP_SHIFT)
-                     | (idata->sys.vsync_active_high ? LDMT2R_RSV : 0)
-                     | (idata->sys.vsync_dir_input ? LDMT2R_VSEL : 0)
-                     | (idata->sys.write_setup << LDMT2R_WCSC_SHIFT)
-                     | (idata->sys.write_cycle << LDMT2R_WCEC_SHIFT)
-                     | (idata->sys.write_strobe << LDMT2R_WCLW_SHIFT);
-               lcdc_write(sdev, LDMT2R, value);
-
-               value = (idata->sys.read_latch << LDMT3R_RDLC_SHIFT)
-                     | (idata->sys.read_setup << LDMT3R_RCSC_SHIFT)
-                     | (idata->sys.read_cycle << LDMT3R_RCEC_SHIFT)
-                     | (idata->sys.read_strobe << LDMT3R_RCLW_SHIFT);
-               lcdc_write(sdev, LDMT3R, value);
+       if (!info->num_bus_formats || !info->bus_formats) {
+               dev_warn(sdev->dev, "No bus format reported, using RGB888\n");
+               value = LDMT1R_MIFTYP_RGB24;
+       } else {
+               for (i = 0; i < ARRAY_SIZE(shmob_drm_bus_fmts); i++) {
+                       if (shmob_drm_bus_fmts[i].fmt == info->bus_formats[0])
+                               break;
+               }
+               if (i < ARRAY_SIZE(shmob_drm_bus_fmts)) {
+                       value = shmob_drm_bus_fmts[i].ldmt1r;
+               } else {
+                       dev_warn(sdev->dev,
+                                "unsupported bus format 0x%x, using RGB888\n",
+                                info->bus_formats[0]);
+                       value = LDMT1R_MIFTYP_RGB24;
+               }
        }
 
+       if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+               value |= LDMT1R_DWPOL;
+       if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
+               value |= LDMT1R_DIPOL;
+       if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+               value |= LDMT1R_VPOL;
+       if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+               value |= LDMT1R_HPOL;
+       lcdc_write(sdev, LDMT1R, value);
+
        value = ((mode->hdisplay / 8) << 16)                    /* HDCN */
              | (mode->htotal / 8);                             /* HTCN */
        lcdc_write(sdev, LDHCNR, value);
@@ -121,7 +168,7 @@ static void shmob_drm_crtc_setup_geometry(struct shmob_drm_crtc *scrtc)
 
 static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
 {
-       struct shmob_drm_device *sdev = scrtc->crtc.dev->dev_private;
+       struct shmob_drm_device *sdev = to_shmob_device(scrtc->base.dev);
        u32 value;
 
        value = lcdc_read(sdev, LDCNT2R);
@@ -145,34 +192,23 @@ static void shmob_drm_crtc_start_stop(struct shmob_drm_crtc *scrtc, bool start)
        }
 }
 
-/*
- * shmob_drm_crtc_start - Configure and start the LCDC
- * @scrtc: the SH Mobile CRTC
- *
- * Configure and start the LCDC device. External devices (clocks, MERAM, panels,
- * ...) are not touched by this function.
- */
-static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
+static inline struct shmob_drm_crtc *to_shmob_crtc(struct drm_crtc *crtc)
+{
+       return container_of(crtc, struct shmob_drm_crtc, base);
+}
+
+static void shmob_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+                                        struct drm_atomic_state *state)
 {
-       struct drm_crtc *crtc = &scrtc->crtc;
-       struct shmob_drm_device *sdev = crtc->dev->dev_private;
-       const struct shmob_drm_interface_data *idata = &sdev->pdata->iface;
-       const struct shmob_drm_format_info *format;
-       struct drm_device *dev = sdev->ddev;
-       struct drm_plane *plane;
+       struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+       struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
+       unsigned int clk_div = sdev->config.clk_div;
+       struct device *dev = sdev->dev;
        u32 value;
        int ret;
 
-       if (scrtc->started)
-               return;
-
-       format = shmob_drm_format_info(crtc->primary->fb->format->format);
-       if (WARN_ON(format == NULL))
-               return;
-
-       /* Enable clocks before accessing the hardware. */
-       ret = shmob_drm_clk_on(sdev);
-       if (ret < 0)
+       ret = pm_runtime_resume_and_get(dev);
+       if (ret)
                return;
 
        /* Reset and enable the LCDC. */
@@ -188,79 +224,50 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc)
        lcdc_write(sdev, LDPMR, 0);
 
        value = sdev->lddckr;
-       if (idata->clk_div) {
+       if (clk_div) {
                /* FIXME: sh7724 can only use 42, 48, 54 and 60 for the divider
                 * denominator.
                 */
                lcdc_write(sdev, LDDCKPAT1R, 0);
-               lcdc_write(sdev, LDDCKPAT2R, (1 << (idata->clk_div / 2)) - 1);
+               lcdc_write(sdev, LDDCKPAT2R, (1 << (clk_div / 2)) - 1);
 
-               if (idata->clk_div == 1)
+               if (clk_div == 1)
                        value |= LDDCKR_MOSEL;
                else
-                       value |= idata->clk_div;
+                       value |= clk_div;
        }
 
        lcdc_write(sdev, LDDCKR, value);
        lcdc_write(sdev, LDDCKSTPR, 0);
        lcdc_wait_bit(sdev, LDDCKSTPR, ~0, 0);
 
-       /* TODO: Setup SYS panel */
-
        /* Setup geometry, format, frame buffer memory and operation mode. */
        shmob_drm_crtc_setup_geometry(scrtc);
 
-       /* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
-       lcdc_write(sdev, LDDFR, format->lddfr | LDDFR_CF1);
-       lcdc_write(sdev, LDMLSR, scrtc->line_size);
-       lcdc_write(sdev, LDSA1R, scrtc->dma[0]);
-       if (format->yuv)
-               lcdc_write(sdev, LDSA2R, scrtc->dma[1]);
        lcdc_write(sdev, LDSM1R, 0);
 
-       /* Word and long word swap. */
-       switch (format->fourcc) {
-       case DRM_FORMAT_RGB565:
-       case DRM_FORMAT_NV21:
-       case DRM_FORMAT_NV61:
-       case DRM_FORMAT_NV42:
-               value = LDDDSR_LS | LDDDSR_WS;
-               break;
-       case DRM_FORMAT_RGB888:
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_NV16:
-       case DRM_FORMAT_NV24:
-               value = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
-               break;
-       case DRM_FORMAT_ARGB8888:
-       case DRM_FORMAT_XRGB8888:
-       default:
-               value = LDDDSR_LS;
-               break;
-       }
-       lcdc_write(sdev, LDDDSR, value);
-
-       /* Setup planes. */
-       drm_for_each_legacy_plane(plane, dev) {
-               if (plane->crtc == crtc)
-                       shmob_drm_plane_setup(plane);
-       }
-
        /* Enable the display output. */
        lcdc_write(sdev, LDCNT1R, LDCNT1R_DE);
 
        shmob_drm_crtc_start_stop(scrtc, true);
 
-       scrtc->started = true;
+       /* Turn vertical blank interrupt reporting back on. */
+       drm_crtc_vblank_on(crtc);
 }
 
-static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
+static void shmob_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+                                         struct drm_atomic_state *state)
 {
-       struct drm_crtc *crtc = &scrtc->crtc;
-       struct shmob_drm_device *sdev = crtc->dev->dev_private;
+       struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
+       struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
 
-       if (!scrtc->started)
-               return;
+       /*
+        * Disable vertical blank interrupt reporting.  We first need to wait
+        * for page flip completion before stopping the CRTC as userspace
+        * expects page flips to eventually complete.
+        */
+       shmob_drm_crtc_wait_page_flip(scrtc);
+       drm_crtc_vblank_off(crtc);
 
        /* Stop the LCDC. */
        shmob_drm_crtc_start_stop(scrtc, false);
@@ -268,145 +275,31 @@ static void shmob_drm_crtc_stop(struct shmob_drm_crtc *scrtc)
        /* Disable the display output. */
        lcdc_write(sdev, LDCNT1R, 0);
 
-       /* Stop clocks. */
-       shmob_drm_clk_off(sdev);
-
-       scrtc->started = false;
-}
-
-void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc)
-{
-       shmob_drm_crtc_stop(scrtc);
-}
-
-void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc)
-{
-       if (scrtc->dpms != DRM_MODE_DPMS_ON)
-               return;
-
-       shmob_drm_crtc_start(scrtc);
+       pm_runtime_put(sdev->dev);
 }
 
-static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,
-                                       int x, int y)
+static void shmob_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+                                       struct drm_atomic_state *state)
 {
-       struct drm_crtc *crtc = &scrtc->crtc;
-       struct drm_framebuffer *fb = crtc->primary->fb;
-       struct drm_gem_dma_object *gem;
-       unsigned int bpp;
-
-       bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
-       gem = drm_fb_dma_get_gem_obj(fb, 0);
-       scrtc->dma[0] = gem->dma_addr + fb->offsets[0]
-                     + y * fb->pitches[0] + x * bpp / 8;
-
-       if (scrtc->format->yuv) {
-               bpp = scrtc->format->bpp - 8;
-               gem = drm_fb_dma_get_gem_obj(fb, 1);
-               scrtc->dma[1] = gem->dma_addr + fb->offsets[1]
-                             + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
-                             + x * (bpp == 16 ? 2 : 1);
-       }
-}
-
-static void shmob_drm_crtc_update_base(struct shmob_drm_crtc *scrtc)
-{
-       struct drm_crtc *crtc = &scrtc->crtc;
-       struct shmob_drm_device *sdev = crtc->dev->dev_private;
-
-       shmob_drm_crtc_compute_base(scrtc, crtc->x, crtc->y);
-
-       lcdc_write_mirror(sdev, LDSA1R, scrtc->dma[0]);
-       if (scrtc->format->yuv)
-               lcdc_write_mirror(sdev, LDSA2R, scrtc->dma[1]);
-
-       lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
-}
-
-#define to_shmob_crtc(c)       container_of(c, struct shmob_drm_crtc, crtc)
-
-static void shmob_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-       struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
-
-       if (scrtc->dpms == mode)
-               return;
-
-       if (mode == DRM_MODE_DPMS_ON)
-               shmob_drm_crtc_start(scrtc);
-       else
-               shmob_drm_crtc_stop(scrtc);
-
-       scrtc->dpms = mode;
-}
-
-static void shmob_drm_crtc_mode_prepare(struct drm_crtc *crtc)
-{
-       shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
+       struct drm_pending_vblank_event *event;
+       struct drm_device *dev = crtc->dev;
+       unsigned long flags;
 
-static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc,
-                                  struct drm_display_mode *mode,
-                                  struct drm_display_mode *adjusted_mode,
-                                  int x, int y,
-                                  struct drm_framebuffer *old_fb)
-{
-       struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
-       struct shmob_drm_device *sdev = crtc->dev->dev_private;
-       const struct shmob_drm_format_info *format;
-
-       format = shmob_drm_format_info(crtc->primary->fb->format->format);
-       if (format == NULL) {
-               dev_dbg(sdev->dev, "mode_set: unsupported format %p4cc\n",
-                       &crtc->primary->fb->format->format);
-               return -EINVAL;
+       if (crtc->state->event) {
+               spin_lock_irqsave(&dev->event_lock, flags);
+               event = crtc->state->event;
+               crtc->state->event = NULL;
+               drm_crtc_send_vblank_event(crtc, event);
+               spin_unlock_irqrestore(&dev->event_lock, flags);
        }
-
-       scrtc->format = format;
-       scrtc->line_size = crtc->primary->fb->pitches[0];
-
-       shmob_drm_crtc_compute_base(scrtc, x, y);
-
-       return 0;
-}
-
-static void shmob_drm_crtc_mode_commit(struct drm_crtc *crtc)
-{
-       shmob_drm_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static int shmob_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
-                                       struct drm_framebuffer *old_fb)
-{
-       shmob_drm_crtc_update_base(to_shmob_crtc(crtc));
-
-       return 0;
 }
 
 static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
-       .dpms = shmob_drm_crtc_dpms,
-       .prepare = shmob_drm_crtc_mode_prepare,
-       .commit = shmob_drm_crtc_mode_commit,
-       .mode_set = shmob_drm_crtc_mode_set,
-       .mode_set_base = shmob_drm_crtc_mode_set_base,
+       .atomic_flush = shmob_drm_crtc_atomic_flush,
+       .atomic_enable = shmob_drm_crtc_atomic_enable,
+       .atomic_disable = shmob_drm_crtc_atomic_disable,
 };
 
-void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
-{
-       struct drm_pending_vblank_event *event;
-       struct drm_device *dev = scrtc->crtc.dev;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       event = scrtc->event;
-       scrtc->event = NULL;
-       if (event) {
-               drm_crtc_send_vblank_event(&scrtc->crtc, event);
-               drm_crtc_vblank_put(&scrtc->crtc);
-       }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
 static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
                                    struct drm_framebuffer *fb,
                                    struct drm_pending_vblank_event *event,
@@ -414,7 +307,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
                                    struct drm_modeset_acquire_ctx *ctx)
 {
        struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
-       struct drm_device *dev = scrtc->crtc.dev;
+       struct drm_device *dev = scrtc->base.dev;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->event_lock, flags);
@@ -424,12 +317,11 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
-       crtc->primary->fb = fb;
-       shmob_drm_crtc_update_base(scrtc);
+       drm_atomic_set_fb_for_plane(crtc->primary->state, fb);
 
        if (event) {
                event->pipe = 0;
-               drm_crtc_vblank_get(&scrtc->crtc);
+               drm_crtc_vblank_get(&scrtc->base);
                spin_lock_irqsave(&dev->event_lock, flags);
                scrtc->event = event;
                spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -457,7 +349,7 @@ static void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev,
 
 static int shmob_drm_enable_vblank(struct drm_crtc *crtc)
 {
-       struct shmob_drm_device *sdev = crtc->dev->dev_private;
+       struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
 
        shmob_drm_crtc_enable_vblank(sdev, true);
 
@@ -466,88 +358,65 @@ static int shmob_drm_enable_vblank(struct drm_crtc *crtc)
 
 static void shmob_drm_disable_vblank(struct drm_crtc *crtc)
 {
-       struct shmob_drm_device *sdev = crtc->dev->dev_private;
+       struct shmob_drm_device *sdev = to_shmob_device(crtc->dev);
 
        shmob_drm_crtc_enable_vblank(sdev, false);
 }
 
 static const struct drm_crtc_funcs crtc_funcs = {
+       .reset = drm_atomic_helper_crtc_reset,
        .destroy = drm_crtc_cleanup,
-       .set_config = drm_crtc_helper_set_config,
+       .set_config = drm_atomic_helper_set_config,
        .page_flip = shmob_drm_crtc_page_flip,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
        .enable_vblank = shmob_drm_enable_vblank,
        .disable_vblank = shmob_drm_disable_vblank,
 };
 
-static const uint32_t modeset_formats[] = {
-       DRM_FORMAT_RGB565,
-       DRM_FORMAT_RGB888,
-       DRM_FORMAT_ARGB8888,
-       DRM_FORMAT_XRGB8888,
-};
-
-static const struct drm_plane_funcs primary_plane_funcs = {
-       DRM_PLANE_NON_ATOMIC_FUNCS,
-};
-
 int shmob_drm_crtc_create(struct shmob_drm_device *sdev)
 {
-       struct drm_crtc *crtc = &sdev->crtc.crtc;
-       struct drm_plane *primary;
+       struct drm_crtc *crtc = &sdev->crtc.base;
+       struct drm_plane *primary, *plane;
+       unsigned int i;
        int ret;
 
-       sdev->crtc.dpms = DRM_MODE_DPMS_OFF;
+       init_waitqueue_head(&sdev->crtc.flip_wait);
 
-       primary = __drm_universal_plane_alloc(sdev->ddev, sizeof(*primary), 0,
-                                             0, &primary_plane_funcs,
-                                             modeset_formats,
-                                             ARRAY_SIZE(modeset_formats),
-                                             NULL, DRM_PLANE_TYPE_PRIMARY,
-                                             NULL);
+       primary = shmob_drm_plane_create(sdev, DRM_PLANE_TYPE_PRIMARY, 0);
        if (IS_ERR(primary))
                return PTR_ERR(primary);
 
-       ret = drm_crtc_init_with_planes(sdev->ddev, crtc, primary, NULL,
+       for (i = 1; i < 5; ++i) {
+               plane = shmob_drm_plane_create(sdev, DRM_PLANE_TYPE_OVERLAY, i);
+               if (IS_ERR(plane))
+                       return PTR_ERR(plane);
+       }
+
+       ret = drm_crtc_init_with_planes(&sdev->ddev, crtc, primary, NULL,
                                        &crtc_funcs, NULL);
-       if (ret < 0) {
-               drm_plane_cleanup(primary);
-               kfree(primary);
+       if (ret < 0)
                return ret;
-       }
 
        drm_crtc_helper_add(crtc, &crtc_helper_funcs);
 
+       /* Start with vertical blank interrupt reporting disabled. */
+       drm_crtc_vblank_off(crtc);
+
        return 0;
 }
 
 /* -----------------------------------------------------------------------------
- * Encoder
+ * Legacy Encoder
  */
 
-#define to_shmob_encoder(e) \
-       container_of(e, struct shmob_drm_encoder, encoder)
-
-static void shmob_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-       struct shmob_drm_encoder *senc = to_shmob_encoder(encoder);
-       struct shmob_drm_device *sdev = encoder->dev->dev_private;
-       struct shmob_drm_connector *scon = &sdev->connector;
-
-       if (senc->dpms == mode)
-               return;
-
-       shmob_drm_backlight_dpms(scon, mode);
-
-       senc->dpms = mode;
-}
-
 static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
                                         const struct drm_display_mode *mode,
                                         struct drm_display_mode *adjusted_mode)
 {
        struct drm_device *dev = encoder->dev;
-       struct shmob_drm_device *sdev = dev->dev_private;
-       struct drm_connector *connector = &sdev->connector.connector;
+       struct shmob_drm_device *sdev = to_shmob_device(dev);
+       struct drm_connector *connector = sdev->connector;
        const struct drm_display_mode *panel_mode;
 
        if (list_empty(&connector->modes)) {
@@ -563,60 +432,61 @@ static bool shmob_drm_encoder_mode_fixup(struct drm_encoder *encoder,
        return true;
 }
 
-static void shmob_drm_encoder_mode_prepare(struct drm_encoder *encoder)
-{
-       /* No-op, everything is handled in the CRTC code. */
-}
-
-static void shmob_drm_encoder_mode_set(struct drm_encoder *encoder,
-                                      struct drm_display_mode *mode,
-                                      struct drm_display_mode *adjusted_mode)
-{
-       /* No-op, everything is handled in the CRTC code. */
-}
-
-static void shmob_drm_encoder_mode_commit(struct drm_encoder *encoder)
-{
-       /* No-op, everything is handled in the CRTC code. */
-}
-
 static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-       .dpms = shmob_drm_encoder_dpms,
        .mode_fixup = shmob_drm_encoder_mode_fixup,
-       .prepare = shmob_drm_encoder_mode_prepare,
-       .commit = shmob_drm_encoder_mode_commit,
-       .mode_set = shmob_drm_encoder_mode_set,
 };
 
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
 int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
 {
-       struct drm_encoder *encoder = &sdev->encoder.encoder;
+       struct drm_encoder *encoder = &sdev->encoder;
+       struct drm_bridge *bridge;
        int ret;
 
-       sdev->encoder.dpms = DRM_MODE_DPMS_OFF;
-
        encoder->possible_crtcs = 1;
 
-       ret = drm_simple_encoder_init(sdev->ddev, encoder,
-                                     DRM_MODE_ENCODER_LVDS);
+       ret = drm_simple_encoder_init(&sdev->ddev, encoder,
+                                     DRM_MODE_ENCODER_DPI);
        if (ret < 0)
                return ret;
 
-       drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+       if (sdev->pdata) {
+               drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+               return 0;
+       }
+
+       /* Create a panel bridge */
+       bridge = devm_drm_of_get_bridge(sdev->dev, sdev->dev->of_node, 0, 0);
+       if (IS_ERR(bridge))
+               return PTR_ERR(bridge);
+
+       /* Attach the bridge to the encoder */
+       ret = drm_bridge_attach(encoder, bridge, NULL,
+                               DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+       if (ret) {
+               dev_err(sdev->dev, "failed to attach bridge: %pe\n",
+                       ERR_PTR(ret));
+               return ret;
+       }
 
        return 0;
 }
 
 /* -----------------------------------------------------------------------------
- * Connector
+ * Legacy Connector
  */
 
-#define to_shmob_connector(c) \
-       container_of(c, struct shmob_drm_connector, connector)
+static inline struct shmob_drm_connector *to_shmob_connector(struct drm_connector *connector)
+{
+       return container_of(connector, struct shmob_drm_connector, base);
+}
 
 static int shmob_drm_connector_get_modes(struct drm_connector *connector)
 {
-       struct shmob_drm_device *sdev = connector->dev->dev_private;
+       struct shmob_drm_connector *scon = to_shmob_connector(connector);
        struct drm_display_mode *mode;
 
        mode = drm_mode_create(connector->dev);
@@ -624,22 +494,10 @@ static int shmob_drm_connector_get_modes(struct drm_connector *connector)
                return 0;
 
        mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
-       mode->clock = sdev->pdata->panel.mode.clock;
-       mode->hdisplay = sdev->pdata->panel.mode.hdisplay;
-       mode->hsync_start = sdev->pdata->panel.mode.hsync_start;
-       mode->hsync_end = sdev->pdata->panel.mode.hsync_end;
-       mode->htotal = sdev->pdata->panel.mode.htotal;
-       mode->vdisplay = sdev->pdata->panel.mode.vdisplay;
-       mode->vsync_start = sdev->pdata->panel.mode.vsync_start;
-       mode->vsync_end = sdev->pdata->panel.mode.vsync_end;
-       mode->vtotal = sdev->pdata->panel.mode.vtotal;
-       mode->flags = sdev->pdata->panel.mode.flags;
-
-       drm_mode_set_name(mode);
-       drm_mode_probed_add(connector, mode);
 
-       connector->display_info.width_mm = sdev->pdata->panel.width_mm;
-       connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+       drm_display_mode_from_videomode(scon->mode, mode);
+
+       drm_mode_probed_add(connector, mode);
 
        return 1;
 }
@@ -659,54 +517,106 @@ static const struct drm_connector_helper_funcs connector_helper_funcs = {
 
 static void shmob_drm_connector_destroy(struct drm_connector *connector)
 {
-       struct shmob_drm_connector *scon = to_shmob_connector(connector);
-
-       shmob_drm_backlight_exit(scon);
        drm_connector_unregister(connector);
        drm_connector_cleanup(connector);
+
+       kfree(connector);
 }
 
 static const struct drm_connector_funcs connector_funcs = {
-       .dpms = drm_helper_connector_dpms,
+       .reset = drm_atomic_helper_connector_reset,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .destroy = shmob_drm_connector_destroy,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };
 
-int shmob_drm_connector_create(struct shmob_drm_device *sdev,
-                              struct drm_encoder *encoder)
+static struct drm_connector *
+shmob_drm_connector_init(struct shmob_drm_device *sdev,
+                        struct drm_encoder *encoder)
 {
-       struct drm_connector *connector = &sdev->connector.connector;
+       u32 bus_fmt = sdev->pdata->iface.bus_fmt;
+       struct shmob_drm_connector *scon;
+       struct drm_connector *connector;
+       struct drm_display_info *info;
+       unsigned int i;
        int ret;
 
-       sdev->connector.encoder = encoder;
+       for (i = 0; i < ARRAY_SIZE(shmob_drm_bus_fmts); i++) {
+               if (shmob_drm_bus_fmts[i].fmt == bus_fmt)
+                       break;
+       }
+       if (i == ARRAY_SIZE(shmob_drm_bus_fmts)) {
+               dev_err(sdev->dev, "unsupported bus format 0x%x\n", bus_fmt);
+               return ERR_PTR(-EINVAL);
+       }
 
-       connector->display_info.width_mm = sdev->pdata->panel.width_mm;
-       connector->display_info.height_mm = sdev->pdata->panel.height_mm;
+       scon = kzalloc(sizeof(*scon), GFP_KERNEL);
+       if (!scon)
+               return ERR_PTR(-ENOMEM);
 
-       ret = drm_connector_init(sdev->ddev, connector, &connector_funcs,
-                                DRM_MODE_CONNECTOR_LVDS);
-       if (ret < 0)
-               return ret;
+       connector = &scon->base;
+       scon->encoder = encoder;
+       scon->mode = &sdev->pdata->panel.mode;
+
+       info = &connector->display_info;
+       info->width_mm = sdev->pdata->panel.width_mm;
+       info->height_mm = sdev->pdata->panel.height_mm;
+
+       if (scon->mode->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
+               info->bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
+       if (scon->mode->flags & DISPLAY_FLAGS_DE_LOW)
+               info->bus_flags |= DRM_BUS_FLAG_DE_LOW;
+
+       ret = drm_display_info_set_bus_formats(info, &bus_fmt, 1);
+       if (ret < 0) {
+               kfree(scon);
+               return ERR_PTR(ret);
+       }
+
+       ret = drm_connector_init(&sdev->ddev, connector, &connector_funcs,
+                                DRM_MODE_CONNECTOR_DPI);
+       if (ret < 0) {
+               kfree(scon);
+               return ERR_PTR(ret);
+       }
 
        drm_connector_helper_add(connector, &connector_helper_funcs);
 
-       ret = shmob_drm_backlight_init(&sdev->connector);
-       if (ret < 0)
-               goto err_cleanup;
+       return connector;
+}
+
+/* -----------------------------------------------------------------------------
+ * Connector
+ */
+
+int shmob_drm_connector_create(struct shmob_drm_device *sdev,
+                              struct drm_encoder *encoder)
+{
+       struct drm_connector *connector;
+       int ret;
+
+       if (sdev->pdata)
+               connector = shmob_drm_connector_init(sdev, encoder);
+       else
+               connector = drm_bridge_connector_init(&sdev->ddev, encoder);
+       if (IS_ERR(connector)) {
+               dev_err(sdev->dev, "failed to create connector: %pe\n",
+                       connector);
+               return PTR_ERR(connector);
+       }
 
        ret = drm_connector_attach_encoder(connector, encoder);
        if (ret < 0)
-               goto err_backlight;
+               goto error;
+
+       connector->dpms = DRM_MODE_DPMS_OFF;
 
-       drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
-       drm_object_property_set_value(&connector->base,
-               sdev->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
+       sdev->connector = connector;
 
        return 0;
 
-err_backlight:
-       shmob_drm_backlight_exit(&sdev->connector);
-err_cleanup:
+error:
        drm_connector_cleanup(connector);
        return ret;
 }
index 21718843f46d3d19deb61a48778026026724e042..16e1712dd04e0f2baf411de41cafb90d58af2418 100644 (file)
 #include <drm/drm_connector.h>
 #include <drm/drm_encoder.h>
 
-struct backlight_device;
+#include <linux/wait.h>
+
+#include <video/videomode.h>
+
 struct drm_pending_vblank_event;
 struct shmob_drm_device;
 struct shmob_drm_format_info;
 
 struct shmob_drm_crtc {
-       struct drm_crtc crtc;
+       struct drm_crtc base;
 
        struct drm_pending_vblank_event *event;
-       int dpms;
-
-       const struct shmob_drm_format_info *format;
-       unsigned long dma[2];
-       unsigned int line_size;
-       bool started;
-};
-
-struct shmob_drm_encoder {
-       struct drm_encoder encoder;
-       int dpms;
+       wait_queue_head_t flip_wait;
 };
 
+/* Legacy connector */
 struct shmob_drm_connector {
-       struct drm_connector connector;
+       struct drm_connector base;
        struct drm_encoder *encoder;
-
-       struct backlight_device *backlight;
+       const struct videomode *mode;
 };
 
 int shmob_drm_crtc_create(struct shmob_drm_device *sdev);
 void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc);
-void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc);
-void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc);
 
 int shmob_drm_encoder_create(struct shmob_drm_device *sdev);
 int shmob_drm_connector_create(struct shmob_drm_device *sdev,
index e5db4e0095bad1feae6fe223cdd2d84f10be1200..e83c3e52251dedf9843685484beb7a29d2408fdd 100644 (file)
 #include <linux/io.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/slab.h>
 
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fbdev_generic.h>
 #include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_modeset_helper.h>
 #include <drm/drm_module.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/drm_vblank.h>
  * Hardware initialization
  */
 
-static int shmob_drm_init_interface(struct shmob_drm_device *sdev)
-{
-       static const u32 ldmt1r[] = {
-               [SHMOB_DRM_IFACE_RGB8] = LDMT1R_MIFTYP_RGB8,
-               [SHMOB_DRM_IFACE_RGB9] = LDMT1R_MIFTYP_RGB9,
-               [SHMOB_DRM_IFACE_RGB12A] = LDMT1R_MIFTYP_RGB12A,
-               [SHMOB_DRM_IFACE_RGB12B] = LDMT1R_MIFTYP_RGB12B,
-               [SHMOB_DRM_IFACE_RGB16] = LDMT1R_MIFTYP_RGB16,
-               [SHMOB_DRM_IFACE_RGB18] = LDMT1R_MIFTYP_RGB18,
-               [SHMOB_DRM_IFACE_RGB24] = LDMT1R_MIFTYP_RGB24,
-               [SHMOB_DRM_IFACE_YUV422] = LDMT1R_MIFTYP_YCBCR,
-               [SHMOB_DRM_IFACE_SYS8A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8A,
-               [SHMOB_DRM_IFACE_SYS8B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8B,
-               [SHMOB_DRM_IFACE_SYS8C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8C,
-               [SHMOB_DRM_IFACE_SYS8D] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS8D,
-               [SHMOB_DRM_IFACE_SYS9] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS9,
-               [SHMOB_DRM_IFACE_SYS12] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS12,
-               [SHMOB_DRM_IFACE_SYS16A] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16A,
-               [SHMOB_DRM_IFACE_SYS16B] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16B,
-               [SHMOB_DRM_IFACE_SYS16C] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS16C,
-               [SHMOB_DRM_IFACE_SYS18] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS18,
-               [SHMOB_DRM_IFACE_SYS24] = LDMT1R_IFM | LDMT1R_MIFTYP_SYS24,
-       };
-
-       if (sdev->pdata->iface.interface >= ARRAY_SIZE(ldmt1r)) {
-               dev_err(sdev->dev, "invalid interface type %u\n",
-                       sdev->pdata->iface.interface);
-               return -EINVAL;
-       }
-
-       sdev->ldmt1r = ldmt1r[sdev->pdata->iface.interface];
-       return 0;
-}
-
 static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
-                                           enum shmob_drm_clk_source clksrc)
+                                 enum shmob_drm_clk_source clksrc)
 {
        struct clk *clk;
        char *clkname;
 
        switch (clksrc) {
        case SHMOB_DRM_CLK_BUS:
-               clkname = "bus_clk";
+               clkname = "fck";
                sdev->lddckr = LDDCKR_ICKSEL_BUS;
                break;
        case SHMOB_DRM_CLK_PERIPHERAL:
-               clkname = "peripheral_clk";
+               clkname = "media";
                sdev->lddckr = LDDCKR_ICKSEL_MIPI;
                break;
        case SHMOB_DRM_CLK_EXTERNAL:
-               clkname = NULL;
+               clkname = "lclk";
                sdev->lddckr = LDDCKR_ICKSEL_HDMI;
                break;
        default:
@@ -105,7 +75,7 @@ static int shmob_drm_setup_clocks(struct shmob_drm_device *sdev,
 static irqreturn_t shmob_drm_irq(int irq, void *arg)
 {
        struct drm_device *dev = arg;
-       struct shmob_drm_device *sdev = dev->dev_private;
+       struct shmob_drm_device *sdev = to_shmob_device(dev);
        unsigned long flags;
        u32 status;
 
@@ -119,7 +89,7 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
        spin_unlock_irqrestore(&sdev->irq_lock, flags);
 
        if (status & LDINTR_VES) {
-               drm_handle_vblank(dev, 0);
+               drm_crtc_handle_vblank(&sdev->crtc.base);
                shmob_drm_crtc_finish_page_flip(&sdev->crtc);
        }
 
@@ -129,7 +99,7 @@ static irqreturn_t shmob_drm_irq(int irq, void *arg)
 DEFINE_DRM_GEM_DMA_FOPS(shmob_drm_fops);
 
 static const struct drm_driver shmob_drm_driver = {
-       .driver_features        = DRIVER_GEM | DRIVER_MODESET,
+       .driver_features        = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        DRM_GEM_DMA_DRIVER_OPS,
        .fops                   = &shmob_drm_fops,
        .name                   = "shmob-drm",
@@ -147,26 +117,45 @@ static int shmob_drm_pm_suspend(struct device *dev)
 {
        struct shmob_drm_device *sdev = dev_get_drvdata(dev);
 
-       drm_kms_helper_poll_disable(sdev->ddev);
-       shmob_drm_crtc_suspend(&sdev->crtc);
+       return drm_mode_config_helper_suspend(&sdev->ddev);
+}
+
+static int shmob_drm_pm_resume(struct device *dev)
+{
+       struct shmob_drm_device *sdev = dev_get_drvdata(dev);
+
+       return drm_mode_config_helper_resume(&sdev->ddev);
+}
+
+static int shmob_drm_pm_runtime_suspend(struct device *dev)
+{
+       struct shmob_drm_device *sdev = dev_get_drvdata(dev);
+
+       if (sdev->clock)
+               clk_disable_unprepare(sdev->clock);
 
        return 0;
 }
 
-static int shmob_drm_pm_resume(struct device *dev)
+static int shmob_drm_pm_runtime_resume(struct device *dev)
 {
        struct shmob_drm_device *sdev = dev_get_drvdata(dev);
+       int ret;
 
-       drm_modeset_lock_all(sdev->ddev);
-       shmob_drm_crtc_resume(&sdev->crtc);
-       drm_modeset_unlock_all(sdev->ddev);
+       if (sdev->clock) {
+               ret = clk_prepare_enable(sdev->clock);
+               if (ret < 0)
+                       return ret;
+       }
 
-       drm_kms_helper_poll_enable(sdev->ddev);
        return 0;
 }
 
-static DEFINE_SIMPLE_DEV_PM_OPS(shmob_drm_pm_ops,
-                               shmob_drm_pm_suspend, shmob_drm_pm_resume);
+static const struct dev_pm_ops shmob_drm_pm_ops = {
+       SYSTEM_SLEEP_PM_OPS(shmob_drm_pm_suspend, shmob_drm_pm_resume)
+       RUNTIME_PM_OPS(shmob_drm_pm_runtime_suspend,
+                      shmob_drm_pm_runtime_resume, NULL)
+};
 
 /* -----------------------------------------------------------------------------
  * Platform driver
@@ -175,37 +164,45 @@ static DEFINE_SIMPLE_DEV_PM_OPS(shmob_drm_pm_ops,
 static void shmob_drm_remove(struct platform_device *pdev)
 {
        struct shmob_drm_device *sdev = platform_get_drvdata(pdev);
-       struct drm_device *ddev = sdev->ddev;
+       struct drm_device *ddev = &sdev->ddev;
 
        drm_dev_unregister(ddev);
+       drm_atomic_helper_shutdown(ddev);
        drm_kms_helper_poll_fini(ddev);
-       free_irq(sdev->irq, ddev);
-       drm_dev_put(ddev);
 }
 
 static int shmob_drm_probe(struct platform_device *pdev)
 {
        struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
+       const struct shmob_drm_config *config;
        struct shmob_drm_device *sdev;
        struct drm_device *ddev;
-       unsigned int i;
        int ret;
 
-       if (pdata == NULL) {
+       config = of_device_get_match_data(&pdev->dev);
+       if (!config && !pdata) {
                dev_err(&pdev->dev, "no platform data\n");
                return -EINVAL;
        }
 
        /*
-        * Allocate and initialize the driver private data, I/O resources and
-        * clocks.
+        * Allocate and initialize the DRM device, driver private data, I/O
+        * resources and clocks.
         */
-       sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL);
-       if (sdev == NULL)
-               return -ENOMEM;
+       sdev = devm_drm_dev_alloc(&pdev->dev, &shmob_drm_driver,
+                                 struct shmob_drm_device, ddev);
+       if (IS_ERR(sdev))
+               return PTR_ERR(sdev);
 
+       ddev = &sdev->ddev;
        sdev->dev = &pdev->dev;
-       sdev->pdata = pdata;
+       if (config) {
+               sdev->config = *config;
+       } else {
+               sdev->pdata = pdata;
+               sdev->config.clk_source = pdata->clk_source;
+               sdev->config.clk_div = pdata->iface.clk_div;
+       }
        spin_lock_init(&sdev->irq_lock);
 
        platform_set_drvdata(pdev, sdev);
@@ -214,49 +211,32 @@ static int shmob_drm_probe(struct platform_device *pdev)
        if (IS_ERR(sdev->mmio))
                return PTR_ERR(sdev->mmio);
 
-       ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
+       ret = shmob_drm_setup_clocks(sdev, sdev->config.clk_source);
        if (ret < 0)
                return ret;
 
-       ret = shmob_drm_init_interface(sdev);
-       if (ret < 0)
+       ret = devm_pm_runtime_enable(&pdev->dev);
+       if (ret)
                return ret;
 
-       /* Allocate and initialize the DRM device. */
-       ddev = drm_dev_alloc(&shmob_drm_driver, &pdev->dev);
-       if (IS_ERR(ddev))
-               return PTR_ERR(ddev);
-
-       sdev->ddev = ddev;
-       ddev->dev_private = sdev;
-
-       ret = shmob_drm_modeset_init(sdev);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to initialize mode setting\n");
-               goto err_free_drm_dev;
-       }
-
-       for (i = 0; i < 4; ++i) {
-               ret = shmob_drm_plane_create(sdev, i);
-               if (ret < 0) {
-                       dev_err(&pdev->dev, "failed to create plane %u\n", i);
-                       goto err_modeset_cleanup;
-               }
-       }
-
        ret = drm_vblank_init(ddev, 1);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to initialize vblank\n");
-               goto err_modeset_cleanup;
+               return ret;
        }
 
+       ret = shmob_drm_modeset_init(sdev);
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret,
+                                    "failed to initialize mode setting\n");
+
        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
                goto err_modeset_cleanup;
        sdev->irq = ret;
 
-       ret = request_irq(sdev->irq, shmob_drm_irq, 0, ddev->driver->name,
-                         ddev);
+       ret = devm_request_irq(&pdev->dev, sdev->irq, shmob_drm_irq, 0,
+                              ddev->driver->name, ddev);
        if (ret < 0) {
                dev_err(&pdev->dev, "failed to install IRQ handler\n");
                goto err_modeset_cleanup;
@@ -268,28 +248,35 @@ static int shmob_drm_probe(struct platform_device *pdev)
         */
        ret = drm_dev_register(ddev, 0);
        if (ret < 0)
-               goto err_irq_uninstall;
+               goto err_modeset_cleanup;
 
        drm_fbdev_generic_setup(ddev, 16);
 
        return 0;
 
-err_irq_uninstall:
-       free_irq(sdev->irq, ddev);
 err_modeset_cleanup:
        drm_kms_helper_poll_fini(ddev);
-err_free_drm_dev:
-       drm_dev_put(ddev);
-
        return ret;
 }
 
+static const struct shmob_drm_config shmob_arm_config = {
+       .clk_source = SHMOB_DRM_CLK_BUS,
+       .clk_div = 5,
+};
+
+static const struct of_device_id shmob_drm_of_table[] __maybe_unused = {
+       { .compatible = "renesas,r8a7740-lcdc", .data = &shmob_arm_config, },
+       { .compatible = "renesas,sh73a0-lcdc",  .data = &shmob_arm_config, },
+       { /* sentinel */ }
+};
+
 static struct platform_driver shmob_drm_platform_driver = {
        .probe          = shmob_drm_probe,
        .remove_new     = shmob_drm_remove,
        .driver         = {
                .name   = "shmob-drm",
-               .pm     = pm_sleep_ptr(&shmob_drm_pm_ops),
+               .of_match_table = of_match_ptr(shmob_drm_of_table),
+               .pm     = &shmob_drm_pm_ops,
        },
 };
 
index 4964ddd5ab7472b018ebc32d52d988caa9ec0c91..088ac5381e91e61a8196f2c3d2fb609e4abc4dac 100644 (file)
@@ -20,23 +20,33 @@ struct clk;
 struct device;
 struct drm_device;
 
+struct shmob_drm_config {
+       enum shmob_drm_clk_source clk_source;
+       unsigned int clk_div;
+};
+
 struct shmob_drm_device {
        struct device *dev;
        const struct shmob_drm_platform_data *pdata;
+       struct shmob_drm_config config;
 
        void __iomem *mmio;
        struct clk *clock;
        u32 lddckr;
-       u32 ldmt1r;
 
        unsigned int irq;
        spinlock_t irq_lock;            /* Protects hardware LDINTR register */
 
-       struct drm_device *ddev;
+       struct drm_device ddev;
 
        struct shmob_drm_crtc crtc;
-       struct shmob_drm_encoder encoder;
-       struct shmob_drm_connector connector;
+       struct drm_encoder encoder;
+       struct drm_connector *connector;
 };
 
+static inline struct shmob_drm_device *to_shmob_device(struct drm_device *dev)
+{
+       return container_of(dev, struct shmob_drm_device, ddev);
+}
+
 #endif /* __SHMOB_DRM_DRV_H__ */
index 99381cc0abf3ae1f99900239d259bf18c345ddf8..4202ab00fb0cf48706fbc4fcbbc9c77842709dca 100644 (file)
@@ -7,6 +7,7 @@
  * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  */
 
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_fourcc.h>
@@ -17,6 +18,7 @@
 #include "shmob_drm_crtc.h"
 #include "shmob_drm_drv.h"
 #include "shmob_drm_kms.h"
+#include "shmob_drm_plane.h"
 #include "shmob_drm_regs.h"
 
 /* -----------------------------------------------------------------------------
@@ -27,53 +29,73 @@ static const struct shmob_drm_format_info shmob_drm_format_infos[] = {
        {
                .fourcc = DRM_FORMAT_RGB565,
                .bpp = 16,
-               .yuv = false,
                .lddfr = LDDFR_PKF_RGB16,
+               .ldddsr = LDDDSR_LS | LDDDSR_WS,
+               .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+                           LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16,
        }, {
                .fourcc = DRM_FORMAT_RGB888,
                .bpp = 24,
-               .yuv = false,
                .lddfr = LDDFR_PKF_RGB24,
+               .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+               .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+                           LDBBSIFR_SWPB | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24,
        }, {
                .fourcc = DRM_FORMAT_ARGB8888,
                .bpp = 32,
-               .yuv = false,
                .lddfr = LDDFR_PKF_ARGB32,
+               .ldddsr = LDDDSR_LS,
+               .ldbbsifr = LDBBSIFR_AL_PK | LDBBSIFR_SWPL | LDBBSIFR_RY |
+                           LDBBSIFR_RPKF_ARGB32,
        }, {
                .fourcc = DRM_FORMAT_XRGB8888,
                .bpp = 32,
-               .yuv = false,
                .lddfr = LDDFR_PKF_ARGB32,
+               .ldddsr = LDDDSR_LS,
+               .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_RY |
+                           LDBBSIFR_RPKF_ARGB32,
        }, {
                .fourcc = DRM_FORMAT_NV12,
                .bpp = 12,
-               .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_420,
+               .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+               .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+                           LDBBSIFR_SWPB | LDBBSIFR_CHRR_420,
        }, {
                .fourcc = DRM_FORMAT_NV21,
                .bpp = 12,
-               .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_420,
+               .ldddsr = LDDDSR_LS | LDDDSR_WS,
+               .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+                           LDBBSIFR_CHRR_420,
        }, {
                .fourcc = DRM_FORMAT_NV16,
                .bpp = 16,
-               .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_422,
+               .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+               .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+                           LDBBSIFR_SWPB | LDBBSIFR_CHRR_422,
        }, {
                .fourcc = DRM_FORMAT_NV61,
                .bpp = 16,
-               .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_422,
+               .ldddsr = LDDDSR_LS | LDDDSR_WS,
+               .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+                           LDBBSIFR_CHRR_422,
        }, {
                .fourcc = DRM_FORMAT_NV24,
                .bpp = 24,
-               .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_444,
+               .ldddsr = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS,
+               .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+                           LDBBSIFR_SWPB | LDBBSIFR_CHRR_444,
        }, {
                .fourcc = DRM_FORMAT_NV42,
                .bpp = 24,
-               .yuv = true,
                .lddfr = LDDFR_CC | LDDFR_YF_444,
+               .ldddsr = LDDDSR_LS | LDDDSR_WS,
+               .ldbbsifr = LDBBSIFR_AL_1 | LDBBSIFR_SWPL | LDBBSIFR_SWPW |
+                           LDBBSIFR_CHRR_444,
        },
 };
 
@@ -112,7 +134,7 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                return ERR_PTR(-EINVAL);
        }
 
-       if (format->yuv) {
+       if (shmob_drm_format_is_yuv(format)) {
                unsigned int chroma_cpp = format->bpp == 24 ? 2 : 1;
 
                if (mode_cmd->pitches[1] != mode_cmd->pitches[0] * chroma_cpp) {
@@ -127,29 +149,40 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 
 static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
        .fb_create = shmob_drm_fb_create,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
 };
 
 int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
 {
+       struct drm_device *dev = &sdev->ddev;
        int ret;
 
-       ret = drmm_mode_config_init(sdev->ddev);
+       ret = drmm_mode_config_init(dev);
        if (ret)
                return ret;
 
-       shmob_drm_crtc_create(sdev);
-       shmob_drm_encoder_create(sdev);
-       shmob_drm_connector_create(sdev, &sdev->encoder.encoder);
+       ret = shmob_drm_crtc_create(sdev);
+       if (ret < 0)
+               return ret;
+
+       ret = shmob_drm_encoder_create(sdev);
+       if (ret < 0)
+               return ret;
+
+       ret = shmob_drm_connector_create(sdev, &sdev->encoder);
+       if (ret < 0)
+               return ret;
 
-       drm_kms_helper_poll_init(sdev->ddev);
+       drm_mode_config_reset(dev);
 
-       sdev->ddev->mode_config.min_width = 0;
-       sdev->ddev->mode_config.min_height = 0;
-       sdev->ddev->mode_config.max_width = 4095;
-       sdev->ddev->mode_config.max_height = 4095;
-       sdev->ddev->mode_config.funcs = &shmob_drm_mode_config_funcs;
+       drm_kms_helper_poll_init(dev);
 
-       drm_helper_disable_unused_functions(sdev->ddev);
+       sdev->ddev.mode_config.min_width = 0;
+       sdev->ddev.mode_config.min_height = 0;
+       sdev->ddev.mode_config.max_width = 4095;
+       sdev->ddev.mode_config.max_height = 4095;
+       sdev->ddev.mode_config.funcs = &shmob_drm_mode_config_funcs;
 
        return 0;
 }
index 0347b1fd2338a84dfc4d35df4f066ab035cea472..590162c3db20209d7868af7797503857fdde2876 100644 (file)
@@ -17,11 +17,14 @@ struct shmob_drm_device;
 
 struct shmob_drm_format_info {
        u32 fourcc;
-       unsigned int bpp;
-       bool yuv;
-       u32 lddfr;
+       u32 lddfr;      /* LCD Data Format Register */
+       u16 ldbbsifr;   /* CHn Source Image Format Register low bits */
+       u8 ldddsr;      /* LCDC Input Image Data Swap Register low bits */
+       u8 bpp;
 };
 
+#define shmob_drm_format_is_yuv(format)        ((format)->lddfr & LDDFR_CC)
+
 const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc);
 
 int shmob_drm_modeset_init(struct shmob_drm_device *sdev);
index 850986cee848226a1b36f06de195a81e22f8e9a3..8f9a728affde8375fb71633426072cb0f17c0d6c 100644 (file)
@@ -7,11 +7,14 @@
  * Laurent Pinchart (laurent.pinchart@ideasonboard.com)
  */
 
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_dma_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include "shmob_drm_drv.h"
 #include "shmob_drm_kms.h"
 #include "shmob_drm_regs.h"
 
 struct shmob_drm_plane {
-       struct drm_plane plane;
+       struct drm_plane base;
        unsigned int index;
-       unsigned int alpha;
+};
+
+struct shmob_drm_plane_state {
+       struct drm_plane_state base;
 
        const struct shmob_drm_format_info *format;
-       unsigned long dma[2];
-
-       unsigned int src_x;
-       unsigned int src_y;
-       unsigned int crtc_x;
-       unsigned int crtc_y;
-       unsigned int crtc_w;
-       unsigned int crtc_h;
+       u32 dma[2];
 };
 
-#define to_shmob_plane(p)      container_of(p, struct shmob_drm_plane, plane)
+static inline struct shmob_drm_plane *to_shmob_plane(struct drm_plane *plane)
+{
+       return container_of(plane, struct shmob_drm_plane, base);
+}
+
+static inline struct shmob_drm_plane_state *to_shmob_plane_state(struct drm_plane_state *state)
+{
+       return container_of(state, struct shmob_drm_plane_state, base);
+}
 
-static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,
-                                        struct drm_framebuffer *fb,
-                                        int x, int y)
+static void shmob_drm_plane_compute_base(struct shmob_drm_plane_state *sstate)
 {
+       struct drm_framebuffer *fb = sstate->base.fb;
+       unsigned int x = sstate->base.src_x >> 16;
+       unsigned int y = sstate->base.src_y >> 16;
        struct drm_gem_dma_object *gem;
        unsigned int bpp;
 
-       bpp = splane->format->yuv ? 8 : splane->format->bpp;
+       bpp = shmob_drm_format_is_yuv(sstate->format) ? 8 : sstate->format->bpp;
        gem = drm_fb_dma_get_gem_obj(fb, 0);
-       splane->dma[0] = gem->dma_addr + fb->offsets[0]
+       sstate->dma[0] = gem->dma_addr + fb->offsets[0]
                       + y * fb->pitches[0] + x * bpp / 8;
 
-       if (splane->format->yuv) {
-               bpp = splane->format->bpp - 8;
+       if (shmob_drm_format_is_yuv(sstate->format)) {
+               bpp = sstate->format->bpp - 8;
                gem = drm_fb_dma_get_gem_obj(fb, 1);
-               splane->dma[1] = gem->dma_addr + fb->offsets[1]
+               sstate->dma[1] = gem->dma_addr + fb->offsets[1]
                               + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
                               + x * (bpp == 16 ? 2 : 1);
        }
 }
 
-static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
-                                   struct drm_framebuffer *fb)
+static void shmob_drm_primary_plane_setup(struct shmob_drm_plane *splane,
+                                         struct drm_plane_state *state)
 {
-       struct shmob_drm_device *sdev = splane->plane.dev->dev_private;
+       struct shmob_drm_plane_state *sstate = to_shmob_plane_state(state);
+       struct shmob_drm_device *sdev = to_shmob_device(splane->base.dev);
+       struct drm_framebuffer *fb = state->fb;
+
+       /* TODO: Handle YUV colorspaces. Hardcode REC709 for now. */
+       lcdc_write(sdev, LDDFR, sstate->format->lddfr | LDDFR_CF1);
+       lcdc_write(sdev, LDMLSR, fb->pitches[0]);
+
+       /* Word and long word swap. */
+       lcdc_write(sdev, LDDDSR, sstate->format->ldddsr);
+
+       lcdc_write_mirror(sdev, LDSA1R, sstate->dma[0]);
+       if (shmob_drm_format_is_yuv(sstate->format))
+               lcdc_write_mirror(sdev, LDSA2R, sstate->dma[1]);
+
+       lcdc_write(sdev, LDRCNTR, lcdc_read(sdev, LDRCNTR) ^ LDRCNTR_MRS);
+}
+
+static void shmob_drm_overlay_plane_setup(struct shmob_drm_plane *splane,
+                                         struct drm_plane_state *state)
+{
+       struct shmob_drm_plane_state *sstate = to_shmob_plane_state(state);
+       struct shmob_drm_device *sdev = to_shmob_device(splane->base.dev);
+       struct drm_framebuffer *fb = state->fb;
        u32 format;
 
        /* TODO: Support ROP3 mode */
-       format = LDBBSIFR_EN | (splane->alpha << LDBBSIFR_LAY_SHIFT);
-
-       switch (splane->format->fourcc) {
-       case DRM_FORMAT_RGB565:
-       case DRM_FORMAT_NV21:
-       case DRM_FORMAT_NV61:
-       case DRM_FORMAT_NV42:
-               format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW;
-               break;
-       case DRM_FORMAT_RGB888:
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_NV16:
-       case DRM_FORMAT_NV24:
-               format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB;
-               break;
-       case DRM_FORMAT_ARGB8888:
-       case DRM_FORMAT_XRGB8888:
-       default:
-               format |= LDBBSIFR_SWPL;
-               break;
-       }
-
-       switch (splane->format->fourcc) {
-       case DRM_FORMAT_RGB565:
-               format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16;
-               break;
-       case DRM_FORMAT_RGB888:
-               format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24;
-               break;
-       case DRM_FORMAT_ARGB8888:
-               format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDDFR_PKF_ARGB32;
-               break;
-       case DRM_FORMAT_XRGB8888:
-               format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDDFR_PKF_ARGB32;
-               break;
-       case DRM_FORMAT_NV12:
-       case DRM_FORMAT_NV21:
-               format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420;
-               break;
-       case DRM_FORMAT_NV16:
-       case DRM_FORMAT_NV61:
-               format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422;
-               break;
-       case DRM_FORMAT_NV24:
-       case DRM_FORMAT_NV42:
-               format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444;
-               break;
-       }
+       format = LDBBSIFR_EN | ((state->alpha >> 8) << LDBBSIFR_LAY_SHIFT) |
+                sstate->format->ldbbsifr;
 
 #define plane_reg_dump(sdev, splane, reg) \
-       dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
+       dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x 0x%08x\n", __func__, \
                splane->index, #reg, \
                lcdc_read(sdev, reg(splane->index)), \
                lcdc_read(sdev, reg(splane->index) + LCDC_SIDE_B_OFFSET))
@@ -127,29 +112,27 @@ static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
        plane_reg_dump(sdev, splane, LDBnBSACR);
 
        lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
-       dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+       dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
                "LDBCR", lcdc_read(sdev, LDBCR));
 
        lcdc_write(sdev, LDBnBSIFR(splane->index), format);
 
        lcdc_write(sdev, LDBnBSSZR(splane->index),
-                  (splane->crtc_h << LDBBSSZR_BVSS_SHIFT) |
-                  (splane->crtc_w << LDBBSSZR_BHSS_SHIFT));
+                  (state->crtc_h << LDBBSSZR_BVSS_SHIFT) |
+                  (state->crtc_w << LDBBSSZR_BHSS_SHIFT));
        lcdc_write(sdev, LDBnBLOCR(splane->index),
-                  (splane->crtc_y << LDBBLOCR_CVLC_SHIFT) |
-                  (splane->crtc_x << LDBBLOCR_CHLC_SHIFT));
+                  (state->crtc_y << LDBBLOCR_CVLC_SHIFT) |
+                  (state->crtc_x << LDBBLOCR_CHLC_SHIFT));
        lcdc_write(sdev, LDBnBSMWR(splane->index),
                   fb->pitches[0] << LDBBSMWR_BSMW_SHIFT);
 
-       shmob_drm_plane_compute_base(splane, fb, splane->src_x, splane->src_y);
-
-       lcdc_write(sdev, LDBnBSAYR(splane->index), splane->dma[0]);
-       if (splane->format->yuv)
-               lcdc_write(sdev, LDBnBSACR(splane->index), splane->dma[1]);
+       lcdc_write(sdev, LDBnBSAYR(splane->index), sstate->dma[0]);
+       if (shmob_drm_format_is_yuv(sstate->format))
+               lcdc_write(sdev, LDBnBSACR(splane->index), sstate->dma[1]);
 
        lcdc_write(sdev, LDBCR,
                   LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
-       dev_dbg(sdev->ddev->dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
+       dev_dbg(sdev->ddev.dev, "%s(%u): %s 0x%08x\n", __func__, splane->index,
                "LDBCR", lcdc_read(sdev, LDBCR));
 
        plane_reg_dump(sdev, splane, LDBnBSIFR);
@@ -160,75 +143,143 @@ static void __shmob_drm_plane_setup(struct shmob_drm_plane *splane,
        plane_reg_dump(sdev, splane, LDBnBSACR);
 }
 
-void shmob_drm_plane_setup(struct drm_plane *plane)
+static int shmob_drm_plane_atomic_check(struct drm_plane *plane,
+                                       struct drm_atomic_state *state)
 {
-       struct shmob_drm_plane *splane = to_shmob_plane(plane);
+       struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+       struct shmob_drm_plane_state *sstate = to_shmob_plane_state(new_plane_state);
+       struct drm_crtc_state *crtc_state;
+       bool is_primary = plane->type == DRM_PLANE_TYPE_PRIMARY;
+       int ret;
 
-       if (plane->fb == NULL)
-               return;
+       if (!new_plane_state->crtc) {
+               /*
+                * The visible field is not reset by the DRM core but only
+                * updated by drm_atomic_helper_check_plane_state(), set it
+                * manually.
+                */
+               new_plane_state->visible = false;
+               sstate->format = NULL;
+               return 0;
+       }
 
-       __shmob_drm_plane_setup(splane, plane->fb);
-}
+       crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
+       if (IS_ERR(crtc_state))
+               return PTR_ERR(crtc_state);
 
-static int
-shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
-                      struct drm_framebuffer *fb, int crtc_x, int crtc_y,
-                      unsigned int crtc_w, unsigned int crtc_h,
-                      uint32_t src_x, uint32_t src_y,
-                      uint32_t src_w, uint32_t src_h,
-                      struct drm_modeset_acquire_ctx *ctx)
-{
-       struct shmob_drm_plane *splane = to_shmob_plane(plane);
-       struct shmob_drm_device *sdev = plane->dev->dev_private;
-       const struct shmob_drm_format_info *format;
+       ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+                                                 DRM_PLANE_NO_SCALING,
+                                                 DRM_PLANE_NO_SCALING,
+                                                 !is_primary, true);
+       if (ret < 0)
+               return ret;
 
-       format = shmob_drm_format_info(fb->format->format);
-       if (format == NULL) {
-               dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n",
-                       fb->format->format);
-               return -EINVAL;
+       if (!new_plane_state->visible) {
+               sstate->format = NULL;
+               return 0;
        }
 
-       if (src_w >> 16 != crtc_w || src_h >> 16 != crtc_h) {
-               dev_dbg(sdev->dev, "%s: scaling not supported\n", __func__);
+       sstate->format = shmob_drm_format_info(new_plane_state->fb->format->format);
+       if (!sstate->format) {
+               dev_dbg(plane->dev->dev,
+                       "plane_atomic_check: unsupported format %p4cc\n",
+                       &new_plane_state->fb->format->format);
                return -EINVAL;
        }
 
-       splane->format = format;
+       shmob_drm_plane_compute_base(sstate);
 
-       splane->src_x = src_x >> 16;
-       splane->src_y = src_y >> 16;
-       splane->crtc_x = crtc_x;
-       splane->crtc_y = crtc_y;
-       splane->crtc_w = crtc_w;
-       splane->crtc_h = crtc_h;
-
-       __shmob_drm_plane_setup(splane, fb);
        return 0;
 }
 
-static int shmob_drm_plane_disable(struct drm_plane *plane,
-                                  struct drm_modeset_acquire_ctx *ctx)
+static void shmob_drm_plane_atomic_update(struct drm_plane *plane,
+                                         struct drm_atomic_state *state)
 {
+       struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
        struct shmob_drm_plane *splane = to_shmob_plane(plane);
-       struct shmob_drm_device *sdev = plane->dev->dev_private;
 
-       splane->format = NULL;
+       if (!new_plane_state->visible)
+               return;
 
+       if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+               shmob_drm_primary_plane_setup(splane, new_plane_state);
+       else
+               shmob_drm_overlay_plane_setup(splane, new_plane_state);
+}
+
+static void shmob_drm_plane_atomic_disable(struct drm_plane *plane,
+                                          struct drm_atomic_state *state)
+{
+       struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+       struct shmob_drm_device *sdev = to_shmob_device(plane->dev);
+       struct shmob_drm_plane *splane = to_shmob_plane(plane);
+
+       if (!old_state->crtc)
+               return;
+
+       if (plane->type != DRM_PLANE_TYPE_OVERLAY)
+               return;
+
+       lcdc_write(sdev, LDBCR, LDBCR_UPC(splane->index));
        lcdc_write(sdev, LDBnBSIFR(splane->index), 0);
-       return 0;
+       lcdc_write(sdev, LDBCR,
+                        LDBCR_UPF(splane->index) | LDBCR_UPD(splane->index));
 }
 
-static void shmob_drm_plane_destroy(struct drm_plane *plane)
+static struct drm_plane_state *
+shmob_drm_plane_atomic_duplicate_state(struct drm_plane *plane)
 {
-       drm_plane_force_disable(plane);
-       drm_plane_cleanup(plane);
+       struct shmob_drm_plane_state *state;
+       struct shmob_drm_plane_state *copy;
+
+       if (WARN_ON(!plane->state))
+               return NULL;
+
+       state = to_shmob_plane_state(plane->state);
+       copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
+       if (copy == NULL)
+               return NULL;
+
+       __drm_atomic_helper_plane_duplicate_state(plane, &copy->base);
+
+       return &copy->base;
 }
 
+static void shmob_drm_plane_atomic_destroy_state(struct drm_plane *plane,
+                                                struct drm_plane_state *state)
+{
+       __drm_atomic_helper_plane_destroy_state(state);
+       kfree(to_shmob_plane_state(state));
+}
+
+static void shmob_drm_plane_reset(struct drm_plane *plane)
+{
+       struct shmob_drm_plane_state *state;
+
+       if (plane->state) {
+               shmob_drm_plane_atomic_destroy_state(plane, plane->state);
+               plane->state = NULL;
+       }
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (state == NULL)
+               return;
+
+       __drm_atomic_helper_plane_reset(plane, &state->base);
+}
+
+static const struct drm_plane_helper_funcs shmob_drm_plane_helper_funcs = {
+       .atomic_check = shmob_drm_plane_atomic_check,
+       .atomic_update = shmob_drm_plane_atomic_update,
+       .atomic_disable = shmob_drm_plane_atomic_disable,
+};
+
 static const struct drm_plane_funcs shmob_drm_plane_funcs = {
-       .update_plane = shmob_drm_plane_update,
-       .disable_plane = shmob_drm_plane_disable,
-       .destroy = shmob_drm_plane_destroy,
+       .update_plane = drm_atomic_helper_update_plane,
+       .disable_plane = drm_atomic_helper_disable_plane,
+       .reset = shmob_drm_plane_reset,
+       .atomic_duplicate_state = shmob_drm_plane_atomic_duplicate_state,
+       .atomic_destroy_state = shmob_drm_plane_atomic_destroy_state,
 };
 
 static const uint32_t formats[] = {
@@ -244,22 +295,23 @@ static const uint32_t formats[] = {
        DRM_FORMAT_NV42,
 };
 
-int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index)
+struct drm_plane *shmob_drm_plane_create(struct shmob_drm_device *sdev,
+                                        enum drm_plane_type type,
+                                        unsigned int index)
 {
        struct shmob_drm_plane *splane;
-       int ret;
 
-       splane = devm_kzalloc(sdev->dev, sizeof(*splane), GFP_KERNEL);
-       if (splane == NULL)
-               return -ENOMEM;
+       splane = drmm_universal_plane_alloc(&sdev->ddev,
+                                           struct shmob_drm_plane, base, 1,
+                                           &shmob_drm_plane_funcs, formats,
+                                           ARRAY_SIZE(formats),  NULL, type,
+                                           NULL);
+       if (IS_ERR(splane))
+               return ERR_CAST(splane);
 
        splane->index = index;
-       splane->alpha = 255;
 
-       ret = drm_universal_plane_init(sdev->ddev, &splane->plane, 1,
-                                      &shmob_drm_plane_funcs,
-                                      formats, ARRAY_SIZE(formats), NULL,
-                                      DRM_PLANE_TYPE_OVERLAY, NULL);
+       drm_plane_helper_add(&splane->base, &shmob_drm_plane_helper_funcs);
 
-       return ret;
+       return &splane->base;
 }
index e72b21a4288fc23f8da1a19eff6a0edf3d36a4c3..dcfddd605899b05d5a8181226293d669b2208056 100644 (file)
@@ -13,7 +13,8 @@
 struct drm_plane;
 struct shmob_drm_device;
 
-int shmob_drm_plane_create(struct shmob_drm_device *sdev, unsigned int index);
-void shmob_drm_plane_setup(struct drm_plane *plane);
+struct drm_plane *shmob_drm_plane_create(struct shmob_drm_device *sdev,
+                                        enum drm_plane_type type,
+                                        unsigned int index);
 
 #endif /* __SHMOB_DRM_PLANE_H__ */
index 32f0857aec9f195e94b6fbd55cd9bd15776525a7..e0174f82e353718507bc46dd31d2fa75e01c2626 100644 (file)
@@ -910,7 +910,7 @@ static int ssd132x_primary_plane_atomic_check(struct drm_plane *plane,
        struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
        struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
        struct drm_crtc *crtc = plane_state->crtc;
-       struct drm_crtc_state *crtc_state;
+       struct drm_crtc_state *crtc_state = NULL;
        const struct drm_format_info *fi;
        unsigned int pitch;
        int ret;
index 5d12d7beef0eb30099b2dd2a1d8894cfa11ed9bf..ade3309ae042f1fba55c55c64b110b983abaa8df 100644 (file)
@@ -26,7 +26,7 @@ struct vc4_dummy_crtc *vc4_mock_pv(struct kunit *test,
        struct vc4_crtc *vc4_crtc;
        int ret;
 
-       dummy_crtc = kunit_kzalloc(test, sizeof(*dummy_crtc), GFP_KERNEL);
+       dummy_crtc = drmm_kzalloc(drm, sizeof(*dummy_crtc), GFP_KERNEL);
        KUNIT_ASSERT_NOT_NULL(test, dummy_crtc);
 
        vc4_crtc = &dummy_crtc->crtc;
index 6e11fcc9ef45e0647aa19d7fe53866a6ac6f3a3a..e70d7c3076acf168782c48301f3b3dfb9be21f22 100644 (file)
@@ -32,7 +32,7 @@ struct vc4_dummy_output *vc4_dummy_output(struct kunit *test,
        struct drm_encoder *enc;
        int ret;
 
-       dummy_output = kunit_kzalloc(test, sizeof(*dummy_output), GFP_KERNEL);
+       dummy_output = drmm_kzalloc(drm, sizeof(*dummy_output), GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dummy_output);
        dummy_output->encoder.type = vc4_encoder_type;
 
index fd887f76767544895e35e7812652dfdbed09fb94..28eb48dd5b3262b0ae05a0e2946b09b294e012a3 100644 (file)
@@ -158,6 +158,7 @@ config I2C_I801
            Alder Lake (PCH)
            Raptor Lake (PCH)
            Meteor Lake (SOC and PCH)
+           Birch Stream (SOC)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
index db45554327aedcb7c92d76d774fe624ab9608f57..dc52b35307256d663ac8167630492bbc97e8b4f2 100644 (file)
@@ -221,11 +221,10 @@ static int at91_twi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dev);
 
-       dev->clk = devm_clk_get(dev->dev, NULL);
+       dev->clk = devm_clk_get_enabled(dev->dev, NULL);
        if (IS_ERR(dev->clk))
-               return dev_err_probe(dev->dev, PTR_ERR(dev->clk), "no clock defined\n");
-
-       clk_prepare_enable(dev->clk);
+               return dev_err_probe(dev->dev, PTR_ERR(dev->clk),
+                                    "failed to enable clock\n");
 
        snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
        i2c_set_adapdata(&dev->adapter, dev);
@@ -254,8 +253,6 @@ static int at91_twi_probe(struct platform_device *pdev)
 
        rc = i2c_add_numbered_adapter(&dev->adapter);
        if (rc) {
-               clk_disable_unprepare(dev->clk);
-
                pm_runtime_disable(dev->dev);
                pm_runtime_set_suspended(dev->dev);
 
@@ -272,7 +269,6 @@ static void at91_twi_remove(struct platform_device *pdev)
        struct at91_twi_dev *dev = platform_get_drvdata(pdev);
 
        i2c_del_adapter(&dev->adapter);
-       clk_disable_unprepare(dev->clk);
 
        pm_runtime_disable(dev->dev);
        pm_runtime_set_suspended(dev->dev);
index d7f1e98777ace48b929c3eccd51f841b805d27d7..a66f7f67b3b880b97a3365573873da18c174b6f7 100644 (file)
  * @i2c_clk: clock reference for i2c input clock
  * @bus_clk_rate: current i2c bus clock rate
  * @last: a flag indicating is this is last message in transfer
+ * @slave: associated &i2c_client
+ * @irq: platform device IRQ number
  */
 struct axxia_i2c_dev {
        void __iomem *base;
@@ -165,7 +167,7 @@ static void i2c_int_enable(struct axxia_i2c_dev *idev, u32 mask)
        writel(int_en | mask, idev->base + MST_INT_ENABLE);
 }
 
-/**
+/*
  * ns_to_clk - Convert time (ns) to clock cycles for the given clock frequency.
  */
 static u32 ns_to_clk(u64 ns, u32 clk_mhz)
@@ -263,7 +265,7 @@ static int i2c_m_recv_len(const struct i2c_msg *msg)
        return (msg->flags & I2C_M_RECV_LEN) != 0;
 }
 
-/**
+/*
  * axxia_i2c_empty_rx_fifo - Fetch data from RX FIFO and update SMBus block
  * transfer length if this is the first byte of such a transfer.
  */
@@ -295,7 +297,7 @@ static int axxia_i2c_empty_rx_fifo(struct axxia_i2c_dev *idev)
        return 0;
 }
 
-/**
+/*
  * axxia_i2c_fill_tx_fifo - Fill TX FIFO from current message buffer.
  * @return: Number of bytes left to transfer.
  */
index 51aab662050b1fc795dbaf578d27b36fdfb75c92..e905734c26a049c890b9e50a5270a8c9ee028502 100644 (file)
@@ -316,26 +316,44 @@ static void bcm_iproc_i2c_slave_init(
        iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
 }
 
-static void bcm_iproc_i2c_check_slave_status(
-       struct bcm_iproc_i2c_dev *iproc_i2c)
+static bool bcm_iproc_i2c_check_slave_status
+       (struct bcm_iproc_i2c_dev *iproc_i2c, u32 status)
 {
        u32 val;
+       bool recover = false;
 
-       val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
-       /* status is valid only when START_BUSY is cleared after it was set */
-       if (val & BIT(S_CMD_START_BUSY_SHIFT))
-               return;
+       /* check slave transmit status only if slave is transmitting */
+       if (!iproc_i2c->slave_rx_only) {
+               val = iproc_i2c_rd_reg(iproc_i2c, S_CMD_OFFSET);
+               /* status is valid only when START_BUSY is cleared */
+               if (!(val & BIT(S_CMD_START_BUSY_SHIFT))) {
+                       val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
+                       if (val == S_CMD_STATUS_TIMEOUT ||
+                           val == S_CMD_STATUS_MASTER_ABORT) {
+                               dev_warn(iproc_i2c->device,
+                                        (val == S_CMD_STATUS_TIMEOUT) ?
+                                        "slave random stretch time timeout\n" :
+                                        "Master aborted read transaction\n");
+                               recover = true;
+                       }
+               }
+       }
+
+       /* RX_EVENT is not valid when START_BUSY is set */
+       if ((status & BIT(IS_S_RX_EVENT_SHIFT)) &&
+           (status & BIT(IS_S_START_BUSY_SHIFT))) {
+               dev_warn(iproc_i2c->device, "Slave aborted read transaction\n");
+               recover = true;
+       }
 
-       val = (val >> S_CMD_STATUS_SHIFT) & S_CMD_STATUS_MASK;
-       if (val == S_CMD_STATUS_TIMEOUT || val == S_CMD_STATUS_MASTER_ABORT) {
-               dev_err(iproc_i2c->device, (val == S_CMD_STATUS_TIMEOUT) ?
-                       "slave random stretch time timeout\n" :
-                       "Master aborted read transaction\n");
+       if (recover) {
                /* re-initialize i2c for recovery */
                bcm_iproc_i2c_enable_disable(iproc_i2c, false);
                bcm_iproc_i2c_slave_init(iproc_i2c, true);
                bcm_iproc_i2c_enable_disable(iproc_i2c, true);
        }
+
+       return recover;
 }
 
 static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
@@ -420,48 +438,6 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
        u32 val;
        u8 value;
 
-       /*
-        * Slave events in case of master-write, master-write-read and,
-        * master-read
-        *
-        * Master-write     : only IS_S_RX_EVENT_SHIFT event
-        * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
-        *                    events
-        * Master-read      : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
-        *                    events or only IS_S_RD_EVENT_SHIFT
-        *
-        * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
-        * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
-        * full. This can happen if Master issues write requests of more than
-        * 64 bytes.
-        */
-       if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
-           status & BIT(IS_S_RD_EVENT_SHIFT) ||
-           status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
-               /* disable slave interrupts */
-               val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
-               val &= ~iproc_i2c->slave_int_mask;
-               iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
-
-               if (status & BIT(IS_S_RD_EVENT_SHIFT))
-                       /* Master-write-read request */
-                       iproc_i2c->slave_rx_only = false;
-               else
-                       /* Master-write request only */
-                       iproc_i2c->slave_rx_only = true;
-
-               /* schedule tasklet to read data later */
-               tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
-
-               /*
-                * clear only IS_S_RX_EVENT_SHIFT and
-                * IS_S_RX_FIFO_FULL_SHIFT interrupt.
-                */
-               val = BIT(IS_S_RX_EVENT_SHIFT);
-               if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT))
-                       val |= BIT(IS_S_RX_FIFO_FULL_SHIFT);
-               iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
-       }
 
        if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
                iproc_i2c->tx_underrun++;
@@ -493,8 +469,9 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
                 * less than PKT_LENGTH bytes were output on the SMBUS
                 */
                iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
-               iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
-                                iproc_i2c->slave_int_mask);
+               val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+               val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+               iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
 
                /* End of SMBUS for Master Read */
                val = BIT(S_TX_WR_STATUS_SHIFT);
@@ -515,9 +492,49 @@ static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
                                 BIT(IS_S_START_BUSY_SHIFT));
        }
 
-       /* check slave transmit status only if slave is transmitting */
-       if (!iproc_i2c->slave_rx_only)
-               bcm_iproc_i2c_check_slave_status(iproc_i2c);
+       /* if the controller has been reset, immediately return from the ISR */
+       if (bcm_iproc_i2c_check_slave_status(iproc_i2c, status))
+               return true;
+
+       /*
+        * Slave events in case of master-write, master-write-read and,
+        * master-read
+        *
+        * Master-write     : only IS_S_RX_EVENT_SHIFT event
+        * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+        *                    events
+        * Master-read      : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
+        *                    events or only IS_S_RD_EVENT_SHIFT
+        *
+        * iproc has a slave rx fifo size of 64 bytes. Rx fifo full interrupt
+        * (IS_S_RX_FIFO_FULL_SHIFT) will be generated when RX fifo becomes
+        * full. This can happen if Master issues write requests of more than
+        * 64 bytes.
+        */
+       if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
+           status & BIT(IS_S_RD_EVENT_SHIFT) ||
+           status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
+               /* disable slave interrupts */
+               val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+               val &= ~iproc_i2c->slave_int_mask;
+               iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+
+               if (status & BIT(IS_S_RD_EVENT_SHIFT))
+                       /* Master-write-read request */
+                       iproc_i2c->slave_rx_only = false;
+               else
+                       /* Master-write request only */
+                       iproc_i2c->slave_rx_only = true;
+
+               /* schedule tasklet to read data later */
+               tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
+
+               /* clear IS_S_RX_FIFO_FULL_SHIFT interrupt */
+               if (status & BIT(IS_S_RX_FIFO_FULL_SHIFT)) {
+                       val = BIT(IS_S_RX_FIFO_FULL_SHIFT);
+                       iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, val);
+               }
+       }
 
        return true;
 }
index acee7673254417c914652cecfb539b23d61440aa..38f276c99193b29a7a4a94b126b7acaa8edcdb1d 100644 (file)
@@ -160,6 +160,7 @@ struct brcmstb_i2c_dev {
        struct completion done;
        u32 clk_freq_hz;
        int data_regsz;
+       bool atomic;
 };
 
 /* register accessors for both be and le cpu arch */
@@ -240,7 +241,7 @@ static int brcmstb_i2c_wait_for_completion(struct brcmstb_i2c_dev *dev)
        int ret = 0;
        unsigned long timeout = msecs_to_jiffies(I2C_TIMEOUT);
 
-       if (dev->irq >= 0) {
+       if (dev->irq >= 0 && !dev->atomic) {
                if (!wait_for_completion_timeout(&dev->done, timeout))
                        ret = -ETIMEDOUT;
        } else {
@@ -287,7 +288,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev,
                return rc;
 
        /* only if we are in interrupt mode */
-       if (dev->irq >= 0)
+       if (dev->irq >= 0 && !dev->atomic)
                reinit_completion(&dev->done);
 
        /* enable BSC CTL interrupt line */
@@ -520,6 +521,23 @@ out:
 
 }
 
+static int brcmstb_i2c_xfer_atomic(struct i2c_adapter *adapter,
+                                  struct i2c_msg msgs[], int num)
+{
+       struct brcmstb_i2c_dev *dev = i2c_get_adapdata(adapter);
+       int ret;
+
+       if (dev->irq >= 0)
+               disable_irq(dev->irq);
+       dev->atomic = true;
+       ret = brcmstb_i2c_xfer(adapter, msgs, num);
+       dev->atomic = false;
+       if (dev->irq >= 0)
+               enable_irq(dev->irq);
+
+       return ret;
+}
+
 static u32 brcmstb_i2c_functionality(struct i2c_adapter *adap)
 {
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR
@@ -528,6 +546,7 @@ static u32 brcmstb_i2c_functionality(struct i2c_adapter *adap)
 
 static const struct i2c_algorithm brcmstb_i2c_algo = {
        .master_xfer = brcmstb_i2c_xfer,
+       .master_xfer_atomic = brcmstb_i2c_xfer_atomic,
        .functionality = brcmstb_i2c_functionality,
 };
 
index 3ded28632e4c1d52584ff4fc257b614c3c203658..cf3747d870342e3649e290c8f254aa586c36a67a 100644 (file)
@@ -85,7 +85,7 @@ static int cp2615_init_iop_msg(struct cp2615_iop_msg *ret, enum cp2615_iop_msg_t
        if (!ret)
                return -EINVAL;
 
-       ret->preamble = 0x2A2A;
+       ret->preamble = htons(0x2A2AU);
        ret->length = htons(data_len + 6);
        ret->msg = htons(msg);
        if (data && data_len)
@@ -298,7 +298,7 @@ cp2615_i2c_probe(struct usb_interface *usbif, const struct usb_device_id *id)
        if (!adap)
                return -ENOMEM;
 
-       strncpy(adap->name, usbdev->serial, sizeof(adap->name) - 1);
+       strscpy(adap->name, usbdev->serial, sizeof(adap->name));
        adap->owner = THIS_MODULE;
        adap->dev.parent = &usbif->dev;
        adap->dev.of_node = usbif->dev.of_node;
index ca1035e010c7224ca38dbf6172773be29d22b66f..85dbd0eb5392c53017ffec96bf3fca6646fa70c4 100644 (file)
@@ -519,10 +519,16 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
 
                /*
                 * Because we don't know the buffer length in the
-                * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop
-                * the transaction here.
+                * I2C_FUNC_SMBUS_BLOCK_DATA case, we can't stop the
+                * transaction here. Also disable the TX_EMPTY IRQ
+                * while waiting for the data length byte to avoid the
+                * bogus interrupts flood.
                 */
-               if (buf_len > 0 || flags & I2C_M_RECV_LEN) {
+               if (flags & I2C_M_RECV_LEN) {
+                       dev->status |= STATUS_WRITE_IN_PROGRESS;
+                       intr_mask &= ~DW_IC_INTR_TX_EMPTY;
+                       break;
+               } else if (buf_len > 0) {
                        /* more bytes to be written */
                        dev->status |= STATUS_WRITE_IN_PROGRESS;
                        break;
@@ -558,6 +564,13 @@ i2c_dw_recv_len(struct dw_i2c_dev *dev, u8 len)
        msgs[dev->msg_read_idx].len = len;
        msgs[dev->msg_read_idx].flags &= ~I2C_M_RECV_LEN;
 
+       /*
+        * Received buffer length, re-enable TX_EMPTY interrupt
+        * to resume the SMBUS transaction.
+        */
+       regmap_update_bits(dev->map, DW_IC_INTR_MASK, DW_IC_INTR_TX_EMPTY,
+                          DW_IC_INTR_TX_EMPTY);
+
        return len;
 }
 
index 2b0b9cdffa861bc123f240d9401df62048167f5f..385ef9d9e4d4c00547e69da0b64a56058b82e6e5 100644 (file)
@@ -194,6 +194,11 @@ struct exynos5_i2c {
         */
        int                     trans_done;
 
+       /*
+        * Called from atomic context, don't use interrupts.
+        */
+       unsigned int            atomic;
+
        /* Controller operating frequency */
        unsigned int            op_clock;
 
@@ -265,7 +270,7 @@ static void exynos5_i2c_clr_pend_irq(struct exynos5_i2c *i2c)
  * exynos5_i2c_set_timing: updates the registers with appropriate
  * timing values calculated
  *
- * Timing values for operation are calculated against either 100kHz
+ * Timing values for operation are calculated against 100kHz, 400kHz
  * or 1MHz controller operating frequency.
  *
  * Returns 0 on success, -EINVAL if the cycle length cannot
@@ -328,6 +333,23 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
         *
         * Constraints: 4 <= temp, 0 <= CLK_DIV < 256, 2 <= clk_cycle <= 510
         *
+        * To split SCL clock into low, high periods appropriately, one
+        * proportion factor for each I2C mode is used, which is calculated
+        * using this formula.
+        * ```
+        * ((t_low_min + (scl_clock - t_low_min - t_high_min) / 2) / scl_clock)
+        * ```
+        * where:
+        * t_low_min is the minimal value of low period of the SCL clock in us;
+        * t_high_min is the minimal value of high period of the SCL clock in us;
+        * scl_clock is converted from SCL clock frequency into us.
+        *
+        * Below are the proportion factors for these I2C modes:
+        *                t_low_min, t_high_min, scl_clock, proportion
+        * Standard Mode:     4.7us,      4.0us,      10us,      0.535
+        * Fast Mode:         1.3us,      0.6us,     2.5us,       0.64
+        * Fast-Plus Mode:    0.5us,     0.26us,       1us,       0.62
+        *
         */
        t_ftl_cycle = (readl(i2c->regs + HSI2C_CONF) >> 16) & 0x7;
        temp = clkin / op_clk - 8 - t_ftl_cycle;
@@ -341,8 +363,19 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
                return -EINVAL;
        }
 
-       t_scl_l = clk_cycle / 2;
-       t_scl_h = clk_cycle / 2;
+       /*
+        * Scale clk_cycle to get t_scl_l using the proption factors for individual I2C modes.
+        */
+       if (op_clk <= I2C_MAX_STANDARD_MODE_FREQ)
+               t_scl_l = clk_cycle * 535 / 1000;
+       else if (op_clk <= I2C_MAX_FAST_MODE_FREQ)
+               t_scl_l = clk_cycle * 64 / 100;
+       else
+               t_scl_l = clk_cycle * 62 / 100;
+
+       if (t_scl_l > 0xFF)
+               t_scl_l = 0xFF;
+       t_scl_h = clk_cycle - t_scl_l;
        t_start_su = t_scl_l;
        t_start_hd = t_scl_l;
        t_stop_su = t_scl_l;
@@ -711,6 +744,22 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
        spin_unlock_irqrestore(&i2c->lock, flags);
 }
 
+static bool exynos5_i2c_poll_irqs_timeout(struct exynos5_i2c *i2c,
+                                         unsigned long timeout)
+{
+       unsigned long time_left = jiffies + timeout;
+
+       while (time_before(jiffies, time_left) &&
+              !((i2c->trans_done && (i2c->msg->len == i2c->msg_ptr)) ||
+                (i2c->state < 0))) {
+               while (readl(i2c->regs + HSI2C_INT_ENABLE) &
+                      readl(i2c->regs + HSI2C_INT_STATUS))
+                       exynos5_i2c_irq(i2c->irq, i2c);
+               usleep_range(100, 200);
+       }
+       return time_before(jiffies, time_left);
+}
+
 static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c,
                              struct i2c_msg *msgs, int stop)
 {
@@ -725,8 +774,13 @@ static int exynos5_i2c_xfer_msg(struct exynos5_i2c *i2c,
 
        exynos5_i2c_message_start(i2c, stop);
 
-       timeout = wait_for_completion_timeout(&i2c->msg_complete,
-                                             EXYNOS5_I2C_TIMEOUT);
+       if (!i2c->atomic)
+               timeout = wait_for_completion_timeout(&i2c->msg_complete,
+                                                     EXYNOS5_I2C_TIMEOUT);
+       else
+               timeout = exynos5_i2c_poll_irqs_timeout(i2c,
+                                                       EXYNOS5_I2C_TIMEOUT);
+
        if (timeout == 0)
                ret = -ETIMEDOUT;
        else
@@ -777,6 +831,21 @@ err_pclk:
        return ret ?: num;
 }
 
+static int exynos5_i2c_xfer_atomic(struct i2c_adapter *adap,
+                                  struct i2c_msg *msgs, int num)
+{
+       struct exynos5_i2c *i2c = adap->algo_data;
+       int ret;
+
+       disable_irq(i2c->irq);
+       i2c->atomic = true;
+       ret = exynos5_i2c_xfer(adap, msgs, num);
+       i2c->atomic = false;
+       enable_irq(i2c->irq);
+
+       return ret;
+}
+
 static u32 exynos5_i2c_func(struct i2c_adapter *adap)
 {
        return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
@@ -784,6 +853,7 @@ static u32 exynos5_i2c_func(struct i2c_adapter *adap)
 
 static const struct i2c_algorithm exynos5_i2c_algorithm = {
        .master_xfer            = exynos5_i2c_xfer,
+       .master_xfer_atomic     = exynos5_i2c_xfer_atomic,
        .functionality          = exynos5_i2c_func,
 };
 
index e5a5b9e8bf2c760719eafa5298cf22755c9f25a0..fb35a75fe0e32f54bac273b4a24404c16bf7e663 100644 (file)
@@ -263,15 +263,10 @@ static void i2c_gpio_fault_injector_init(struct platform_device *pdev)
         * 'fault-injector' dir there. Until then, we have a global dir with
         * all adapters as subdirs.
         */
-       if (!i2c_gpio_debug_dir) {
+       if (!i2c_gpio_debug_dir)
                i2c_gpio_debug_dir = debugfs_create_dir("i2c-fault-injector", NULL);
-               if (!i2c_gpio_debug_dir)
-                       return;
-       }
 
        priv->debug_dir = debugfs_create_dir(pdev->name, i2c_gpio_debug_dir);
-       if (!priv->debug_dir)
-               return;
 
        init_completion(&priv->scl_irq_completion);
 
index 1d855258a45dc3c68c71280cb791f77383f38457..070999139c6dcbe24ddde994543cf1ec9df74e82 100644 (file)
@@ -79,6 +79,7 @@
  * Meteor Lake-P (SOC)         0x7e22  32      hard    yes     yes     yes
  * Meteor Lake SoC-S (SOC)     0xae22  32      hard    yes     yes     yes
  * Meteor Lake PCH-S (PCH)     0x7f23  32      hard    yes     yes     yes
+ * Birch Stream (SOC)          0x5796  32      hard    yes     yes     yes
  *
  * Features supported by this driver:
  * Software PEC                                no
 #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS          0x4da3
 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS         0x51a3
 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS         0x54a3
+#define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS         0x5796
 #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS              0x5ad4
 #define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS                0x7a23
 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS         0x7aa3
@@ -285,7 +287,6 @@ struct i801_priv {
        u8 *data;
 
 #if IS_ENABLED(CONFIG_I2C_MUX_GPIO) && defined CONFIG_DMI
-       const struct i801_mux_config *mux_drvdata;
        struct platform_device *mux_pdev;
        struct gpiod_lookup_table *lookup;
 #endif
@@ -293,10 +294,9 @@ struct i801_priv {
 
        /*
         * If set to true the host controller registers are reserved for
-        * ACPI AML use. Protected by acpi_lock.
+        * ACPI AML use.
         */
        bool acpi_reserved;
-       struct mutex acpi_lock;
 };
 
 #define FEATURE_SMBUS_PEC      BIT(0)
@@ -679,15 +679,11 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
                return result ? priv->status : -ETIMEDOUT;
        }
 
-       for (i = 1; i <= len; i++) {
-               if (i == len && read_write == I2C_SMBUS_READ)
-                       smbcmd |= SMBHSTCNT_LAST_BYTE;
-               outb_p(smbcmd, SMBHSTCNT(priv));
-
-               if (i == 1)
-                       outb_p(inb(SMBHSTCNT(priv)) | SMBHSTCNT_START,
-                              SMBHSTCNT(priv));
+       if (len == 1 && read_write == I2C_SMBUS_READ)
+               smbcmd |= SMBHSTCNT_LAST_BYTE;
+       outb_p(smbcmd | SMBHSTCNT_START, SMBHSTCNT(priv));
 
+       for (i = 1; i <= len; i++) {
                status = i801_wait_byte_done(priv);
                if (status)
                        return status;
@@ -710,9 +706,12 @@ static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
                        data->block[0] = len;
                }
 
-               /* Retrieve/store value in SMBBLKDAT */
-               if (read_write == I2C_SMBUS_READ)
+               if (read_write == I2C_SMBUS_READ) {
                        data->block[i] = inb_p(SMBBLKDAT(priv));
+                       if (i == len - 1)
+                               outb_p(smbcmd | SMBHSTCNT_LAST_BYTE, SMBHSTCNT(priv));
+               }
+
                if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
                        outb_p(data->block[i+1], SMBBLKDAT(priv));
 
@@ -875,11 +874,8 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
        int hwpec, ret;
        struct i801_priv *priv = i2c_get_adapdata(adap);
 
-       mutex_lock(&priv->acpi_lock);
-       if (priv->acpi_reserved) {
-               mutex_unlock(&priv->acpi_lock);
+       if (priv->acpi_reserved)
                return -EBUSY;
-       }
 
        pm_runtime_get_sync(&priv->pci_dev->dev);
 
@@ -920,7 +916,6 @@ out:
 
        pm_runtime_mark_last_busy(&priv->pci_dev->dev);
        pm_runtime_put_autosuspend(&priv->pci_dev->dev);
-       mutex_unlock(&priv->acpi_lock);
        return ret;
 }
 
@@ -1044,6 +1039,7 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_P_SMBUS,           FEATURES_ICH5 | FEATURE_TCO_CNL) },
        { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS,       FEATURES_ICH5 | FEATURE_TCO_CNL) },
        { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS,       FEATURES_ICH5 | FEATURE_TCO_CNL) },
+       { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS,            FEATURES_ICH5 | FEATURE_TCO_CNL) },
        { 0, }
 };
 
@@ -1288,7 +1284,7 @@ static void i801_probe_optional_slaves(struct i801_priv *priv)
 
        /* Instantiate SPD EEPROMs unless the SMBus is multiplexed */
 #if IS_ENABLED(CONFIG_I2C_MUX_GPIO)
-       if (!priv->mux_drvdata)
+       if (!priv->mux_pdev)
 #endif
                i2c_register_spd(&priv->adapter);
 }
@@ -1390,11 +1386,14 @@ static void i801_add_mux(struct i801_priv *priv)
        const struct i801_mux_config *mux_config;
        struct i2c_mux_gpio_platform_data gpio_data;
        struct gpiod_lookup_table *lookup;
+       const struct dmi_system_id *id;
        int i;
 
-       if (!priv->mux_drvdata)
+       id = dmi_first_match(mux_dmi_table);
+       if (!id)
                return;
-       mux_config = priv->mux_drvdata;
+
+       mux_config = id->driver_data;
 
        /* Prepare the platform data */
        memset(&gpio_data, 0, sizeof(struct i2c_mux_gpio_platform_data));
@@ -1438,35 +1437,9 @@ static void i801_del_mux(struct i801_priv *priv)
        platform_device_unregister(priv->mux_pdev);
        gpiod_remove_lookup_table(priv->lookup);
 }
-
-static unsigned int i801_get_adapter_class(struct i801_priv *priv)
-{
-       const struct dmi_system_id *id;
-       const struct i801_mux_config *mux_config;
-       unsigned int class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
-       int i;
-
-       id = dmi_first_match(mux_dmi_table);
-       if (id) {
-               /* Remove branch classes from trunk */
-               mux_config = id->driver_data;
-               for (i = 0; i < mux_config->n_values; i++)
-                       class &= ~mux_config->classes[i];
-
-               /* Remember for later */
-               priv->mux_drvdata = mux_config;
-       }
-
-       return class;
-}
 #else
 static inline void i801_add_mux(struct i801_priv *priv) { }
 static inline void i801_del_mux(struct i801_priv *priv) { }
-
-static inline unsigned int i801_get_adapter_class(struct i801_priv *priv)
-{
-       return I2C_CLASS_HWMON | I2C_CLASS_SPD;
-}
 #endif
 
 static struct platform_device *
@@ -1572,7 +1545,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
         * further access from the driver itself. This device is now owned
         * by the system firmware.
         */
-       mutex_lock(&priv->acpi_lock);
+       i2c_lock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
 
        if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
                priv->acpi_reserved = true;
@@ -1592,7 +1565,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
        else
                status = acpi_os_write_port(address, (u32)*value, bits);
 
-       mutex_unlock(&priv->acpi_lock);
+       i2c_unlock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
 
        return status;
 }
@@ -1630,6 +1603,12 @@ static void i801_setup_hstcfg(struct i801_priv *priv)
        pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hstcfg);
 }
 
+static void i801_restore_regs(struct i801_priv *priv)
+{
+       outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
+       pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg);
+}
+
 static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        int err, i;
@@ -1641,12 +1620,11 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
 
        i2c_set_adapdata(&priv->adapter, priv);
        priv->adapter.owner = THIS_MODULE;
-       priv->adapter.class = i801_get_adapter_class(priv);
+       priv->adapter.class = I2C_CLASS_HWMON;
        priv->adapter.algo = &smbus_algorithm;
        priv->adapter.dev.parent = &dev->dev;
-       ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev));
+       acpi_use_parent_companion(&priv->adapter.dev);
        priv->adapter.retries = 3;
-       mutex_init(&priv->acpi_lock);
 
        priv->pci_dev = dev;
        priv->features = id->driver_data;
@@ -1756,6 +1734,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (err) {
                platform_device_unregister(priv->tco_pdev);
                i801_acpi_remove(priv);
+               i801_restore_regs(priv);
                return err;
        }
 
@@ -1780,12 +1759,10 @@ static void i801_remove(struct pci_dev *dev)
 {
        struct i801_priv *priv = pci_get_drvdata(dev);
 
-       outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
        i801_disable_host_notify(priv);
        i801_del_mux(priv);
        i2c_del_adapter(&priv->adapter);
        i801_acpi_remove(priv);
-       pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
 
        platform_device_unregister(priv->tco_pdev);
 
@@ -1793,6 +1770,8 @@ static void i801_remove(struct pci_dev *dev)
        if (!priv->acpi_reserved)
                pm_runtime_get_noresume(&dev->dev);
 
+       i801_restore_regs(priv);
+
        /*
         * do not call pci_disable_device(dev) since it can cause hard hangs on
         * some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
@@ -1803,18 +1782,18 @@ static void i801_shutdown(struct pci_dev *dev)
 {
        struct i801_priv *priv = pci_get_drvdata(dev);
 
-       /* Restore config registers to avoid hard hang on some systems */
-       outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
        i801_disable_host_notify(priv);
-       pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
+       /* Restore config registers to avoid hard hang on some systems */
+       i801_restore_regs(priv);
 }
 
 static int i801_suspend(struct device *dev)
 {
        struct i801_priv *priv = dev_get_drvdata(dev);
 
-       outb_p(priv->original_hstcnt, SMBHSTCNT(priv));
-       pci_write_config_byte(priv->pci_dev, SMBHSTCFG, priv->original_hstcfg);
+       i2c_mark_adapter_suspended(&priv->adapter);
+       i801_restore_regs(priv);
+
        return 0;
 }
 
@@ -1824,6 +1803,7 @@ static int i801_resume(struct device *dev)
 
        i801_setup_hstcfg(priv);
        i801_enable_host_notify(&priv->adapter);
+       i2c_mark_adapter_resumed(&priv->adapter);
 
        return 0;
 }
@@ -1842,16 +1822,11 @@ static struct pci_driver i801_driver = {
        },
 };
 
-static int __init i2c_i801_init(void)
+static int __init i2c_i801_init(struct pci_driver *drv)
 {
        if (dmi_name_in_vendors("FUJITSU"))
                input_apanel_init();
-       return pci_register_driver(&i801_driver);
-}
-
-static void __exit i2c_i801_exit(void)
-{
-       pci_unregister_driver(&i801_driver);
+       return pci_register_driver(drv);
 }
 
 MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>");
@@ -1859,5 +1834,4 @@ MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
 MODULE_DESCRIPTION("I801 SMBus driver");
 MODULE_LICENSE("GPL");
 
-module_init(i2c_i801_init);
-module_exit(i2c_i801_exit);
+module_driver(i801_driver, i2c_i801_init, pci_unregister_driver);
index 1a9b5a068ef1b2e45c6d3e4d995563969a7561d4..a8b5719c33729ec888fa56bfce82bc8b90bc4ebe 100644 (file)
@@ -1442,15 +1442,19 @@ static int mtk_i2c_probe(struct platform_device *pdev)
        if (IS_ERR(i2c->clocks[I2C_MT65XX_CLK_ARB].clk))
                return PTR_ERR(i2c->clocks[I2C_MT65XX_CLK_ARB].clk);
 
+       i2c->clocks[I2C_MT65XX_CLK_PMIC].clk = devm_clk_get_optional(&pdev->dev, "pmic");
+       if (IS_ERR(i2c->clocks[I2C_MT65XX_CLK_PMIC].clk)) {
+               dev_err(&pdev->dev, "cannot get pmic clock\n");
+               return PTR_ERR(i2c->clocks[I2C_MT65XX_CLK_PMIC].clk);
+       }
+
        if (i2c->have_pmic) {
-               i2c->clocks[I2C_MT65XX_CLK_PMIC].clk = devm_clk_get(&pdev->dev, "pmic");
-               if (IS_ERR(i2c->clocks[I2C_MT65XX_CLK_PMIC].clk)) {
+               if (!i2c->clocks[I2C_MT65XX_CLK_PMIC].clk) {
                        dev_err(&pdev->dev, "cannot get pmic clock\n");
-                       return PTR_ERR(i2c->clocks[I2C_MT65XX_CLK_PMIC].clk);
+                       return -ENODEV;
                }
                speed_clk = I2C_MT65XX_CLK_PMIC;
        } else {
-               i2c->clocks[I2C_MT65XX_CLK_PMIC].clk = NULL;
                speed_clk = I2C_MT65XX_CLK_MAIN;
        }
 
index fd8403b07fa61545de036306dee52a20ada121a1..dc160cbc315531e06868415a20b2541c8e1091be 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/reset.h>
 #include <linux/io.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/delay.h>
@@ -859,7 +858,7 @@ static int
 mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
                  struct device *dev)
 {
-       const struct of_device_id *device;
+       const struct mv64xxx_i2c_regs *data;
        struct device_node *np = dev->of_node;
        u32 bus_freq, tclk;
        int rc = 0;
@@ -897,11 +896,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
         */
        drv_data->adapter.timeout = HZ;
 
-       device = of_match_device(mv64xxx_i2c_of_match_table, dev);
-       if (!device)
+       data = device_get_match_data(dev);
+       if (!data)
                return -ENODEV;
 
-       memcpy(&drv_data->reg_offsets, device->data, sizeof(drv_data->reg_offsets));
+       memcpy(&drv_data->reg_offsets, data, sizeof(drv_data->reg_offsets));
 
        /*
         * For controllers embedded in new SoCs activate the
index 58fd6fa3edf1449c30fe713adca8273e054e52fd..42165ef57946ce1aefa76436ca7e31dff2482be1 100644 (file)
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/slab.h>
 #include <linux/platform_data/i2c-omap.h>
 #include <linux/pm_runtime.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/property.h>
 
 /* I2C controller revisions */
 #define OMAP_I2C_OMAP1_REV_2           0x20
@@ -1358,7 +1358,6 @@ omap_i2c_probe(struct platform_device *pdev)
        const struct omap_i2c_bus_platform_data *pdata =
                dev_get_platdata(&pdev->dev);
        struct device_node      *node = pdev->dev.of_node;
-       const struct of_device_id *match;
        int irq;
        int r;
        u32 rev;
@@ -1376,11 +1375,10 @@ omap_i2c_probe(struct platform_device *pdev)
        if (IS_ERR(omap->base))
                return PTR_ERR(omap->base);
 
-       match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
-       if (match) {
+       if (pdev->dev.of_node) {
                u32 freq = I2C_MAX_STANDARD_MODE_FREQ;
 
-               pdata = match->data;
+               pdata = device_get_match_data(&pdev->dev);
                omap->flags = pdata->flags;
 
                of_property_read_u32(node, "clock-frequency", &freq);
index 4996a628fdae1bb41fee3bd440e685dcfcdab6df..8e57ebe595be501e3e4ef16d29a3d79537fd29f2 100644 (file)
@@ -231,7 +231,7 @@ static void i2c_powermac_create_one(struct i2c_adapter *adap,
        struct i2c_board_info info = {};
        struct i2c_client *newdev;
 
-       strncpy(info.type, type, sizeof(info.type));
+       strscpy(info.type, type, sizeof(info.type));
        info.addr = addr;
        newdev = i2c_new_client_device(adap, &info);
        if (IS_ERR(newdev))
index 29be05af826b05a3a283d75db7ca82068978c42f..1d76482427492113886af4ae84943092106e8479 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/platform_data/i2c-pxa.h>
+#include <linux/property.h>
 #include <linux/slab.h>
 
 /* I2C register field definitions */
@@ -1252,10 +1253,8 @@ static int i2c_pxa_probe_dt(struct platform_device *pdev, struct pxa_i2c *i2c,
                            enum pxa_i2c_types *i2c_types)
 {
        struct device_node *np = pdev->dev.of_node;
-       const struct of_device_id *of_id =
-                       of_match_device(i2c_pxa_dt_ids, &pdev->dev);
 
-       if (!of_id)
+       if (!pdev->dev.of_node)
                return 1;
 
        /* For device tree we always use the dynamic or alias-assigned ID */
@@ -1264,7 +1263,7 @@ static int i2c_pxa_probe_dt(struct platform_device *pdev, struct pxa_i2c *i2c,
        i2c->use_pio = of_property_read_bool(np, "mrvl,i2c-polling");
        i2c->fast_mode = of_property_read_bool(np, "mrvl,i2c-fast-mode");
 
-       *i2c_types = (enum pxa_i2c_types)(of_id->data);
+       *i2c_types = (enum pxa_i2c_types)device_get_match_data(&pdev->dev);
 
        return 0;
 }
index 229353e96e095486e87feb436f865bb09d7c0d8c..6d829ed2f868f733540cf9cee1cbb2714e45b345 100644 (file)
@@ -722,6 +722,7 @@ static const struct i2c_algorithm geni_i2c_algo = {
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id geni_i2c_acpi_match[] = {
        { "QCOM0220"},
+       { "QCOM0411" },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, geni_i2c_acpi_match);
index a32a93f9a60d03db0a9cb92ba3c04ffbd43984dd..829ac053bbb7c4a1203a7fc878b2aebf28600d22 100644 (file)
 #define ICSAR  0x1C    /* slave address */
 #define ICMAR  0x20    /* master address */
 #define ICRXTX 0x24    /* data port */
+#define ICCCR2 0x28    /* Clock control 2 */
+#define ICMPR  0x2C    /* SCL mask control */
+#define ICHPR  0x30    /* SCL HIGH control */
+#define ICLPR  0x34    /* SCL LOW control */
 #define ICFBSCR        0x38    /* first bit setup cycle (Gen3) */
 #define ICDMAER        0x3c    /* DMA enable (Gen3) */
 
 #define RMDMAE BIT(1)  /* DMA Master Received Enable */
 #define TMDMAE BIT(0)  /* DMA Master Transmitted Enable */
 
+/* ICCCR2 */
+#define CDFD   BIT(2)  /* CDF Disable */
+#define HLSE   BIT(1)  /* HIGH/LOW Separate Control Enable */
+#define SME    BIT(0)  /* SCL Mask Enable */
+
 /* ICFBSCR */
 #define TCYC17 0x0f            /* 17*Tcyc delay 1st bit between SDA and SCL */
 
 #define RCAR_MIN_DMA_LEN       8
 
+/* SCL low/high ratio 5:4 to meet all I2C timing specs (incl safety margin) */
+#define RCAR_SCLD_RATIO                5
+#define RCAR_SCHD_RATIO                4
+/*
+ * SMD should be smaller than SCLD/SCHD and is always around 20 in the docs.
+ * Thus, we simply use 20 which works for low and high speeds.
+ */
+#define RCAR_DEFAULT_SMD       20
+
 #define RCAR_BUS_PHASE_START   (MDBS | MIE | ESG)
 #define RCAR_BUS_PHASE_DATA    (MDBS | MIE)
 #define RCAR_BUS_PHASE_STOP    (MDBS | MIE | FSB)
@@ -128,6 +146,8 @@ struct rcar_i2c_priv {
 
        int pos;
        u32 icccr;
+       u16 schd;
+       u16 scld;
        u8 recovery_icmcr;      /* protected by adapter lock */
        enum rcar_i2c_type devtype;
        struct i2c_client *slave;
@@ -216,11 +236,16 @@ static void rcar_i2c_init(struct rcar_i2c_priv *priv)
        rcar_i2c_write(priv, ICMCR, MDBS);
        rcar_i2c_write(priv, ICMSR, 0);
        /* start clock */
-       rcar_i2c_write(priv, ICCCR, priv->icccr);
-
-       if (priv->devtype == I2C_RCAR_GEN3)
+       if (priv->devtype < I2C_RCAR_GEN3) {
+               rcar_i2c_write(priv, ICCCR, priv->icccr);
+       } else {
+               rcar_i2c_write(priv, ICCCR2, CDFD | HLSE | SME);
+               rcar_i2c_write(priv, ICCCR, priv->icccr);
+               rcar_i2c_write(priv, ICMPR, RCAR_DEFAULT_SMD);
+               rcar_i2c_write(priv, ICHPR, priv->schd);
+               rcar_i2c_write(priv, ICLPR, priv->scld);
                rcar_i2c_write(priv, ICFBSCR, TCYC17);
-
+       }
 }
 
 static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
@@ -241,7 +266,7 @@ static int rcar_i2c_bus_barrier(struct rcar_i2c_priv *priv)
 
 static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
 {
-       u32 scgd, cdf, round, ick, sum, scl, cdf_width;
+       u32 cdf, round, ick, sum, scl, cdf_width;
        unsigned long rate;
        struct device *dev = rcar_i2c_priv_to_dev(priv);
        struct i2c_timings t = {
@@ -254,27 +279,17 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
        /* Fall back to previously used values if not supplied */
        i2c_parse_fw_timings(dev, &t, false);
 
-       switch (priv->devtype) {
-       case I2C_RCAR_GEN1:
-               cdf_width = 2;
-               break;
-       case I2C_RCAR_GEN2:
-       case I2C_RCAR_GEN3:
-               cdf_width = 3;
-               break;
-       default:
-               dev_err(dev, "device type error\n");
-               return -EIO;
-       }
-
        /*
         * calculate SCL clock
         * see
-        *      ICCCR
+        *      ICCCR (and ICCCR2 for Gen3+)
         *
         * ick  = clkp / (1 + CDF)
         * SCL  = ick / (20 + SCGD * 8 + F[(ticf + tr + intd) * ick])
         *
+        * for Gen3+:
+        * SCL  = clkp / (8 + SMD * 2 + SCLD + SCHD +F[(ticf + tr + intd) * clkp])
+        *
         * ick  : I2C internal clock < 20 MHz
         * ticf : I2C SCL falling time
         * tr   : I2C SCL rising  time
@@ -284,52 +299,82 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv)
         */
        rate = clk_get_rate(priv->clk);
        cdf = rate / 20000000;
-       if (cdf >= 1U << cdf_width) {
-               dev_err(dev, "Input clock %lu too high\n", rate);
-               return -EIO;
-       }
-       ick = rate / (cdf + 1);
+       cdf_width = (priv->devtype == I2C_RCAR_GEN1) ? 2 : 3;
+       if (cdf >= 1U << cdf_width)
+               goto err_no_val;
+
+       /* On Gen3+, we use cdf only for the filters, not as a SCL divider */
+       ick = rate / (priv->devtype < I2C_RCAR_GEN3 ? (cdf + 1) : 1);
 
        /*
-        * it is impossible to calculate large scale
-        * number on u32. separate it
+        * It is impossible to calculate a large scale number on u32. Separate it.
         *
         * F[(ticf + tr + intd) * ick] with sum = (ticf + tr + intd)
         *  = F[sum * ick / 1000000000]
         *  = F[(ick / 1000000) * sum / 1000]
         */
        sum = t.scl_fall_ns + t.scl_rise_ns + t.scl_int_delay_ns;
-       round = (ick + 500000) / 1000000 * sum;
-       round = (round + 500) / 1000;
+       round = DIV_ROUND_CLOSEST(ick, 1000000);
+       round = DIV_ROUND_CLOSEST(round * sum, 1000);
 
-       /*
-        * SCL  = ick / (20 + SCGD * 8 + F[(ticf + tr + intd) * ick])
-        *
-        * Calculation result (= SCL) should be less than
-        * bus_speed for hardware safety
-        *
-        * We could use something along the lines of
-        *      div = ick / (bus_speed + 1) + 1;
-        *      scgd = (div - 20 - round + 7) / 8;
-        *      scl = ick / (20 + (scgd * 8) + round);
-        * (not fully verified) but that would get pretty involved
-        */
-       for (scgd = 0; scgd < 0x40; scgd++) {
-               scl = ick / (20 + (scgd * 8) + round);
-               if (scl <= t.bus_freq_hz)
-                       goto scgd_find;
-       }
-       dev_err(dev, "it is impossible to calculate best SCL\n");
-       return -EIO;
+       if (priv->devtype < I2C_RCAR_GEN3) {
+               u32 scgd;
+               /*
+                * SCL  = ick / (20 + 8 * SCGD + F[(ticf + tr + intd) * ick])
+                * 20 + 8 * SCGD + F[...] = ick / SCL
+                * SCGD = ((ick / SCL) - 20 - F[...]) / 8
+                * Result (= SCL) should be less than bus_speed for hardware safety
+                */
+               scgd = DIV_ROUND_UP(ick, t.bus_freq_hz ?: 1);
+               scgd = DIV_ROUND_UP(scgd - 20 - round, 8);
+               scl = ick / (20 + 8 * scgd + round);
+
+               if (scgd > 0x3f)
+                       goto err_no_val;
 
-scgd_find:
-       dev_dbg(dev, "clk %d/%d(%lu), round %u, CDF:0x%x, SCGD: 0x%x\n",
-               scl, t.bus_freq_hz, rate, round, cdf, scgd);
+               dev_dbg(dev, "clk %u/%u(%lu), round %u, CDF: %u, SCGD: %u\n",
+                       scl, t.bus_freq_hz, rate, round, cdf, scgd);
 
-       /* keep icccr value */
-       priv->icccr = scgd << cdf_width | cdf;
+               priv->icccr = scgd << cdf_width | cdf;
+       } else {
+               u32 x, sum_ratio = RCAR_SCHD_RATIO + RCAR_SCLD_RATIO;
+               /*
+                * SCLD/SCHD ratio and SMD default value are explained above
+                * where they are defined. With these definitions, we can compute
+                * x as a base value for the SCLD/SCHD ratio:
+                *
+                * SCL = clkp / (8 + 2 * SMD + SCLD + SCHD + F[(ticf + tr + intd) * clkp])
+                * SCL = clkp / (8 + 2 * RCAR_DEFAULT_SMD + RCAR_SCLD_RATIO * x
+                *               + RCAR_SCHD_RATIO * x + F[...])
+                *
+                * with: sum_ratio = RCAR_SCLD_RATIO + RCAR_SCHD_RATIO
+                * and:  smd = RCAR_DEFAULT_SMD
+                *
+                * SCL = clkp / (8 + 2 * smd + sum_ratio * x + F[...])
+                * 8 + 2 * smd + sum_ratio * x + F[...] = clkp / SCL
+                * x = ((clkp / SCL) - 8 - 2 * smd - F[...]) / sum_ratio
+                */
+               x = DIV_ROUND_UP(rate, t.bus_freq_hz ?: 1);
+               x = DIV_ROUND_UP(x - 8 - 2 * RCAR_DEFAULT_SMD - round, sum_ratio);
+               scl = rate / (8 + 2 * RCAR_DEFAULT_SMD + sum_ratio * x + round);
+
+               /* Bail out if values don't fit into 16 bit or SMD became too large */
+               if (x * RCAR_SCLD_RATIO > 0xffff || RCAR_DEFAULT_SMD > x * RCAR_SCHD_RATIO)
+                       goto err_no_val;
+
+               priv->icccr = cdf;
+               priv->schd = RCAR_SCHD_RATIO * x;
+               priv->scld = RCAR_SCLD_RATIO * x;
+
+               dev_dbg(dev, "clk %u/%u(%lu), round %u, CDF: %u SCHD %u SCLD %u\n",
+                       scl, t.bus_freq_hz, rate, round, cdf, priv->schd, priv->scld);
+       }
 
        return 0;
+
+err_no_val:
+       dev_err(dev, "it is impossible to calculate best SCL\n");
+       return -EINVAL;
 }
 
 /*
@@ -843,12 +888,10 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
        /* Gen3 needs a reset before allowing RXDMA once */
        if (priv->devtype == I2C_RCAR_GEN3) {
-               priv->flags |= ID_P_NO_RXDMA;
-               if (!IS_ERR(priv->rstc)) {
-                       ret = rcar_i2c_do_reset(priv);
-                       if (ret == 0)
-                               priv->flags &= ~ID_P_NO_RXDMA;
-               }
+               priv->flags &= ~ID_P_NO_RXDMA;
+               ret = rcar_i2c_do_reset(priv);
+               if (ret)
+                       goto out;
        }
 
        rcar_i2c_init(priv);
@@ -1099,15 +1142,6 @@ static int rcar_i2c_probe(struct platform_device *pdev)
                irqhandler = rcar_i2c_gen2_irq;
        }
 
-       if (priv->devtype == I2C_RCAR_GEN3) {
-               priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
-               if (!IS_ERR(priv->rstc)) {
-                       ret = reset_control_status(priv->rstc);
-                       if (ret < 0)
-                               priv->rstc = ERR_PTR(-ENOTSUPP);
-               }
-       }
-
        /* Stay always active when multi-master to keep arbitration working */
        if (of_property_read_bool(dev->of_node, "multi-master"))
                priv->flags |= ID_P_PM_BLOCKED;
@@ -1117,6 +1151,18 @@ static int rcar_i2c_probe(struct platform_device *pdev)
        if (of_property_read_bool(dev->of_node, "smbus"))
                priv->flags |= ID_P_HOST_NOTIFY;
 
+       if (priv->devtype == I2C_RCAR_GEN3) {
+               priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+               if (IS_ERR(priv->rstc)) {
+                       ret = PTR_ERR(priv->rstc);
+                       goto out_pm_put;
+               }
+
+               ret = reset_control_status(priv->rstc);
+               if (ret < 0)
+                       goto out_pm_put;
+       }
+
        ret = platform_get_irq(pdev, 0);
        if (ret < 0)
                goto out_pm_put;
index f0ee8871d5ae1b2c1b2f127de0a7e03221c93812..e43ff483c56ece3a757fd8d0a2fce70e5e15b741 100644 (file)
@@ -313,7 +313,7 @@ static int riic_init_hw(struct riic_dev *riic, struct i2c_timings *t)
         * frequency with only 62 clock ticks max (31 high, 31 low).
         * Aim for a duty of 60% LOW, 40% HIGH.
         */
-       total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz);
+       total_ticks = DIV_ROUND_UP(rate, t->bus_freq_hz ?: 1);
 
        for (cks = 0; cks < 7; cks++) {
                /*
index 127eb3805facb55a9f2e6fa0badb4885c709195d..c56886af724ea87e3f863cd6140951530e620d76 100644 (file)
@@ -133,7 +133,7 @@ static const struct platform_device_id s3c24xx_driver_ids[] = {
 };
 MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
 
-static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat);
+static void i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat);
 
 #ifdef CONFIG_OF
 static const struct of_device_id s3c24xx_i2c_match[] = {
@@ -377,11 +377,10 @@ static inline int is_msgend(struct s3c24xx_i2c *i2c)
 /*
  * process an interrupt and work out what to do
  */
-static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
+static void i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
 {
        unsigned long tmp;
        unsigned char byte;
-       int ret = 0;
 
        switch (i2c->state) {
 
@@ -544,7 +543,7 @@ static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
        tmp &= ~S3C2410_IICCON_IRQPEND;
        writel(tmp, i2c->regs + S3C2410_IICCON);
  out:
-       return ret;
+       return;
 }
 
 /*
index ecc54792a66f1dc6b9b010cab9751234e5a6d293..859ac0cf7f6cb156a5bde7a5ad81be6440f5eed9 100644 (file)
@@ -783,23 +783,17 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
+       i2c_dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(i2c_dev->clk)) {
-               dev_err(&pdev->dev, "Error: Missing controller clock\n");
+               dev_err(&pdev->dev, "Failed to enable clock\n");
                return PTR_ERR(i2c_dev->clk);
        }
-       ret = clk_prepare_enable(i2c_dev->clk);
-       if (ret) {
-               dev_err(i2c_dev->dev, "Failed to prepare_enable clock\n");
-               return ret;
-       }
 
        rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
-       if (IS_ERR(rst)) {
-               ret = dev_err_probe(&pdev->dev, PTR_ERR(rst),
-                                   "Error: Missing reset ctrl\n");
-               goto clk_free;
-       }
+       if (IS_ERR(rst))
+               return dev_err_probe(&pdev->dev, PTR_ERR(rst),
+                                    "Error: Missing reset ctrl\n");
+
        reset_control_assert(rst);
        udelay(2);
        reset_control_deassert(rst);
@@ -816,7 +810,7 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "Failed to request irq event %i\n",
                        irq_event);
-               goto clk_free;
+               return ret;
        }
 
        ret = devm_request_irq(&pdev->dev, irq_error, stm32f4_i2c_isr_error, 0,
@@ -824,12 +818,12 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "Failed to request irq error %i\n",
                        irq_error);
-               goto clk_free;
+               return ret;
        }
 
        ret = stm32f4_i2c_hw_config(i2c_dev);
        if (ret)
-               goto clk_free;
+               return ret;
 
        adap = &i2c_dev->adap;
        i2c_set_adapdata(adap, i2c_dev);
@@ -845,7 +839,7 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
 
        ret = i2c_add_adapter(adap);
        if (ret)
-               goto clk_free;
+               return ret;
 
        platform_set_drvdata(pdev, i2c_dev);
 
@@ -854,10 +848,6 @@ static int stm32f4_i2c_probe(struct platform_device *pdev)
        dev_info(i2c_dev->dev, "STM32F4 I2C driver registered\n");
 
        return 0;
-
-clk_free:
-       clk_disable_unprepare(i2c_dev->clk);
-       return ret;
 }
 
 static void stm32f4_i2c_remove(struct platform_device *pdev)
@@ -865,8 +855,6 @@ static void stm32f4_i2c_remove(struct platform_device *pdev)
        struct stm32f4_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
 
        i2c_del_adapter(&i2c_dev->adap);
-
-       clk_unprepare(i2c_dev->clk);
 }
 
 static const struct of_device_id stm32f4_i2c_match[] = {
index 0d3c9a041b5611361b489dc70ff866fcc0b28f67..983509936727edfdd8f28320076f2f2ef068263b 100644 (file)
@@ -325,6 +325,7 @@ struct stm32f7_i2c_alert {
  * @dnf_dt: value of digital filter requested via dt
  * @dnf: value of digital filter to apply
  * @alert: SMBus alert specific data
+ * @atomic: boolean indicating that current transfer is atomic
  */
 struct stm32f7_i2c_dev {
        struct i2c_adapter adap;
@@ -357,6 +358,7 @@ struct stm32f7_i2c_dev {
        u32 dnf_dt;
        u32 dnf;
        struct stm32f7_i2c_alert *alert;
+       bool atomic;
 };
 
 /*
@@ -915,7 +917,8 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
 
        /* Configure DMA or enable RX/TX interrupt */
        i2c_dev->use_dma = false;
-       if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN) {
+       if (i2c_dev->dma && f7_msg->count >= STM32F7_I2C_DMA_LEN_MIN
+           && !i2c_dev->atomic) {
                ret = stm32_i2c_prep_dma_xfer(i2c_dev->dev, i2c_dev->dma,
                                              msg->flags & I2C_M_RD,
                                              f7_msg->count, f7_msg->buf,
@@ -939,6 +942,9 @@ static void stm32f7_i2c_xfer_msg(struct stm32f7_i2c_dev *i2c_dev,
                        cr1 |= STM32F7_I2C_CR1_TXDMAEN;
        }
 
+       if (i2c_dev->atomic)
+               cr1 &= ~STM32F7_I2C_ALL_IRQ_MASK; /* Disable all interrupts */
+
        /* Configure Start/Repeated Start */
        cr2 |= STM32F7_I2C_CR2_START;
 
@@ -1673,7 +1679,22 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
+static int stm32f7_i2c_wait_polling(struct stm32f7_i2c_dev *i2c_dev)
+{
+       ktime_t timeout = ktime_add_ms(ktime_get(), i2c_dev->adap.timeout);
+
+       while (ktime_compare(ktime_get(), timeout) < 0) {
+               udelay(5);
+               stm32f7_i2c_isr_event(0, i2c_dev);
+
+               if (completion_done(&i2c_dev->complete))
+                       return 1;
+       }
+
+       return 0;
+}
+
+static int stm32f7_i2c_xfer_core(struct i2c_adapter *i2c_adap,
                            struct i2c_msg msgs[], int num)
 {
        struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
@@ -1697,8 +1718,12 @@ static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
 
        stm32f7_i2c_xfer_msg(i2c_dev, msgs);
 
-       time_left = wait_for_completion_timeout(&i2c_dev->complete,
-                                               i2c_dev->adap.timeout);
+       if (!i2c_dev->atomic)
+               time_left = wait_for_completion_timeout(&i2c_dev->complete,
+                                                       i2c_dev->adap.timeout);
+       else
+               time_left = stm32f7_i2c_wait_polling(i2c_dev);
+
        ret = f7_msg->result;
        if (ret) {
                if (i2c_dev->use_dma)
@@ -1730,6 +1755,24 @@ pm_free:
        return (ret < 0) ? ret : num;
 }
 
+static int stm32f7_i2c_xfer(struct i2c_adapter *i2c_adap,
+                           struct i2c_msg msgs[], int num)
+{
+       struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
+
+       i2c_dev->atomic = false;
+       return stm32f7_i2c_xfer_core(i2c_adap, msgs, num);
+}
+
+static int stm32f7_i2c_xfer_atomic(struct i2c_adapter *i2c_adap,
+                           struct i2c_msg msgs[], int num)
+{
+       struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
+
+       i2c_dev->atomic = true;
+       return stm32f7_i2c_xfer_core(i2c_adap, msgs, num);
+}
+
 static int stm32f7_i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
                                  unsigned short flags, char read_write,
                                  u8 command, int size,
@@ -2098,6 +2141,7 @@ static u32 stm32f7_i2c_func(struct i2c_adapter *adap)
 
 static const struct i2c_algorithm stm32f7_i2c_algo = {
        .master_xfer = stm32f7_i2c_xfer,
+       .master_xfer_atomic = stm32f7_i2c_xfer_atomic,
        .smbus_xfer = stm32f7_i2c_smbus_xfer,
        .functionality = stm32f7_i2c_func,
        .reg_slave = stm32f7_i2c_reg_slave,
@@ -2134,23 +2178,16 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
        i2c_dev->wakeup_src = of_property_read_bool(pdev->dev.of_node,
                                                    "wakeup-source");
 
-       i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
+       i2c_dev->clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(i2c_dev->clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(i2c_dev->clk),
-                                    "Failed to get controller clock\n");
-
-       ret = clk_prepare_enable(i2c_dev->clk);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to prepare_enable clock\n");
-               return ret;
-       }
+                                    "Failed to enable controller clock\n");
 
        rst = devm_reset_control_get(&pdev->dev, NULL);
-       if (IS_ERR(rst)) {
-               ret = dev_err_probe(&pdev->dev, PTR_ERR(rst),
-                                   "Error: Missing reset ctrl\n");
-               goto clk_free;
-       }
+       if (IS_ERR(rst))
+               return dev_err_probe(&pdev->dev, PTR_ERR(rst),
+                                    "Error: Missing reset ctrl\n");
+
        reset_control_assert(rst);
        udelay(2);
        reset_control_deassert(rst);
@@ -2165,7 +2202,7 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "Failed to request irq event %i\n",
                        irq_event);
-               goto clk_free;
+               return ret;
        }
 
        ret = devm_request_irq(&pdev->dev, irq_error, stm32f7_i2c_isr_error, 0,
@@ -2173,29 +2210,28 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(&pdev->dev, "Failed to request irq error %i\n",
                        irq_error);
-               goto clk_free;
+               return ret;
        }
 
        setup = of_device_get_match_data(&pdev->dev);
        if (!setup) {
                dev_err(&pdev->dev, "Can't get device data\n");
-               ret = -ENODEV;
-               goto clk_free;
+               return -ENODEV;
        }
        i2c_dev->setup = *setup;
 
        ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
        if (ret)
-               goto clk_free;
+               return ret;
 
        /* Setup Fast mode plus if necessary */
        if (i2c_dev->bus_rate > I2C_MAX_FAST_MODE_FREQ) {
                ret = stm32f7_i2c_setup_fm_plus_bits(pdev, i2c_dev);
                if (ret)
-                       goto clk_free;
+                       return ret;
                ret = stm32f7_i2c_write_fm_plus_bits(i2c_dev, true);
                if (ret)
-                       goto clk_free;
+                       return ret;
        }
 
        adap = &i2c_dev->adap;
@@ -2306,9 +2342,6 @@ clr_wakeup_capable:
 fmp_clear:
        stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
 
-clk_free:
-       clk_disable_unprepare(i2c_dev->clk);
-
        return ret;
 }
 
@@ -2342,8 +2375,6 @@ static void stm32f7_i2c_remove(struct platform_device *pdev)
        }
 
        stm32f7_i2c_write_fm_plus_bits(i2c_dev, false);
-
-       clk_disable_unprepare(i2c_dev->clk);
 }
 
 static int __maybe_unused stm32f7_i2c_runtime_suspend(struct device *dev)
index fa6020dced595d863c61b3626f2cd02bba417f6c..85e035e7a1d75e06cd96398fab8b1c684d369ff8 100644 (file)
@@ -201,6 +201,11 @@ static int p2wi_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       if (clk_freq == 0) {
+               dev_err(dev, "clock-frequency is set to 0 in DT\n");
+               return -EINVAL;
+       }
+
        if (of_get_child_count(np) > 1) {
                dev_err(dev, "P2WI only supports one slave device\n");
                return -EINVAL;
index 8ca1daadec9373dec9df3cab220d5bc1a5035918..f21475ae592183a45b5e46a20e7a0699fb88132c 100644 (file)
@@ -94,7 +94,7 @@ struct i2c_atr {
 
        struct notifier_block i2c_nb;
 
-       struct i2c_adapter *adapter[];
+       struct i2c_adapter *adapter[] __counted_by(max_adapters);
 };
 
 static struct i2c_atr_alias_pair *
index 60746652fd5255cb998c5690f985430aaf0d773c..eac90a3cf61a4b7740108974ab114105cb74ae70 100644 (file)
@@ -931,8 +931,9 @@ int i2c_dev_irq_from_resources(const struct resource *resources,
 struct i2c_client *
 i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
 {
-       struct i2c_client       *client;
-       int                     status;
+       struct i2c_client *client;
+       bool need_put = false;
+       int status;
 
        client = kzalloc(sizeof *client, GFP_KERNEL);
        if (!client)
@@ -970,7 +971,6 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
        client->dev.fwnode = info->fwnode;
 
        device_enable_async_suspend(&client->dev);
-       i2c_dev_set_name(adap, client, info);
 
        if (info->swnode) {
                status = device_add_software_node(&client->dev, info->swnode);
@@ -982,6 +982,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
                }
        }
 
+       i2c_dev_set_name(adap, client, info);
        status = device_register(&client->dev);
        if (status)
                goto out_remove_swnode;
@@ -993,6 +994,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
 
 out_remove_swnode:
        device_remove_software_node(&client->dev);
+       need_put = true;
 out_err_put_of_node:
        of_node_put(info->of_node);
 out_err:
@@ -1000,7 +1002,10 @@ out_err:
                "Failed to register i2c client %s at 0x%02x (%d)\n",
                client->name, client->addr, status);
 out_err_silent:
-       kfree(client);
+       if (need_put)
+               put_device(&client->dev);
+       else
+               kfree(client);
        return ERR_PTR(status);
 }
 EXPORT_SYMBOL_GPL(i2c_new_client_device);
@@ -1189,9 +1194,11 @@ static void i2c_adapter_dev_release(struct device *dev)
 unsigned int i2c_adapter_depth(struct i2c_adapter *adapter)
 {
        unsigned int depth = 0;
+       struct device *parent;
 
-       while ((adapter = i2c_parent_is_i2c_adapter(adapter)))
-               depth++;
+       for (parent = adapter->dev.parent; parent; parent = parent->parent)
+               if (parent->type == &i2c_adapter_type)
+                       depth++;
 
        WARN_ONCE(depth >= MAX_LOCKDEP_SUBCLASSES,
                  "adapter depth exceeds lockdep subclass limit\n");
index a01b59e3599b53e5ce20378e0713f56f5dd70ec9..8b7e599f167411be1cc8bc63e2e4c8f5b45059bc 100644 (file)
@@ -450,8 +450,8 @@ static long i2cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                if (rdwr_arg.nmsgs > I2C_RDWR_IOCTL_MAX_MSGS)
                        return -EINVAL;
 
-               rdwr_pa = memdup_user(rdwr_arg.msgs,
-                                     rdwr_arg.nmsgs * sizeof(struct i2c_msg));
+               rdwr_pa = memdup_array_user(rdwr_arg.msgs,
+                                           rdwr_arg.nmsgs, sizeof(struct i2c_msg));
                if (IS_ERR(rdwr_pa))
                        return PTR_ERR(rdwr_pa);
 
@@ -636,7 +636,10 @@ static const struct file_operations i2cdev_fops = {
 
 /* ------------------------------------------------------------------------- */
 
-static struct class *i2c_dev_class;
+static const struct class i2c_dev_class = {
+       .name = "i2c-dev",
+       .dev_groups = i2c_groups,
+};
 
 static void i2cdev_dev_release(struct device *dev)
 {
@@ -665,7 +668,7 @@ static int i2cdev_attach_adapter(struct device *dev)
 
        device_initialize(&i2c_dev->dev);
        i2c_dev->dev.devt = MKDEV(I2C_MAJOR, adap->nr);
-       i2c_dev->dev.class = i2c_dev_class;
+       i2c_dev->dev.class = &i2c_dev_class;
        i2c_dev->dev.parent = &adap->dev;
        i2c_dev->dev.release = i2cdev_dev_release;
 
@@ -751,12 +754,9 @@ static int __init i2c_dev_init(void)
        if (res)
                goto out;
 
-       i2c_dev_class = class_create("i2c-dev");
-       if (IS_ERR(i2c_dev_class)) {
-               res = PTR_ERR(i2c_dev_class);
+       res = class_register(&i2c_dev_class);
+       if (res)
                goto out_unreg_chrdev;
-       }
-       i2c_dev_class->dev_groups = i2c_groups;
 
        /* Keep track of adapters which will be added or removed later */
        res = bus_register_notifier(&i2c_bus_type, &i2cdev_notifier);
@@ -769,7 +769,7 @@ static int __init i2c_dev_init(void)
        return 0;
 
 out_unreg_class:
-       class_destroy(i2c_dev_class);
+       class_unregister(&i2c_dev_class);
 out_unreg_chrdev:
        unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
 out:
@@ -781,7 +781,7 @@ static void __exit i2c_dev_exit(void)
 {
        bus_unregister_notifier(&i2c_bus_type, &i2cdev_notifier);
        i2c_for_each_dev(NULL, i2c_dev_detach_adapter);
-       class_destroy(i2c_dev_class);
+       class_unregister(&i2c_dev_class);
        unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS);
 }
 
index 9f2e4aa28159338f2613a784fe5c539cf3152a6f..7e2686b606c04d21bf4f078c875d51a72f0bce6d 100644 (file)
@@ -32,7 +32,7 @@ struct i2c_demux_pinctrl_priv {
        const char *bus_name;
        struct i2c_adapter cur_adap;
        struct i2c_algorithm algo;
-       struct i2c_demux_pinctrl_chan chan[];
+       struct i2c_demux_pinctrl_chan chan[] __counted_by(num_chan);
 };
 
 static int i2c_demux_master_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
@@ -167,9 +167,9 @@ static ssize_t available_masters_show(struct device *dev,
        int count = 0, i;
 
        for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++)
-               count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%pOF%c",
-                                  i, priv->chan[i].parent_np,
-                                  i == priv->num_chan - 1 ? '\n' : ' ');
+               count += sysfs_emit_at(buf, count, "%d:%pOF%c",
+                                      i, priv->chan[i].parent_np,
+                                      i == priv->num_chan - 1 ? '\n' : ' ');
 
        return count;
 }
@@ -226,6 +226,8 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
        if (!priv || !props)
                return -ENOMEM;
 
+       priv->num_chan = num_chan;
+
        err = of_property_read_string(np, "i2c-bus-name", &priv->bus_name);
        if (err)
                return err;
@@ -253,9 +255,7 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev)
                of_changeset_update_property(&priv->chan[i].chgset, adap_np, &props[i]);
        }
 
-       priv->num_chan = num_chan;
        priv->dev = &pdev->dev;
-
        platform_set_drvdata(pdev, priv);
 
        pm_runtime_no_callbacks(&pdev->dev);
index 78987ead6be065e986d7308eccf130af5230ea41..6b979a0a6ab86a1bbc36a80435f993b8a35fbdab 100644 (file)
@@ -22,7 +22,7 @@ struct gpiomux {
        struct gpio_desc **gpios;
 };
 
-static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
+static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned int val)
 {
        DECLARE_BITMAP(values, BITS_PER_TYPE(val));
 
@@ -58,7 +58,7 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
        struct device_node *adapter_np;
        struct i2c_adapter *adapter = NULL;
        struct fwnode_handle *child;
-       unsigned *values;
+       unsigned int *values;
        int rc, i = 0;
 
        if (is_of_node(fwnode)) {
@@ -101,7 +101,6 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
        device_for_each_child_node(dev, child) {
                if (is_of_node(child)) {
                        fwnode_property_read_u32(child, "reg", values + i);
-
                } else if (is_acpi_node(child)) {
                        rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), values + i);
                        if (rc) {
@@ -126,7 +125,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
        struct gpiomux *mux;
        struct i2c_adapter *parent;
        struct i2c_adapter *root;
-       unsigned initial_state;
+       unsigned int initial_state;
        int i, ngpios, ret;
 
        mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
index 959ec5269376fb42d78a8faecef8ffa0aa37104b..95caa162706f5543a91fb40cffaa2927b25b64c7 100644 (file)
@@ -1556,9 +1556,11 @@ i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
                        desc->dev->dev.of_node = desc->boardinfo->of_node;
 
                ret = device_register(&desc->dev->dev);
-               if (ret)
+               if (ret) {
                        dev_err(&master->dev,
                                "Failed to add I3C device (err = %d)\n", ret);
+                       put_device(&desc->dev->dev);
+               }
        }
 }
 
@@ -2340,7 +2342,7 @@ static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
        adap->dev.parent = master->dev.parent;
        adap->owner = master->dev.parent->driver->owner;
        adap->algo = &i3c_master_i2c_algo;
-       strncpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
+       strscpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
 
        /* FIXME: Should we allow i3c masters to override these values? */
        adap->timeout = 1000;
@@ -2403,7 +2405,7 @@ static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
 void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
 {
        atomic_inc(&dev->ibi->pending_ibis);
-       queue_work(dev->common.master->wq, &slot->work);
+       queue_work(dev->ibi->wq, &slot->work);
 }
 EXPORT_SYMBOL_GPL(i3c_master_queue_ibi);
 
@@ -2660,6 +2662,10 @@ int i3c_master_register(struct i3c_master_controller *master,
        device_initialize(&master->dev);
        dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
 
+       master->dev.dma_mask = parent->dma_mask;
+       master->dev.coherent_dma_mask = parent->coherent_dma_mask;
+       master->dev.dma_parms = parent->dma_parms;
+
        ret = of_populate_i3c_bus(master);
        if (ret)
                goto err_put_dev;
@@ -2848,6 +2854,12 @@ int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
        if (!ibi)
                return -ENOMEM;
 
+       ibi->wq = alloc_ordered_workqueue(dev_name(i3cdev_to_dev(dev->dev)), WQ_MEM_RECLAIM);
+       if (!ibi->wq) {
+               kfree(ibi);
+               return -ENOMEM;
+       }
+
        atomic_set(&ibi->pending_ibis, 0);
        init_completion(&ibi->all_ibis_handled);
        ibi->handler = req->handler;
@@ -2875,6 +2887,12 @@ void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
                WARN_ON(i3c_dev_disable_ibi_locked(dev));
 
        master->ops->free_ibi(dev);
+
+       if (dev->ibi->wq) {
+               destroy_workqueue(dev->ibi->wq);
+               dev->ibi->wq = NULL;
+       }
+
        kfree(dev->ibi);
        dev->ibi = NULL;
 }
index 9332ae5f641903e45f1c2602f6d2b89de5943d32..ef5751e91cc9eff0a9a75e663e6d408a87209070 100644 (file)
@@ -233,7 +233,7 @@ struct dw_i3c_xfer {
        struct completion comp;
        int ret;
        unsigned int ncmds;
-       struct dw_i3c_cmd cmds[];
+       struct dw_i3c_cmd cmds[] __counted_by(ncmds);
 };
 
 struct dw_i3c_i2c_dev_data {
index 49551db71bc96ba9df786f097dbfec9598dcab6d..bcbe8f914149b29d69147e1ccbefbf45339ba314 100644 (file)
 #define SLV_STATUS1_HJ_DIS             BIT(18)
 #define SLV_STATUS1_MR_DIS             BIT(17)
 #define SLV_STATUS1_PROT_ERR           BIT(16)
-#define SLV_STATUS1_DA(x)              (((s) & GENMASK(15, 9)) >> 9)
+#define SLV_STATUS1_DA(s)              (((s) & GENMASK(15, 9)) >> 9)
 #define SLV_STATUS1_HAS_DA             BIT(8)
 #define SLV_STATUS1_DDR_RX_FULL                BIT(7)
 #define SLV_STATUS1_DDR_TX_FULL                BIT(6)
@@ -387,7 +387,7 @@ struct cdns_i3c_xfer {
        struct completion comp;
        int ret;
        unsigned int ncmds;
-       struct cdns_i3c_cmd cmds[];
+       struct cdns_i3c_cmd cmds[] __counted_by(ncmds);
 };
 
 struct cdns_i3c_data {
@@ -1623,13 +1623,13 @@ static int cdns_i3c_master_probe(struct platform_device *pdev)
        /* Device ID0 is reserved to describe this master. */
        master->maxdevs = CONF_STATUS0_DEVS_NUM(val);
        master->free_rr_slots = GENMASK(master->maxdevs, 1);
+       master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
+       master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
 
        val = readl(master->regs + CONF_STATUS1);
        master->caps.cmdfifodepth = CONF_STATUS1_CMD_DEPTH(val);
        master->caps.rxfifodepth = CONF_STATUS1_RX_DEPTH(val);
        master->caps.txfifodepth = CONF_STATUS1_TX_DEPTH(val);
-       master->caps.ibirfifodepth = CONF_STATUS0_IBIR_DEPTH(val);
-       master->caps.cmdrfifodepth = CONF_STATUS0_CMDR_DEPTH(val);
 
        spin_lock_init(&master->ibi.lock);
        master->ibi.num_slots = CONF_STATUS1_IBI_HW_RES(val);
index 6a781f89b0e4066beef10dcf641d242e97504600..2b2323aa671416075454b781212ca36b66360a5a 100644 (file)
@@ -332,6 +332,7 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
                        CMD_A0_DEV_COUNT(1) |
                        CMD_A0_ROC | CMD_A0_TOC;
                xfer->cmd_desc[1] = 0;
+               xfer->completion = &done;
                hci->io->queue_xfer(hci, xfer, 1);
                if (!wait_for_completion_timeout(&done, HZ) &&
                    hci->io->dequeue_xfer(hci, xfer, 1)) {
index 837af83c85f4ed8d14fc733897abef9a27d69eb4..1ae56a5699c6b63eee19e73031d28658e5e1d3ca 100644 (file)
@@ -161,10 +161,12 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
 static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
 {
        struct i3c_hci *hci = to_i3c_hci(m);
+       struct platform_device *pdev = to_platform_device(m->dev.parent);
 
        DBG("");
 
        reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+       synchronize_irq(platform_get_irq(pdev, 0));
        hci->io->cleanup(hci);
        if (hci->cmd == &mipi_i3c_hci_cmd_v1)
                mipi_i3c_hci_dat_v1.cleanup(hci);
@@ -172,8 +174,7 @@ static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
 
 void mipi_i3c_hci_resume(struct i3c_hci *hci)
 {
-       /* the HC_CONTROL_RESUME bit is R/W1C so just read and write back */
-       reg_write(HC_CONTROL, reg_read(HC_CONTROL));
+       reg_set(HC_CONTROL, HC_CONTROL_RESUME);
 }
 
 /* located here rather than pio.c because needed bits are in core reg space */
@@ -610,17 +611,17 @@ static int i3c_hci_init(struct i3c_hci *hci)
        offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
        hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
        hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
-       hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval);
+       hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval) ? 0 : 8;
        dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
-                hci->DAT_entries, hci->DAT_entry_size * 4, offset);
+                hci->DAT_entries, hci->DAT_entry_size, offset);
 
        regval = reg_read(DCT_SECTION);
        offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
        hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
        hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
-       hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval);
+       hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval) ? 0 : 16;
        dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
-                hci->DCT_entries, hci->DCT_entry_size * 4, offset);
+                hci->DCT_entries, hci->DCT_entry_size, offset);
 
        regval = reg_read(RING_HEADERS_SECTION);
        offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
@@ -787,6 +788,7 @@ static struct platform_driver i3c_hci_driver = {
        },
 };
 module_platform_driver(i3c_hci_driver);
+MODULE_ALIAS("platform:mipi-i3c-hci");
 
 MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
 MODULE_DESCRIPTION("MIPI I3C HCI driver");
index 97bb49ff5b53bd8887303471a8b6446470156a81..47b9b4d4ed3fc0ff5e05ed4d4d2d5b28ae246ded 100644 (file)
@@ -64,15 +64,17 @@ static int hci_dat_v1_init(struct i3c_hci *hci)
                return -EOPNOTSUPP;
        }
 
-       /* use a bitmap for faster free slot search */
-       hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
-       if (!hci->DAT_data)
-               return -ENOMEM;
-
-       /* clear them */
-       for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
-               dat_w0_write(dat_idx, 0);
-               dat_w1_write(dat_idx, 0);
+       if (!hci->DAT_data) {
+               /* use a bitmap for faster free slot search */
+               hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
+               if (!hci->DAT_data)
+                       return -ENOMEM;
+
+               /* clear them */
+               for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
+                       dat_w0_write(dat_idx, 0);
+                       dat_w1_write(dat_idx, 0);
+               }
        }
 
        return 0;
@@ -87,7 +89,13 @@ static void hci_dat_v1_cleanup(struct i3c_hci *hci)
 static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
 {
        unsigned int dat_idx;
+       int ret;
 
+       if (!hci->DAT_data) {
+               ret = hci_dat_v1_init(hci);
+               if (ret)
+                       return ret;
+       }
        dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
        if (dat_idx >= hci->DAT_entries)
                return -ENOENT;
@@ -103,7 +111,8 @@ static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
 {
        dat_w0_write(dat_idx, 0);
        dat_w1_write(dat_idx, 0);
-       __clear_bit(dat_idx, hci->DAT_data);
+       if (hci->DAT_data)
+               __clear_bit(dat_idx, hci->DAT_data);
 }
 
 static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
index 2990ac9eaade771fcb02da00694fd0a2850241b2..c805a8497319dbb76f51a97e107ab9206e2a3515 100644 (file)
@@ -139,7 +139,7 @@ struct hci_rh_data {
 
 struct hci_rings_data {
        unsigned int total;
-       struct hci_rh_data headers[];
+       struct hci_rh_data headers[] __counted_by(total);
 };
 
 struct hci_dma_dev_ibi_data {
@@ -229,6 +229,9 @@ static int hci_dma_init(struct i3c_hci *hci)
        hci->io_data = rings;
        rings->total = nr_rings;
 
+       regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
+       rhs_reg_write(CONTROL, regval);
+
        for (i = 0; i < rings->total; i++) {
                u32 offset = rhs_reg_read(RHn_OFFSET(i));
 
@@ -325,11 +328,10 @@ static int hci_dma_init(struct i3c_hci *hci)
                rh_reg_write(INTR_SIGNAL_ENABLE, regval);
 
 ring_ready:
-               rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+               rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
+                                          RING_CTRL_RUN_STOP);
        }
 
-       regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
-       rhs_reg_write(CONTROL, regval);
        return 0;
 
 err_out:
@@ -345,6 +347,8 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
 
        for (i = 0; i < n; i++) {
                xfer = xfer_list + i;
+               if (!xfer->data)
+                       continue;
                dma_unmap_single(&hci->master.dev,
                                 xfer->data_dma, xfer->data_len,
                                 xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
@@ -450,10 +454,9 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
                /*
                 * We're deep in it if ever this condition is ever met.
                 * Hardware might still be writing to memory, etc.
-                * Better suspend the world than risking silent corruption.
                 */
                dev_crit(&hci->master.dev, "unable to abort the ring\n");
-               BUG();
+               WARN_ON(1);
        }
 
        for (i = 0; i < n; i++) {
@@ -734,7 +737,7 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
        unsigned int i;
        bool handled = false;
 
-       for (i = 0; mask && i < 8; i++) {
+       for (i = 0; mask && i < rings->total; i++) {
                struct hci_rh_data *rh;
                u32 status;
 
@@ -756,9 +759,11 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
                if (status & INTR_RING_OP)
                        complete(&rh->op_done);
 
-               if (status & INTR_TRANSFER_ABORT)
+               if (status & INTR_TRANSFER_ABORT) {
                        dev_notice_ratelimited(&hci->master.dev,
                                "ring %d: Transfer Aborted\n", i);
+                       mipi_i3c_hci_resume(hci);
+               }
                if (status & INTR_WARN_INS_STOP_MODE)
                        dev_warn_ratelimited(&hci->master.dev,
                                "ring %d: Inserted Stop on Mode Change\n", i);
index 8f8295acdadb3aa3aa039271beaf56fefed3617f..cf703c00f63349c7b989efe08365f080a119e2c2 100644 (file)
@@ -93,6 +93,7 @@
 #define SVC_I3C_MINTMASKED   0x098
 #define SVC_I3C_MERRWARN     0x09C
 #define   SVC_I3C_MERRWARN_NACK BIT(2)
+#define   SVC_I3C_MERRWARN_TIMEOUT BIT(20)
 #define SVC_I3C_MDMACTRL     0x0A0
 #define SVC_I3C_MDATACTRL    0x0AC
 #define   SVC_I3C_MDATACTRL_FLUSHTB BIT(0)
@@ -143,7 +144,7 @@ struct svc_i3c_xfer {
        int ret;
        unsigned int type;
        unsigned int ncmds;
-       struct svc_i3c_cmd cmds[];
+       struct svc_i3c_cmd cmds[] __counted_by(ncmds);
 };
 
 struct svc_i3c_regs_save {
@@ -175,6 +176,7 @@ struct svc_i3c_regs_save {
  * @ibi.slots: Available IBI slots
  * @ibi.tbq_slot: To be queued IBI slot
  * @ibi.lock: IBI lock
+ * @lock: Transfer lock, protect between IBI work thread and callbacks from master
  */
 struct svc_i3c_master {
        struct i3c_master_controller base;
@@ -203,6 +205,7 @@ struct svc_i3c_master {
                /* Prevent races within IBI handlers */
                spinlock_t lock;
        } ibi;
+       struct mutex lock;
 };
 
 /**
@@ -225,6 +228,14 @@ static bool svc_i3c_master_error(struct svc_i3c_master *master)
        if (SVC_I3C_MSTATUS_ERRWARN(mstatus)) {
                merrwarn = readl(master->regs + SVC_I3C_MERRWARN);
                writel(merrwarn, master->regs + SVC_I3C_MERRWARN);
+
+               /* Ignore timeout error */
+               if (merrwarn & SVC_I3C_MERRWARN_TIMEOUT) {
+                       dev_dbg(master->dev, "Warning condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
+                               mstatus, merrwarn);
+                       return false;
+               }
+
                dev_err(master->dev,
                        "Error condition: MSTATUS 0x%08x, MERRWARN 0x%08x\n",
                        mstatus, merrwarn);
@@ -331,6 +342,7 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
        struct i3c_ibi_slot *slot;
        unsigned int count;
        u32 mdatactrl;
+       int ret, val;
        u8 *buf;
 
        slot = i3c_generic_ibi_get_free_slot(data->ibi_pool);
@@ -340,6 +352,13 @@ static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
        slot->len = 0;
        buf = slot->data;
 
+       ret = readl_relaxed_poll_timeout(master->regs + SVC_I3C_MSTATUS, val,
+                                               SVC_I3C_MSTATUS_COMPLETE(val), 0, 1000);
+       if (ret) {
+               dev_err(master->dev, "Timeout when polling for COMPLETE\n");
+               return ret;
+       }
+
        while (SVC_I3C_MSTATUS_RXPEND(readl(master->regs + SVC_I3C_MSTATUS))  &&
               slot->len < SVC_I3C_FIFO_SIZE) {
                mdatactrl = readl(master->regs + SVC_I3C_MDATACTRL);
@@ -384,6 +403,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
        u32 status, val;
        int ret;
 
+       mutex_lock(&master->lock);
        /* Acknowledge the incoming interrupt with the AUTOIBI mechanism */
        writel(SVC_I3C_MCTRL_REQUEST_AUTO_IBI |
               SVC_I3C_MCTRL_IBIRESP_AUTO,
@@ -394,6 +414,7 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
                                         SVC_I3C_MSTATUS_IBIWON(val), 0, 1000);
        if (ret) {
                dev_err(master->dev, "Timeout when polling for IBIWON\n");
+               svc_i3c_master_emit_stop(master);
                goto reenable_ibis;
        }
 
@@ -460,12 +481,13 @@ static void svc_i3c_master_ibi_work(struct work_struct *work)
 
 reenable_ibis:
        svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+       mutex_unlock(&master->lock);
 }
 
 static irqreturn_t svc_i3c_master_irq_handler(int irq, void *dev_id)
 {
        struct svc_i3c_master *master = (struct svc_i3c_master *)dev_id;
-       u32 active = readl(master->regs + SVC_I3C_MINTMASKED);
+       u32 active = readl(master->regs + SVC_I3C_MSTATUS);
 
        if (!SVC_I3C_MSTATUS_SLVSTART(active))
                return IRQ_NONE;
@@ -765,7 +787,7 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
                        u8 data[6];
 
                        /*
-                        * We only care about the 48-bit provisional ID yet to
+                        * We only care about the 48-bit provisioned ID yet to
                         * be sure a device does not nack an address twice.
                         * Otherwise, we would just need to flush the RX FIFO.
                         */
@@ -1007,6 +1029,9 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
        u32 reg;
        int ret;
 
+       /* clean SVC_I3C_MINT_IBIWON w1c bits */
+       writel(SVC_I3C_MINT_IBIWON, master->regs + SVC_I3C_MSTATUS);
+
        writel(SVC_I3C_MCTRL_REQUEST_START_ADDR |
               xfer_type |
               SVC_I3C_MCTRL_IBIRESP_NACK |
@@ -1025,6 +1050,23 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
                goto emit_stop;
        }
 
+       /*
+        * According to I3C spec ver 1.1.1, 5.1.2.2.3 Consequence of Controller Starting a Frame
+        * with I3C Target Address.
+        *
+        * The I3C Controller normally should start a Frame, the Address may be arbitrated, and so
+        * the Controller shall monitor to see whether an In-Band Interrupt request, a Controller
+        * Role Request (i.e., Secondary Controller requests to become the Active Controller), or
+        * a Hot-Join Request has been made.
+        *
+        * If missed IBIWON check, the wrong data will be return. When IBIWON happen, return failure
+        * and yield the above events handler.
+        */
+       if (SVC_I3C_MSTATUS_IBIWON(reg)) {
+               ret = -ENXIO;
+               goto emit_stop;
+       }
+
        if (rnw)
                ret = svc_i3c_master_read(master, in, xfer_len);
        else
@@ -1204,9 +1246,11 @@ static int svc_i3c_master_send_bdcast_ccc_cmd(struct svc_i3c_master *master,
        cmd->read_len = 0;
        cmd->continued = false;
 
+       mutex_lock(&master->lock);
        svc_i3c_master_enqueue_xfer(master, xfer);
        if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
                svc_i3c_master_dequeue_xfer(master, xfer);
+       mutex_unlock(&master->lock);
 
        ret = xfer->ret;
        kfree(buf);
@@ -1250,9 +1294,11 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
        cmd->read_len = read_len;
        cmd->continued = false;
 
+       mutex_lock(&master->lock);
        svc_i3c_master_enqueue_xfer(master, xfer);
        if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
                svc_i3c_master_dequeue_xfer(master, xfer);
+       mutex_unlock(&master->lock);
 
        if (cmd->read_len != xfer_len)
                ccc->dests[0].payload.len = cmd->read_len;
@@ -1309,9 +1355,11 @@ static int svc_i3c_master_priv_xfers(struct i3c_dev_desc *dev,
                cmd->continued = (i + 1) < nxfers;
        }
 
+       mutex_lock(&master->lock);
        svc_i3c_master_enqueue_xfer(master, xfer);
        if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
                svc_i3c_master_dequeue_xfer(master, xfer);
+       mutex_unlock(&master->lock);
 
        ret = xfer->ret;
        svc_i3c_master_free_xfer(xfer);
@@ -1347,9 +1395,11 @@ static int svc_i3c_master_i2c_xfers(struct i2c_dev_desc *dev,
                cmd->continued = (i + 1 < nxfers);
        }
 
+       mutex_lock(&master->lock);
        svc_i3c_master_enqueue_xfer(master, xfer);
        if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
                svc_i3c_master_dequeue_xfer(master, xfer);
+       mutex_unlock(&master->lock);
 
        ret = xfer->ret;
        svc_i3c_master_free_xfer(xfer);
@@ -1540,6 +1590,8 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
 
        INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
        INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
+       mutex_init(&master->lock);
+
        ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
                               IRQF_NO_SUSPEND, "svc-i3c-irq", master);
        if (ret)
@@ -1651,7 +1703,7 @@ static const struct dev_pm_ops svc_i3c_pm_ops = {
 };
 
 static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
-       { .compatible = "silvaco,i3c-master},
+       { .compatible = "silvaco,i3c-master-v1"},
        { /* sentinel */ },
 };
 MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
index 95f90699d2b17b4e42e067bf44b6deac611fdb8b..51e0c4954600196081cfbc774c5a790c21be6e48 100644 (file)
@@ -50,7 +50,7 @@ struct evdev_client {
        bool revoked;
        unsigned long *evmasks[EV_CNT];
        unsigned int bufsize;
-       struct input_event buffer[];
+       struct input_event buffer[] __counted_by(bufsize);
 };
 
 static size_t evdev_get_mask_cnt(unsigned int type)
index 0b11990ade465022ecaa329ff9653376bb12a695..0e935914bc3aa1ba7cea72a2a6c1e8e01a4a321c 100644 (file)
@@ -44,7 +44,7 @@ struct input_led {
 struct input_leds {
        struct input_handle handle;
        unsigned int num_leds;
-       struct input_led leds[];
+       struct input_led leds[] __counted_by(num_leds);
 };
 
 static enum led_brightness input_leds_brightness_get(struct led_classdev *cdev)
index 56abc8c6c763a35f92fac92dd15c29bcbe0f791c..27d95d6cf56e342e6d76695a3fa6ed3245e43262 100644 (file)
@@ -296,15 +296,4 @@ static struct parport_driver walkera0701_parport_driver = {
        .devmodel = true,
 };
 
-static int __init walkera0701_init(void)
-{
-       return parport_register_driver(&walkera0701_parport_driver);
-}
-
-static void __exit walkera0701_exit(void)
-{
-       parport_unregister_driver(&walkera0701_parport_driver);
-}
-
-module_init(walkera0701_init);
-module_exit(walkera0701_exit);
+module_parport_driver(walkera0701_parport_driver);
index 7851ffd678a8fdc67e0972fb9cb59b492834539c..10c248f0c1fcda0bdf926f18411ff1a2d94938dd 100644 (file)
@@ -168,14 +168,12 @@ static int adp5520_keys_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int adp5520_keys_remove(struct platform_device *pdev)
+static void adp5520_keys_remove(struct platform_device *pdev)
 {
        struct adp5520_keys *dev = platform_get_drvdata(pdev);
 
        adp5520_unregister_notifier(dev->master, &dev->notifier,
                                ADP5520_KP_IEN | ADP5520_KR_IEN);
-
-       return 0;
 }
 
 static struct platform_driver adp5520_keys_driver = {
@@ -183,7 +181,7 @@ static struct platform_driver adp5520_keys_driver = {
                .name   = "adp5520-keys",
        },
        .probe          = adp5520_keys_probe,
-       .remove         = adp5520_keys_remove,
+       .remove_new     = adp5520_keys_remove,
 };
 module_platform_driver(adp5520_keys_driver);
 
index e7ecfca838df4076f6b2301fb5496e5976593b99..30678a34cf6476fe404b6d7148a1192fadede922 100644 (file)
@@ -686,10 +686,11 @@ static umode_t cros_ec_keyb_attr_is_visible(struct kobject *kobj,
        return attr->mode;
 }
 
-static const struct attribute_group cros_ec_keyb_attr_group = {
+static const struct attribute_group cros_ec_keyb_group = {
        .is_visible = cros_ec_keyb_attr_is_visible,
        .attrs = cros_ec_keyb_attrs,
 };
+__ATTRIBUTE_GROUPS(cros_ec_keyb);
 
 static int cros_ec_keyb_probe(struct platform_device *pdev)
 {
@@ -730,12 +731,6 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
                return err;
        }
 
-       err = devm_device_add_group(dev, &cros_ec_keyb_attr_group);
-       if (err) {
-               dev_err(dev, "failed to create attributes: %d\n", err);
-               return err;
-       }
-
        ckdev->notifier.notifier_call = cros_ec_keyb_work;
        err = blocking_notifier_chain_register(&ckdev->ec->event_notifier,
                                               &ckdev->notifier);
@@ -748,14 +743,12 @@ static int cros_ec_keyb_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int cros_ec_keyb_remove(struct platform_device *pdev)
+static void cros_ec_keyb_remove(struct platform_device *pdev)
 {
        struct cros_ec_keyb *ckdev = dev_get_drvdata(&pdev->dev);
 
        blocking_notifier_chain_unregister(&ckdev->ec->event_notifier,
                                           &ckdev->notifier);
-
-       return 0;
 }
 
 #ifdef CONFIG_ACPI
@@ -779,9 +772,10 @@ static DEFINE_SIMPLE_DEV_PM_OPS(cros_ec_keyb_pm_ops, NULL, cros_ec_keyb_resume);
 
 static struct platform_driver cros_ec_keyb_driver = {
        .probe = cros_ec_keyb_probe,
-       .remove = cros_ec_keyb_remove,
+       .remove_new = cros_ec_keyb_remove,
        .driver = {
                .name = "cros-ec-keyb",
+               .dev_groups = cros_ec_keyb_groups,
                .of_match_table = of_match_ptr(cros_ec_keyb_of_match),
                .acpi_match_table = ACPI_PTR(cros_ec_keyb_acpi_match),
                .pm = pm_sleep_ptr(&cros_ec_keyb_pm_ops),
index 55075addcac2618b9ed49a53b150953ed2a0d0bc..6b811d6bf6258c8dd5e5c52f3f6868d5b02bdb64 100644 (file)
@@ -308,11 +308,9 @@ static int ep93xx_keypad_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int ep93xx_keypad_remove(struct platform_device *pdev)
+static void ep93xx_keypad_remove(struct platform_device *pdev)
 {
        dev_pm_clear_wake_irq(&pdev->dev);
-
-       return 0;
 }
 
 static struct platform_driver ep93xx_keypad_driver = {
@@ -321,7 +319,7 @@ static struct platform_driver ep93xx_keypad_driver = {
                .pm     = pm_sleep_ptr(&ep93xx_keypad_pm_ops),
        },
        .probe          = ep93xx_keypad_probe,
-       .remove         = ep93xx_keypad_remove,
+       .remove_new     = ep93xx_keypad_remove,
 };
 module_platform_driver(ep93xx_keypad_driver);
 
index 02ceebad7bdaaddc3e24436d4f0ac97ae06ceb6e..688d61244b5fdf6640777adeb6aea536e94d35f0 100644 (file)
@@ -310,7 +310,7 @@ static int iqs62x_keys_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int iqs62x_keys_remove(struct platform_device *pdev)
+static void iqs62x_keys_remove(struct platform_device *pdev)
 {
        struct iqs62x_keys_private *iqs62x_keys = platform_get_drvdata(pdev);
        int ret;
@@ -319,8 +319,6 @@ static int iqs62x_keys_remove(struct platform_device *pdev)
                                                 &iqs62x_keys->notifier);
        if (ret)
                dev_err(&pdev->dev, "Failed to unregister notifier: %d\n", ret);
-
-       return 0;
 }
 
 static struct platform_driver iqs62x_keys_platform_driver = {
@@ -328,7 +326,7 @@ static struct platform_driver iqs62x_keys_platform_driver = {
                .name = "iqs62x-keys",
        },
        .probe = iqs62x_keys_probe,
-       .remove = iqs62x_keys_remove,
+       .remove_new = iqs62x_keys_remove,
 };
 module_platform_driver(iqs62x_keys_platform_driver);
 
index a1b037891af25f0b173e5bb35aed03ee1c3d907a..50fa764c82d2b3e9bb0e6670066dd18a7c938a84 100644 (file)
@@ -549,15 +549,13 @@ err_free_mem:
        return err;
 }
 
-static int matrix_keypad_remove(struct platform_device *pdev)
+static void matrix_keypad_remove(struct platform_device *pdev)
 {
        struct matrix_keypad *keypad = platform_get_drvdata(pdev);
 
        matrix_keypad_free_gpio(keypad);
        input_unregister_device(keypad->input_dev);
        kfree(keypad);
-
-       return 0;
 }
 
 #ifdef CONFIG_OF
@@ -570,7 +568,7 @@ MODULE_DEVICE_TABLE(of, matrix_keypad_dt_match);
 
 static struct platform_driver matrix_keypad_driver = {
        .probe          = matrix_keypad_probe,
-       .remove         = matrix_keypad_remove,
+       .remove_new     = matrix_keypad_remove,
        .driver         = {
                .name   = "matrix-keypad",
                .pm     = pm_sleep_ptr(&matrix_keypad_pm_ops),
index 24440b4986457b18edb16a5976e501fdc91c0299..454fb8675657302ca1281c211a2be027fc520163 100644 (file)
@@ -287,7 +287,7 @@ err2:
        return -EINVAL;
 }
 
-static int omap_kp_remove(struct platform_device *pdev)
+static void omap_kp_remove(struct platform_device *pdev)
 {
        struct omap_kp *omap_kp = platform_get_drvdata(pdev);
 
@@ -303,13 +303,11 @@ static int omap_kp_remove(struct platform_device *pdev)
        input_unregister_device(omap_kp->input);
 
        kfree(omap_kp);
-
-       return 0;
 }
 
 static struct platform_driver omap_kp_driver = {
        .probe          = omap_kp_probe,
-       .remove         = omap_kp_remove,
+       .remove_new     = omap_kp_remove,
        .driver         = {
                .name   = "omap-keypad",
        },
index 773e55eed88b18353742107689d5c93e2aae4c16..d3f8688fdd9c3ebe03a0c97fb32283f4cd4190ac 100644 (file)
@@ -461,11 +461,9 @@ static int omap4_keypad_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int omap4_keypad_remove(struct platform_device *pdev)
+static void omap4_keypad_remove(struct platform_device *pdev)
 {
        dev_pm_clear_wake_irq(&pdev->dev);
-
-       return 0;
 }
 
 static const struct of_device_id omap_keypad_dt_match[] = {
@@ -476,7 +474,7 @@ MODULE_DEVICE_TABLE(of, omap_keypad_dt_match);
 
 static struct platform_driver omap4_keypad_driver = {
        .probe          = omap4_keypad_probe,
-       .remove         = omap4_keypad_remove,
+       .remove_new     = omap4_keypad_remove,
        .driver         = {
                .name   = "omap4-keypad",
                .of_match_table = omap_keypad_dt_match,
index d85dd2489293463cfeabf00b1414834806b563ca..e212eff7687c010ca3e19b9627d5a46b3a8a7e51 100644 (file)
@@ -444,7 +444,7 @@ err_unprepare_clk:
        return error;
 }
 
-static int samsung_keypad_remove(struct platform_device *pdev)
+static void samsung_keypad_remove(struct platform_device *pdev)
 {
        struct samsung_keypad *keypad = platform_get_drvdata(pdev);
 
@@ -453,8 +453,6 @@ static int samsung_keypad_remove(struct platform_device *pdev)
        input_unregister_device(keypad->input_dev);
 
        clk_unprepare(keypad->clk);
-
-       return 0;
 }
 
 static int samsung_keypad_runtime_suspend(struct device *dev)
@@ -589,7 +587,7 @@ MODULE_DEVICE_TABLE(platform, samsung_keypad_driver_ids);
 
 static struct platform_driver samsung_keypad_driver = {
        .probe          = samsung_keypad_probe,
-       .remove         = samsung_keypad_remove,
+       .remove_new     = samsung_keypad_remove,
        .driver         = {
                .name   = "samsung-keypad",
                .of_match_table = of_match_ptr(samsung_keypad_dt_match),
index 2c00320f739fc1e7e60e0fab6c11c7efca9608e0..4ea4fd25c5d20ed2fed750034f9db1426fe2bf37 100644 (file)
@@ -265,7 +265,7 @@ static int sh_keysc_probe(struct platform_device *pdev)
        return error;
 }
 
-static int sh_keysc_remove(struct platform_device *pdev)
+static void sh_keysc_remove(struct platform_device *pdev)
 {
        struct sh_keysc_priv *priv = platform_get_drvdata(pdev);
 
@@ -279,8 +279,6 @@ static int sh_keysc_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
 
        kfree(priv);
-
-       return 0;
 }
 
 static int sh_keysc_suspend(struct device *dev)
@@ -321,7 +319,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(sh_keysc_dev_pm_ops,
 
 static struct platform_driver sh_keysc_device_driver = {
        .probe          = sh_keysc_probe,
-       .remove         = sh_keysc_remove,
+       .remove_new     = sh_keysc_remove,
        .driver         = {
                .name   = "sh_keysc",
                .pm     = pm_sleep_ptr(&sh_keysc_dev_pm_ops),
index a50fa9915381f1a71b8ad1da4ca2a9a1d7e4626b..557d00a667cea523a6b3a7dd635573defb0c10a3 100644 (file)
@@ -272,14 +272,12 @@ static int spear_kbd_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int spear_kbd_remove(struct platform_device *pdev)
+static void spear_kbd_remove(struct platform_device *pdev)
 {
        struct spear_kbd *kbd = platform_get_drvdata(pdev);
 
        input_unregister_device(kbd->input);
        clk_unprepare(kbd->clk);
-
-       return 0;
 }
 
 static int spear_kbd_suspend(struct device *dev)
@@ -375,7 +373,7 @@ MODULE_DEVICE_TABLE(of, spear_kbd_id_table);
 
 static struct platform_driver spear_kbd_driver = {
        .probe          = spear_kbd_probe,
-       .remove         = spear_kbd_remove,
+       .remove_new     = spear_kbd_remove,
        .driver         = {
                .name   = "keyboard",
                .pm     = pm_sleep_ptr(&spear_kbd_pm_ops),
index 2c6c53290cc0f57977eb081d3559ae35391e3d3d..2013c0afd0c3a6b987a9218563dfbe82cebee431 100644 (file)
@@ -404,20 +404,18 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int stmpe_keypad_remove(struct platform_device *pdev)
+static void stmpe_keypad_remove(struct platform_device *pdev)
 {
        struct stmpe_keypad *keypad = platform_get_drvdata(pdev);
 
        stmpe_disable(keypad->stmpe, STMPE_BLOCK_KEYPAD);
-
-       return 0;
 }
 
 static struct platform_driver stmpe_keypad_driver = {
        .driver.name    = "stmpe-keypad",
        .driver.owner   = THIS_MODULE,
        .probe          = stmpe_keypad_probe,
-       .remove         = stmpe_keypad_remove,
+       .remove_new     = stmpe_keypad_remove,
 };
 module_platform_driver(stmpe_keypad_driver);
 
index c9a823ea45d02965275f44318a7e8c5af3790887..a1765ed8c825c16298d5ca7e523c049504fcf3fa 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
 #include <linux/clk.h>
 #include <linux/slab.h>
 #include <linux/input/matrix_keypad.h>
@@ -602,9 +602,6 @@ static int tegra_kbc_probe(struct platform_device *pdev)
        unsigned int debounce_cnt;
        unsigned int scan_time_rows;
        unsigned int keymap_rows;
-       const struct of_device_id *match;
-
-       match = of_match_device(tegra_kbc_of_match, &pdev->dev);
 
        kbc = devm_kzalloc(&pdev->dev, sizeof(*kbc), GFP_KERNEL);
        if (!kbc) {
@@ -613,7 +610,7 @@ static int tegra_kbc_probe(struct platform_device *pdev)
        }
 
        kbc->dev = &pdev->dev;
-       kbc->hw_support = match->data;
+       kbc->hw_support = device_get_match_data(&pdev->dev);
        kbc->max_keys = kbc->hw_support->max_rows *
                                kbc->hw_support->max_columns;
        kbc->num_rows_and_columns = kbc->hw_support->max_rows +
index 51c8a326fd0686571df301ffa939ce7cfd52cdbe..31f0702c3d01e54f74ad3b28b8d9469745145600 100644 (file)
@@ -138,14 +138,13 @@ out:
        return err;
 }
 
-static int pm80x_onkey_remove(struct platform_device *pdev)
+static void pm80x_onkey_remove(struct platform_device *pdev)
 {
        struct pm80x_onkey_info *info = platform_get_drvdata(pdev);
 
        pm80x_free_irq(info->pm80x, info->irq, info);
        input_unregister_device(info->idev);
        kfree(info);
-       return 0;
 }
 
 static struct platform_driver pm80x_onkey_driver = {
@@ -154,7 +153,7 @@ static struct platform_driver pm80x_onkey_driver = {
                   .pm = &pm80x_onkey_pm_ops,
                   },
        .probe = pm80x_onkey_probe,
-       .remove = pm80x_onkey_remove,
+       .remove_new = pm80x_onkey_remove,
 };
 
 module_platform_driver(pm80x_onkey_driver);
index 4581606a28d63011c28c37f776001b3ee0ef871e..24f9e9d893de3a2fe24f4271c0281f421ef5abc1 100644 (file)
@@ -133,20 +133,11 @@ static ssize_t axp20x_store_attr(struct device *dev,
                                 size_t count)
 {
        struct axp20x_pek *axp20x_pek = dev_get_drvdata(dev);
-       char val_str[20];
-       size_t len;
        int ret, i;
        unsigned int val, idx = 0;
        unsigned int best_err = UINT_MAX;
 
-       val_str[sizeof(val_str) - 1] = '\0';
-       strncpy(val_str, buf, sizeof(val_str) - 1);
-       len = strlen(val_str);
-
-       if (len && val_str[len - 1] == '\n')
-               val_str[len - 1] = '\0';
-
-       ret = kstrtouint(val_str, 10, &val);
+       ret = kstrtouint(buf, 10, &val);
        if (ret)
                return ret;
 
index 6d1152850a6d8f6df68101b9692763f143f87ab1..7a1122e1efb9cc2711e02122d4d9655644e3d7e5 100644 (file)
@@ -127,7 +127,7 @@ err_free_mem:
        return error;
 }
 
-static int da9052_onkey_remove(struct platform_device *pdev)
+static void da9052_onkey_remove(struct platform_device *pdev)
 {
        struct da9052_onkey *onkey = platform_get_drvdata(pdev);
 
@@ -136,13 +136,11 @@ static int da9052_onkey_remove(struct platform_device *pdev)
 
        input_unregister_device(onkey->input);
        kfree(onkey);
-
-       return 0;
 }
 
 static struct platform_driver da9052_onkey_driver = {
        .probe  = da9052_onkey_probe,
-       .remove = da9052_onkey_remove,
+       .remove_new = da9052_onkey_remove,
        .driver = {
                .name   = "da9052-onkey",
        },
index 7a0d3a1d503cf3b94aea3ff38560fc4bfbc72679..871812f1b398e8150bddb56e78987c348e0dc735 100644 (file)
@@ -132,7 +132,7 @@ err_free_input:
        return err;
 }
 
-static int da9055_onkey_remove(struct platform_device *pdev)
+static void da9055_onkey_remove(struct platform_device *pdev)
 {
        struct da9055_onkey *onkey = platform_get_drvdata(pdev);
        int irq = platform_get_irq_byname(pdev, "ONKEY");
@@ -141,13 +141,11 @@ static int da9055_onkey_remove(struct platform_device *pdev)
        free_irq(irq, onkey);
        cancel_delayed_work_sync(&onkey->work);
        input_unregister_device(onkey->input);
-
-       return 0;
 }
 
 static struct platform_driver da9055_onkey_driver = {
        .probe  = da9055_onkey_probe,
-       .remove = da9055_onkey_remove,
+       .remove_new = da9055_onkey_remove,
        .driver = {
                .name   = "da9055-onkey",
        },
index 68f1c584da05697f11ddddf9bbffe84433a7e509..fa4e7f67d71367b09a0cc5dd42661f94da3a29f8 100644 (file)
@@ -256,20 +256,18 @@ err_release_ports:
        return err;
 }
 
-static int ideapad_remove(struct platform_device *pdev)
+static void ideapad_remove(struct platform_device *pdev)
 {
        i8042_remove_filter(slidebar_i8042_filter);
        input_unregister_device(slidebar_input_dev);
        release_region(IDEAPAD_BASE, 3);
-
-       return 0;
 }
 
 static struct platform_driver slidebar_drv = {
        .driver = {
                .name = "ideapad_slidebar",
        },
-       .remove = ideapad_remove,
+       .remove_new = ideapad_remove,
 };
 
 static int __init ideapad_dmi_check(const struct dmi_system_id *id)
index c0a08563987053effad40b29e51d75a8839320d0..3c636c75e8a1f1520ab2c5c3bec4004e023080c8 100644 (file)
@@ -1586,10 +1586,7 @@ static struct attribute *iqs269_attrs[] = {
        &dev_attr_ati_trigger.attr,
        NULL,
 };
-
-static const struct attribute_group iqs269_attr_group = {
-       .attrs = iqs269_attrs,
-};
+ATTRIBUTE_GROUPS(iqs269);
 
 static const struct regmap_config iqs269_regmap_config = {
        .reg_bits = 8,
@@ -1671,10 +1668,6 @@ static int iqs269_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(&client->dev, &iqs269_attr_group);
-       if (error)
-               dev_err(&client->dev, "Failed to add attributes: %d\n", error);
-
        return error;
 }
 
@@ -1743,6 +1736,7 @@ MODULE_DEVICE_TABLE(of, iqs269_of_match);
 static struct i2c_driver iqs269_i2c_driver = {
        .driver = {
                .name = "iqs269a",
+               .dev_groups = iqs269_groups,
                .of_match_table = iqs269_of_match,
                .pm = pm_sleep_ptr(&iqs269_pm),
        },
index 912e614d039d71d53f83c4499ddf6a90ed24e230..d47269b10e9ab387c39c766d23ac7478994dc741 100644 (file)
@@ -334,14 +334,25 @@ static ssize_t kxtj9_set_poll(struct device *dev, struct device_attribute *attr,
 
 static DEVICE_ATTR(poll, S_IRUGO|S_IWUSR, kxtj9_get_poll, kxtj9_set_poll);
 
-static struct attribute *kxtj9_attributes[] = {
+static struct attribute *kxtj9_attrs[] = {
        &dev_attr_poll.attr,
        NULL
 };
 
-static struct attribute_group kxtj9_attribute_group = {
-       .attrs = kxtj9_attributes
+static umode_t kxtj9_attr_is_visible(struct kobject *kobj,
+                                    struct attribute *attr, int n)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct i2c_client *client = to_i2c_client(dev);
+
+       return client->irq ? attr->mode : 0;
+}
+
+static struct attribute_group kxtj9_group = {
+       .attrs = kxtj9_attrs,
+       .is_visible = kxtj9_attr_is_visible,
 };
+__ATTRIBUTE_GROUPS(kxtj9);
 
 static void kxtj9_poll(struct input_dev *input)
 {
@@ -482,13 +493,6 @@ static int kxtj9_probe(struct i2c_client *client)
                        dev_err(&client->dev, "request irq failed: %d\n", err);
                        return err;
                }
-
-               err = devm_device_add_group(&client->dev,
-                                           &kxtj9_attribute_group);
-               if (err) {
-                       dev_err(&client->dev, "sysfs create failed: %d\n", err);
-                       return err;
-               }
        }
 
        return 0;
@@ -535,8 +539,9 @@ MODULE_DEVICE_TABLE(i2c, kxtj9_id);
 
 static struct i2c_driver kxtj9_driver = {
        .driver = {
-               .name   = NAME,
-               .pm     = pm_sleep_ptr(&kxtj9_pm_ops),
+               .name           = NAME,
+               .dev_groups     = kxtj9_groups,
+               .pm             = pm_sleep_ptr(&kxtj9_pm_ops),
        },
        .probe          = kxtj9_probe,
        .id_table       = kxtj9_id,
index 25fcf1467151b1fd8643353ef8dbb64082e2d9a5..3fe0a85c45e00a5cf50effc415b6b3587e99dee2 100644 (file)
@@ -75,15 +75,13 @@ static int m68kspkr_probe(struct platform_device *dev)
        return 0;
 }
 
-static int m68kspkr_remove(struct platform_device *dev)
+static void m68kspkr_remove(struct platform_device *dev)
 {
        struct input_dev *input_dev = platform_get_drvdata(dev);
 
        input_unregister_device(input_dev);
        /* turn off the speaker */
        m68kspkr_event(NULL, EV_SND, SND_BELL, 0);
-
-       return 0;
 }
 
 static void m68kspkr_shutdown(struct platform_device *dev)
@@ -97,7 +95,7 @@ static struct platform_driver m68kspkr_platform_driver = {
                .name   = "m68kspkr",
        },
        .probe          = m68kspkr_probe,
-       .remove         = m68kspkr_remove,
+       .remove_new     = m68kspkr_remove,
        .shutdown       = m68kspkr_shutdown,
 };
 
index c4dff476d47968d54260deb93146e32d451d9999..8861a67be5754a067372813600d271363ca27698 100644 (file)
@@ -351,7 +351,7 @@ err_free_mem:
        return error;
 }
 
-static int max8997_haptic_remove(struct platform_device *pdev)
+static void max8997_haptic_remove(struct platform_device *pdev)
 {
        struct max8997_haptic *chip = platform_get_drvdata(pdev);
 
@@ -362,8 +362,6 @@ static int max8997_haptic_remove(struct platform_device *pdev)
                pwm_put(chip->pwm);
 
        kfree(chip);
-
-       return 0;
 }
 
 static int max8997_haptic_suspend(struct device *dev)
@@ -391,7 +389,7 @@ static struct platform_driver max8997_haptic_driver = {
                .pm     = pm_sleep_ptr(&max8997_haptic_pm_ops),
        },
        .probe          = max8997_haptic_probe,
-       .remove         = max8997_haptic_remove,
+       .remove_new     = max8997_haptic_remove,
        .id_table       = max8997_haptic_id,
 };
 module_platform_driver(max8997_haptic_driver);
index 0636eee4bb6ceee2eede21475dac61ec5c6a678c..1c8c939638f6cda327a995f7c209a5c531060ea1 100644 (file)
@@ -229,7 +229,7 @@ free_input_dev:
        return err;
 }
 
-static int mc13783_pwrbutton_remove(struct platform_device *pdev)
+static void mc13783_pwrbutton_remove(struct platform_device *pdev)
 {
        struct mc13783_pwrb *priv = platform_get_drvdata(pdev);
        const struct mc13xxx_buttons_platform_data *pdata;
@@ -249,13 +249,11 @@ static int mc13783_pwrbutton_remove(struct platform_device *pdev)
 
        input_unregister_device(priv->pwr);
        kfree(priv);
-
-       return 0;
 }
 
 static struct platform_driver mc13783_pwrbutton_driver = {
        .probe          = mc13783_pwrbutton_probe,
-       .remove         = mc13783_pwrbutton_remove,
+       .remove_new     = mc13783_pwrbutton_remove,
        .driver         = {
                .name   = "mc13783-pwrbutton",
        },
index 7e361727b0d980458afad0022ad8c16df10aca55..06d5972e8e84dc594b4b27d0fb40dfb7d670ce7e 100644 (file)
@@ -245,7 +245,7 @@ err_free_mem:
  *
  * Return: 0
  */
-static int palmas_pwron_remove(struct platform_device *pdev)
+static void palmas_pwron_remove(struct platform_device *pdev)
 {
        struct palmas_pwron *pwron = platform_get_drvdata(pdev);
 
@@ -254,8 +254,6 @@ static int palmas_pwron_remove(struct platform_device *pdev)
 
        input_unregister_device(pwron->input_dev);
        kfree(pwron);
-
-       return 0;
 }
 
 /**
@@ -312,7 +310,7 @@ MODULE_DEVICE_TABLE(of, of_palmas_pwr_match);
 
 static struct platform_driver palmas_pwron_driver = {
        .probe  = palmas_pwron_probe,
-       .remove = palmas_pwron_remove,
+       .remove_new = palmas_pwron_remove,
        .driver = {
                .name   = "palmas_pwrbutton",
                .of_match_table = of_match_ptr(of_palmas_pwr_match),
index b5a53636d7e2e9aafe112aa78152aec47776da47..8a7e9ada59526a64842082254661094cb86d4981 100644 (file)
@@ -99,7 +99,7 @@ fail:
        return err;
 }
 
-static int pcap_keys_remove(struct platform_device *pdev)
+static void pcap_keys_remove(struct platform_device *pdev)
 {
        struct pcap_keys *pcap_keys = platform_get_drvdata(pdev);
 
@@ -108,13 +108,11 @@ static int pcap_keys_remove(struct platform_device *pdev)
 
        input_unregister_device(pcap_keys->input);
        kfree(pcap_keys);
-
-       return 0;
 }
 
 static struct platform_driver pcap_keys_device_driver = {
        .probe          = pcap_keys_probe,
-       .remove         = pcap_keys_remove,
+       .remove_new     = pcap_keys_remove,
        .driver         = {
                .name   = "pcap-keys",
        }
index 4c60c70c4c102dd4564ff3f8a5350db4f9483c92..c5c5fe236c182c95bce78b95a3fceaf353df771c 100644 (file)
@@ -87,7 +87,7 @@ static int pcf50633_input_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int pcf50633_input_remove(struct platform_device *pdev)
+static void pcf50633_input_remove(struct platform_device *pdev)
 {
        struct pcf50633_input *input  = platform_get_drvdata(pdev);
 
@@ -96,8 +96,6 @@ static int pcf50633_input_remove(struct platform_device *pdev)
 
        input_unregister_device(input->input_dev);
        kfree(input);
-
-       return 0;
 }
 
 static struct platform_driver pcf50633_input_driver = {
@@ -105,7 +103,7 @@ static struct platform_driver pcf50633_input_driver = {
                .name = "pcf50633-input",
        },
        .probe = pcf50633_input_probe,
-       .remove = pcf50633_input_remove,
+       .remove_new = pcf50633_input_remove,
 };
 module_platform_driver(pcf50633_input_driver);
 
index 9c666b2f14fef0f28572540eb2a0b85ea486080f..897854fd245f18d362b61d3d631f6dd2570cd776 100644 (file)
@@ -95,15 +95,13 @@ static int pcspkr_probe(struct platform_device *dev)
        return 0;
 }
 
-static int pcspkr_remove(struct platform_device *dev)
+static void pcspkr_remove(struct platform_device *dev)
 {
        struct input_dev *pcspkr_dev = platform_get_drvdata(dev);
 
        input_unregister_device(pcspkr_dev);
        /* turn off the speaker */
        pcspkr_event(NULL, EV_SND, SND_BELL, 0);
-
-       return 0;
 }
 
 static int pcspkr_suspend(struct device *dev)
@@ -129,7 +127,7 @@ static struct platform_driver pcspkr_platform_driver = {
                .pm     = &pcspkr_pm_ops,
        },
        .probe          = pcspkr_probe,
-       .remove         = pcspkr_remove,
+       .remove_new     = pcspkr_remove,
        .shutdown       = pcspkr_shutdown,
 };
 module_platform_driver(pcspkr_platform_driver);
index ba747c5b2b5fc7620fbd8b2acf2bac46cf92fd47..bab710023d8f1c2bbd04ca02d1571fddafc89006 100644 (file)
@@ -408,14 +408,12 @@ static int pm8941_pwrkey_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int pm8941_pwrkey_remove(struct platform_device *pdev)
+static void pm8941_pwrkey_remove(struct platform_device *pdev)
 {
        struct pm8941_pwrkey *pwrkey = platform_get_drvdata(pdev);
 
        if (pwrkey->data->supports_ps_hold_poff_config)
                unregister_reboot_notifier(&pwrkey->reboot_notifier);
-
-       return 0;
 }
 
 static const struct pm8941_data pwrkey_data = {
@@ -467,7 +465,7 @@ MODULE_DEVICE_TABLE(of, pm8941_pwr_key_id_table);
 
 static struct platform_driver pm8941_pwrkey_driver = {
        .probe = pm8941_pwrkey_probe,
-       .remove = pm8941_pwrkey_remove,
+       .remove_new = pm8941_pwrkey_remove,
        .driver = {
                .name = "pm8941-pwrkey",
                .pm = pm_sleep_ptr(&pm8941_pwr_key_pm_ops),
index e79f5497948b8c047039c29c504317d009b5ef4c..08bcee3d6bccac2f03625c32a588c664ea789af8 100644 (file)
@@ -411,7 +411,7 @@ out:
        return button_info;
 }
 
-static int soc_button_remove(struct platform_device *pdev)
+static void soc_button_remove(struct platform_device *pdev)
 {
        struct soc_button_data *priv = platform_get_drvdata(pdev);
 
@@ -420,8 +420,6 @@ static int soc_button_remove(struct platform_device *pdev)
        for (i = 0; i < BUTTON_TYPES; i++)
                if (priv->children[i])
                        platform_device_unregister(priv->children[i]);
-
-       return 0;
 }
 
 static int soc_button_probe(struct platform_device *pdev)
@@ -609,7 +607,7 @@ MODULE_DEVICE_TABLE(acpi, soc_button_acpi_match);
 
 static struct platform_driver soc_button_driver = {
        .probe          = soc_button_probe,
-       .remove         = soc_button_remove,
+       .remove_new     = soc_button_remove,
        .driver         = {
                .name = KBUILD_MODNAME,
                .acpi_match_table = ACPI_PTR(soc_button_acpi_match),
index e5dd84725c6e74737ca4a080d6ac4b1129330623..20020cbc0752be1434c62fe52320bfc961d77b1e 100644 (file)
@@ -231,7 +231,7 @@ out_err:
        return err;
 }
 
-static int bbc_remove(struct platform_device *op)
+static void bbc_remove(struct platform_device *op)
 {
        struct sparcspkr_state *state = platform_get_drvdata(op);
        struct input_dev *input_dev = state->input_dev;
@@ -245,8 +245,6 @@ static int bbc_remove(struct platform_device *op)
        of_iounmap(&op->resource[0], info->regs, 6);
 
        kfree(state);
-
-       return 0;
 }
 
 static const struct of_device_id bbc_beep_match[] = {
@@ -264,7 +262,7 @@ static struct platform_driver bbc_beep_driver = {
                .of_match_table = bbc_beep_match,
        },
        .probe          = bbc_beep_probe,
-       .remove         = bbc_remove,
+       .remove_new     = bbc_remove,
        .shutdown       = sparcspkr_shutdown,
 };
 
@@ -310,7 +308,7 @@ out_err:
        return err;
 }
 
-static int grover_remove(struct platform_device *op)
+static void grover_remove(struct platform_device *op)
 {
        struct sparcspkr_state *state = platform_get_drvdata(op);
        struct grover_beep_info *info = &state->u.grover;
@@ -325,8 +323,6 @@ static int grover_remove(struct platform_device *op)
        of_iounmap(&op->resource[2], info->freq_regs, 2);
 
        kfree(state);
-
-       return 0;
 }
 
 static const struct of_device_id grover_beep_match[] = {
@@ -344,7 +340,7 @@ static struct platform_driver grover_beep_driver = {
                .of_match_table = grover_beep_match,
        },
        .probe          = grover_beep_probe,
-       .remove         = grover_remove,
+       .remove_new     = grover_remove,
        .shutdown       = sparcspkr_shutdown,
 };
 
index 111cb70cde46527cd39eec0e6a616ad625676c9d..5c4956678cd0aeed39a5bb756940939850ac271d 100644 (file)
@@ -1286,13 +1286,11 @@ static int wistron_probe(struct platform_device *dev)
        return 0;
 }
 
-static int wistron_remove(struct platform_device *dev)
+static void wistron_remove(struct platform_device *dev)
 {
        wistron_led_remove();
        input_unregister_device(wistron_idev);
        bios_detach();
-
-       return 0;
 }
 
 static int wistron_suspend(struct device *dev)
@@ -1336,7 +1334,7 @@ static struct platform_driver wistron_driver = {
                .pm     = pm_sleep_ptr(&wistron_pm_ops),
        },
        .probe          = wistron_probe,
-       .remove         = wistron_remove,
+       .remove_new     = wistron_remove,
 };
 
 static int __init wb_module_init(void)
index a42fe041b73c67da266b786eb88f89540af05b46..e4a06c73b72d9dd07ac7dc938bb11f5cffed58f3 100644 (file)
@@ -123,20 +123,18 @@ err:
        return ret;
 }
 
-static int wm831x_on_remove(struct platform_device *pdev)
+static void wm831x_on_remove(struct platform_device *pdev)
 {
        struct wm831x_on *wm831x_on = platform_get_drvdata(pdev);
        int irq = platform_get_irq(pdev, 0);
 
        free_irq(irq, wm831x_on);
        cancel_delayed_work_sync(&wm831x_on->work);
-
-       return 0;
 }
 
 static struct platform_driver wm831x_on_driver = {
        .probe          = wm831x_on_probe,
-       .remove         = wm831x_on_remove,
+       .remove_new     = wm831x_on_remove,
        .driver         = {
                .name   = "wm831x-on",
        },
index 05851bc32541f7c6917ed1ae9e434234baf9e5f0..a84098448f5b94d607dc5b86ceb5f04618783179 100644 (file)
@@ -1223,7 +1223,7 @@ static DEVICE_ATTR(baseline, S_IRUGO, cyapa_show_baseline, NULL);
 static DEVICE_ATTR(calibrate, S_IWUSR, NULL, cyapa_calibrate_store);
 static DEVICE_ATTR(mode, S_IRUGO, cyapa_show_mode, NULL);
 
-static struct attribute *cyapa_sysfs_entries[] = {
+static struct attribute *cyapa_attrs[] = {
        &dev_attr_firmware_version.attr,
        &dev_attr_product_id.attr,
        &dev_attr_update_fw.attr,
@@ -1232,10 +1232,7 @@ static struct attribute *cyapa_sysfs_entries[] = {
        &dev_attr_mode.attr,
        NULL,
 };
-
-static const struct attribute_group cyapa_sysfs_group = {
-       .attrs = cyapa_sysfs_entries,
-};
+ATTRIBUTE_GROUPS(cyapa);
 
 static void cyapa_disable_regulator(void *data)
 {
@@ -1302,12 +1299,6 @@ static int cyapa_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(dev, &cyapa_sysfs_group);
-       if (error) {
-               dev_err(dev, "failed to create sysfs entries: %d\n", error);
-               return error;
-       }
-
        error = cyapa_prepare_wakeup_controls(cyapa);
        if (error) {
                dev_err(dev, "failed to prepare wakeup controls: %d\n", error);
@@ -1484,6 +1475,7 @@ MODULE_DEVICE_TABLE(of, cyapa_of_match);
 static struct i2c_driver cyapa_driver = {
        .driver = {
                .name = "cyapa",
+               .dev_groups = cyapa_groups,
                .pm = pm_ptr(&cyapa_pm_ops),
                .acpi_match_table = ACPI_PTR(cyapa_acpi_id),
                .of_match_table = of_match_ptr(cyapa_of_match),
index 2b7b86eef280e73ead3b12e5bd68a6fefd9d6b74..c00dc1275da23daebb21110749f3c3f75431d075 100644 (file)
@@ -295,7 +295,7 @@ err_free_gpio:
        return error;
 }
 
-static int navpoint_remove(struct platform_device *pdev)
+static void navpoint_remove(struct platform_device *pdev)
 {
        const struct navpoint_platform_data *pdata =
                                        dev_get_platdata(&pdev->dev);
@@ -311,8 +311,6 @@ static int navpoint_remove(struct platform_device *pdev)
 
        if (gpio_is_valid(pdata->gpio))
                gpio_free(pdata->gpio);
-
-       return 0;
 }
 
 static int navpoint_suspend(struct device *dev)
@@ -348,7 +346,7 @@ static DEFINE_SIMPLE_DEV_PM_OPS(navpoint_pm_ops,
 
 static struct platform_driver navpoint_driver = {
        .probe          = navpoint_probe,
-       .remove         = navpoint_remove,
+       .remove_new     = navpoint_remove,
        .driver = {
                .name   = "navpoint",
                .pm     = pm_sleep_ptr(&navpoint_pm_ops),
index f2e093b0b9982d238cca68454c0fbdbef174a81d..1b45b1d3077de72d6f78c608287c52f0d09773f3 100644 (file)
@@ -277,11 +277,11 @@ void rmi_unregister_function(struct rmi_function *fn)
 
        device_del(&fn->dev);
        of_node_put(fn->dev.of_node);
-       put_device(&fn->dev);
 
        for (i = 0; i < fn->num_of_irqs; i++)
                irq_dispose_mapping(fn->irq[i]);
 
+       put_device(&fn->dev);
 }
 
 /**
index 0d9a5756e3f5934ba8df6e4b0518687c74bc3a16..3b3ac71e53dc589e5c547ff4eec7cccd54564b8a 100644 (file)
@@ -471,7 +471,7 @@ static ssize_t rmi_driver_update_fw_store(struct device *dev,
        if (buf[count - 1] == '\0' || buf[count - 1] == '\n')
                copy_count -= 1;
 
-       strncpy(fw_name, buf, copy_count);
+       memcpy(fw_name, buf, copy_count);
        fw_name[copy_count] = '\0';
 
        ret = request_firmware(&fw, fw_name, dev);
index 9f8d7b332d1b0a8643851617c401f2ac82dc7bd6..c5b634940cfcff91c63af13df6a0f8d9cad1c4cb 100644 (file)
@@ -125,13 +125,11 @@ static int altera_ps2_probe(struct platform_device *pdev)
 /*
  * Remove one device from this driver.
  */
-static int altera_ps2_remove(struct platform_device *pdev)
+static void altera_ps2_remove(struct platform_device *pdev)
 {
        struct ps2if *ps2if = platform_get_drvdata(pdev);
 
        serio_unregister_port(ps2if->io);
-
-       return 0;
 }
 
 #ifdef CONFIG_OF
@@ -148,7 +146,7 @@ MODULE_DEVICE_TABLE(of, altera_ps2_match);
  */
 static struct platform_driver altera_ps2_driver = {
        .probe          = altera_ps2_probe,
-       .remove         = altera_ps2_remove,
+       .remove_new     = altera_ps2_remove,
        .driver = {
                .name   = DRV_NAME,
                .of_match_table = of_match_ptr(altera_ps2_match),
index ec93cb4573c3e1a88ba95bdfe81b5c95ea6845c6..0bd6ae1068099aa8daaf9c61998cb85e285e1a83 100644 (file)
@@ -173,18 +173,16 @@ static int ams_delta_serio_init(struct platform_device *pdev)
        return 0;
 }
 
-static int ams_delta_serio_exit(struct platform_device *pdev)
+static void ams_delta_serio_exit(struct platform_device *pdev)
 {
        struct ams_delta_serio *priv = platform_get_drvdata(pdev);
 
        serio_unregister_port(priv->serio);
-
-       return 0;
 }
 
 static struct platform_driver ams_delta_serio_driver = {
        .probe  = ams_delta_serio_init,
-       .remove = ams_delta_serio_exit,
+       .remove_new = ams_delta_serio_exit,
        .driver = {
                .name   = DRIVER_NAME
        },
index 3f6866d39b862454fa903981016c3df03b252c8f..dbbb1025152098d2ff1cf2fae0834e485076b2e7 100644 (file)
@@ -187,13 +187,11 @@ static int apbps2_of_probe(struct platform_device *ofdev)
        return 0;
 }
 
-static int apbps2_of_remove(struct platform_device *of_dev)
+static void apbps2_of_remove(struct platform_device *of_dev)
 {
        struct apbps2_priv *priv = platform_get_drvdata(of_dev);
 
        serio_unregister_port(priv->io);
-
-       return 0;
 }
 
 static const struct of_device_id apbps2_of_match[] = {
@@ -210,7 +208,7 @@ static struct platform_driver apbps2_of_driver = {
                .of_match_table = apbps2_of_match,
        },
        .probe = apbps2_of_probe,
-       .remove = apbps2_of_remove,
+       .remove_new = apbps2_of_remove,
 };
 
 module_platform_driver(apbps2_of_driver);
index a6debb13d5278f96ef40a73847ea8c6ade4f5ada..9d8726830140110ae92ad2f7df93c1cedd75a438 100644 (file)
@@ -232,7 +232,7 @@ static int arc_ps2_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int arc_ps2_remove(struct platform_device *pdev)
+static void arc_ps2_remove(struct platform_device *pdev)
 {
        struct arc_ps2_data *arc_ps2 = platform_get_drvdata(pdev);
        int i;
@@ -244,8 +244,6 @@ static int arc_ps2_remove(struct platform_device *pdev)
        dev_dbg(&pdev->dev, "frame error count = %i\n", arc_ps2->frame_error);
        dev_dbg(&pdev->dev, "buffer overflow count = %i\n",
                arc_ps2->buf_overflow);
-
-       return 0;
 }
 
 #ifdef CONFIG_OF
@@ -262,7 +260,7 @@ static struct platform_driver arc_ps2_driver = {
                .of_match_table = of_match_ptr(arc_ps2_match),
        },
        .probe  = arc_ps2_probe,
-       .remove = arc_ps2_remove,
+       .remove_new = arc_ps2_remove,
 };
 
 module_platform_driver(arc_ps2_driver);
index 3da751f4a6bf675ee286554a35edcbe730c2ec91..d5c9bb3d010382a5311f711d18b5157a4e4055fd 100644 (file)
@@ -180,11 +180,9 @@ static int ct82c710_probe(struct platform_device *dev)
        return 0;
 }
 
-static int ct82c710_remove(struct platform_device *dev)
+static void ct82c710_remove(struct platform_device *dev)
 {
        serio_unregister_port(ct82c710_port);
-
-       return 0;
 }
 
 static struct platform_driver ct82c710_driver = {
@@ -192,7 +190,7 @@ static struct platform_driver ct82c710_driver = {
                .name   = "ct82c710",
        },
        .probe          = ct82c710_probe,
-       .remove         = ct82c710_remove,
+       .remove_new     = ct82c710_remove,
 };
 
 
index b68793bf05c8cefb9f7bb59da7748147d611da7f..c2fda54dc384fe742f0cdec836ac85dce7b7b2b2 100644 (file)
@@ -82,11 +82,9 @@ static int sparc_i8042_probe(struct platform_device *op)
        return 0;
 }
 
-static int sparc_i8042_remove(struct platform_device *op)
+static void sparc_i8042_remove(struct platform_device *op)
 {
        of_iounmap(kbd_res, kbd_iobase, 8);
-
-       return 0;
 }
 
 static const struct of_device_id sparc_i8042_match[] = {
@@ -103,7 +101,7 @@ static struct platform_driver sparc_i8042_driver = {
                .of_match_table = sparc_i8042_match,
        },
        .probe          = sparc_i8042_probe,
-       .remove         = sparc_i8042_remove,
+       .remove_new     = sparc_i8042_remove,
 };
 
 static bool i8042_is_mr_coffee(void)
index 6dac7c1853a54189f50784b85b5ae3eaf9b6b93e..9fbb8d31575ae1486afcb1e7f70e51ff8d6f5a76 100644 (file)
@@ -1584,13 +1584,11 @@ static int i8042_probe(struct platform_device *dev)
        return error;
 }
 
-static int i8042_remove(struct platform_device *dev)
+static void i8042_remove(struct platform_device *dev)
 {
        i8042_unregister_ports();
        i8042_free_irqs();
        i8042_controller_reset(false);
-
-       return 0;
 }
 
 static struct platform_driver i8042_driver = {
@@ -1601,7 +1599,7 @@ static struct platform_driver i8042_driver = {
 #endif
        },
        .probe          = i8042_probe,
-       .remove         = i8042_remove,
+       .remove_new     = i8042_remove,
        .shutdown       = i8042_shutdown,
 };
 
index d51bfe912db5b64b26a24662787269bef23d5b22..50552dc7b4f5e689b7f1e9e63f9104e80efc57c2 100644 (file)
@@ -190,7 +190,7 @@ static int ioc3kbd_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int ioc3kbd_remove(struct platform_device *pdev)
+static void ioc3kbd_remove(struct platform_device *pdev)
 {
        struct ioc3kbd_data *d = platform_get_drvdata(pdev);
 
@@ -198,13 +198,11 @@ static int ioc3kbd_remove(struct platform_device *pdev)
 
        serio_unregister_port(d->kbd);
        serio_unregister_port(d->aux);
-
-       return 0;
 }
 
 static struct platform_driver ioc3kbd_driver = {
        .probe          = ioc3kbd_probe,
-       .remove         = ioc3kbd_remove,
+       .remove_new     = ioc3kbd_remove,
        .driver = {
                .name = "ioc3-kbd",
        },
index 629e15089c219e28c1b0746f401461951057ed4e..5ccfb82759b35613eba024608b088132adddf68b 100644 (file)
@@ -148,12 +148,10 @@ static int maceps2_probe(struct platform_device *dev)
        return 0;
 }
 
-static int maceps2_remove(struct platform_device *dev)
+static void maceps2_remove(struct platform_device *dev)
 {
        serio_unregister_port(maceps2_port[0]);
        serio_unregister_port(maceps2_port[1]);
-
-       return 0;
 }
 
 static struct platform_driver maceps2_driver = {
@@ -161,7 +159,7 @@ static struct platform_driver maceps2_driver = {
                .name   = "maceps2",
        },
        .probe          = maceps2_probe,
-       .remove         = maceps2_remove,
+       .remove_new     = maceps2_remove,
 };
 
 static int __init maceps2_init(void)
index 33a8e5889bd8b8fa0747c8e97a951bb5650c0c9c..240a714f708185335fe507804fdeae6cbb5d8c01 100644 (file)
@@ -238,7 +238,7 @@ err_pad:
        return error;
 }
 
-static int olpc_apsp_remove(struct platform_device *pdev)
+static void olpc_apsp_remove(struct platform_device *pdev)
 {
        struct olpc_apsp *priv = platform_get_drvdata(pdev);
 
@@ -246,8 +246,6 @@ static int olpc_apsp_remove(struct platform_device *pdev)
 
        serio_unregister_port(priv->kbio);
        serio_unregister_port(priv->padio);
-
-       return 0;
 }
 
 static const struct of_device_id olpc_apsp_dt_ids[] = {
@@ -258,7 +256,7 @@ MODULE_DEVICE_TABLE(of, olpc_apsp_dt_ids);
 
 static struct platform_driver olpc_apsp_driver = {
        .probe          = olpc_apsp_probe,
-       .remove         = olpc_apsp_remove,
+       .remove_new     = olpc_apsp_remove,
        .driver         = {
                .name   = "olpc-apsp",
                .of_match_table = olpc_apsp_dt_ids,
index bc1dc484389b40871476964d409652f660be4888..c3ff60859a035b90d20982f9b3fa8047dfb23287 100644 (file)
@@ -476,12 +476,11 @@ err_free_serio:
        return error;
 }
 
-static int ps2_gpio_remove(struct platform_device *pdev)
+static void ps2_gpio_remove(struct platform_device *pdev)
 {
        struct ps2_gpio_data *drvdata = platform_get_drvdata(pdev);
 
        serio_unregister_port(drvdata->serio);
-       return 0;
 }
 
 #if defined(CONFIG_OF)
@@ -494,7 +493,7 @@ MODULE_DEVICE_TABLE(of, ps2_gpio_match);
 
 static struct platform_driver ps2_gpio_driver = {
        .probe          = ps2_gpio_probe,
-       .remove         = ps2_gpio_remove,
+       .remove_new     = ps2_gpio_remove,
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = of_match_ptr(ps2_gpio_match),
index ba04058fc3cbde89260357a6ede06323f9af0ad7..3f81f8749cd56cbfd6b3ddcf5978bae491a4e5a1 100644 (file)
@@ -148,7 +148,7 @@ err_free_mem:
        return error;
 }
 
-static int q40kbd_remove(struct platform_device *pdev)
+static void q40kbd_remove(struct platform_device *pdev)
 {
        struct q40kbd *q40kbd = platform_get_drvdata(pdev);
 
@@ -160,15 +160,13 @@ static int q40kbd_remove(struct platform_device *pdev)
        serio_unregister_port(q40kbd->port);
        free_irq(Q40_IRQ_KEYBOARD, q40kbd);
        kfree(q40kbd);
-
-       return 0;
 }
 
 static struct platform_driver q40kbd_driver = {
        .driver         = {
                .name   = "q40kbd",
        },
-       .remove         = q40kbd_remove,
+       .remove_new     = q40kbd_remove,
 };
 
 module_platform_driver_probe(q40kbd_driver, q40kbd_probe);
index e8a9709f32ebbb68ab1d09f8bc56860f1751b63c..9bbfefd092c0852c558baef07e71baaecf1f62bd 100644 (file)
@@ -133,20 +133,18 @@ static int rpckbd_probe(struct platform_device *dev)
        return 0;
 }
 
-static int rpckbd_remove(struct platform_device *dev)
+static void rpckbd_remove(struct platform_device *dev)
 {
        struct serio *serio = platform_get_drvdata(dev);
        struct rpckbd_data *rpckbd = serio->port_data;
 
        serio_unregister_port(serio);
        kfree(rpckbd);
-
-       return 0;
 }
 
 static struct platform_driver rpckbd_driver = {
        .probe          = rpckbd_probe,
-       .remove         = rpckbd_remove,
+       .remove_new     = rpckbd_remove,
        .driver         = {
                .name   = "kart",
        },
index eb262640192e989e5d1966dc7f155443cfe62248..aec66d9f517615bf7a8b21432bab07f485f1386c 100644 (file)
@@ -297,7 +297,7 @@ err_free_mem:
        return error;
 }
 
-static int sun4i_ps2_remove(struct platform_device *pdev)
+static void sun4i_ps2_remove(struct platform_device *pdev)
 {
        struct sun4i_ps2data *drvdata = platform_get_drvdata(pdev);
 
@@ -311,8 +311,6 @@ static int sun4i_ps2_remove(struct platform_device *pdev)
        iounmap(drvdata->reg_base);
 
        kfree(drvdata);
-
-       return 0;
 }
 
 static const struct of_device_id sun4i_ps2_match[] = {
@@ -324,7 +322,7 @@ MODULE_DEVICE_TABLE(of, sun4i_ps2_match);
 
 static struct platform_driver sun4i_ps2_driver = {
        .probe          = sun4i_ps2_probe,
-       .remove         = sun4i_ps2_remove,
+       .remove_new     = sun4i_ps2_remove,
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = sun4i_ps2_match,
index f3d28da70b75c4478becad35f757e972fc7a054d..d8f9faf2b52902a76527abd0a844b386cfb0a677 100644 (file)
@@ -329,7 +329,7 @@ failed1:
  * if the driver module is being unloaded. It frees any resources allocated to
  * the device.
  */
-static int xps2_of_remove(struct platform_device *of_dev)
+static void xps2_of_remove(struct platform_device *of_dev)
 {
        struct xps2data *drvdata = platform_get_drvdata(of_dev);
        struct resource r_mem; /* IO mem resources */
@@ -344,8 +344,6 @@ static int xps2_of_remove(struct platform_device *of_dev)
                release_mem_region(r_mem.start, resource_size(&r_mem));
 
        kfree(drvdata);
-
-       return 0;
 }
 
 /* Match table for of_platform binding */
@@ -361,7 +359,7 @@ static struct platform_driver xps2_of_driver = {
                .of_match_table = xps2_of_match,
        },
        .probe          = xps2_of_probe,
-       .remove         = xps2_of_remove,
+       .remove_new     = xps2_of_remove,
 };
 module_platform_driver(xps2_of_driver);
 
index edb36d663f22f16aa3f7e8895b47a9fe02a1b4c0..a0598e9c7affcc1689a56d1a5674011769d806e9 100644 (file)
@@ -612,10 +612,11 @@ static umode_t ad7877_attr_is_visible(struct kobject *kobj,
        return mode;
 }
 
-static const struct attribute_group ad7877_attr_group = {
+static const struct attribute_group ad7877_group = {
        .is_visible     = ad7877_attr_is_visible,
        .attrs          = ad7877_attributes,
 };
+__ATTRIBUTE_GROUPS(ad7877);
 
 static void ad7877_setup_ts_def_msg(struct spi_device *spi, struct ad7877 *ts)
 {
@@ -777,10 +778,6 @@ static int ad7877_probe(struct spi_device *spi)
                return err;
        }
 
-       err = devm_device_add_group(&spi->dev, &ad7877_attr_group);
-       if (err)
-               return err;
-
        err = input_register_device(input_dev);
        if (err)
                return err;
@@ -810,8 +807,9 @@ static DEFINE_SIMPLE_DEV_PM_OPS(ad7877_pm, ad7877_suspend, ad7877_resume);
 
 static struct spi_driver ad7877_driver = {
        .driver = {
-               .name   = "ad7877",
-               .pm     = pm_sleep_ptr(&ad7877_pm),
+               .name           = "ad7877",
+               .dev_groups     = ad7877_groups,
+               .pm             = pm_sleep_ptr(&ad7877_pm),
        },
        .probe          = ad7877_probe,
 };
index feaa6f8b01ed67baea5de8c8a9dc853b21e8da1e..5c094ab746989678df08d30170504b6d6aec7cc2 100644 (file)
@@ -58,9 +58,10 @@ MODULE_DEVICE_TABLE(of, ad7879_i2c_dt_ids);
 
 static struct i2c_driver ad7879_i2c_driver = {
        .driver = {
-               .name   = "ad7879",
-               .pm     = &ad7879_pm_ops,
-               .of_match_table = of_match_ptr(ad7879_i2c_dt_ids),
+               .name           = "ad7879",
+               .dev_groups     = ad7879_groups,
+               .pm             = &ad7879_pm_ops,
+               .of_match_table = of_match_ptr(ad7879_i2c_dt_ids),
        },
        .probe          = ad7879_i2c_probe,
        .id_table       = ad7879_id,
index 50e88984680032ab89ff6811ed65dabd325da7f0..064968fe57cfd54ff80ac3d927572ef4b7acc6a1 100644 (file)
@@ -56,9 +56,10 @@ MODULE_DEVICE_TABLE(of, ad7879_spi_dt_ids);
 
 static struct spi_driver ad7879_spi_driver = {
        .driver = {
-               .name   = "ad7879",
-               .pm     = &ad7879_pm_ops,
-               .of_match_table = of_match_ptr(ad7879_spi_dt_ids),
+               .name           = "ad7879",
+               .dev_groups     = ad7879_groups,
+               .pm             = &ad7879_pm_ops,
+               .of_match_table = of_match_ptr(ad7879_spi_dt_ids),
        },
        .probe          = ad7879_spi_probe,
 };
index e850853328f1b3bd9f13f46ab8644cfda1f45516..e5d69bf2276e0f9fb5424e7cda1983325c00c56a 100644 (file)
@@ -391,6 +391,12 @@ static const struct attribute_group ad7879_attr_group = {
        .attrs = ad7879_attributes,
 };
 
+const struct attribute_group *ad7879_groups[] = {
+       &ad7879_attr_group,
+       NULL
+};
+EXPORT_SYMBOL_GPL(ad7879_groups);
+
 #ifdef CONFIG_GPIOLIB
 static int ad7879_gpio_direction_input(struct gpio_chip *chip,
                                        unsigned gpio)
@@ -612,10 +618,6 @@ int ad7879_probe(struct device *dev, struct regmap *regmap,
 
        __ad7879_disable(ts);
 
-       err = devm_device_add_group(dev, &ad7879_attr_group);
-       if (err)
-               return err;
-
        err = ad7879_gpio_add(ts);
        if (err)
                return err;
index ae8aa1428e56b7de7ca2af89956ed7b677dd6ba0..d71a8e787290002037833732a3a410f71511a1a1 100644 (file)
@@ -8,11 +8,14 @@
 #ifndef _AD7879_H_
 #define _AD7879_H_
 
+#include <linux/pm.h>
 #include <linux/types.h>
 
+struct attribute_group;
 struct device;
 struct regmap;
 
+extern const struct attribute_group *ad7879_groups[];
 extern const struct dev_pm_ops ad7879_pm_ops;
 
 int ad7879_probe(struct device *dev, struct regmap *regmap,
index faea40dd66d01862f22e6fc50a95e4c044abfbb1..d2bbb436a77df927b95c1d9494beaf3d2bf6abf6 100644 (file)
@@ -625,15 +625,12 @@ static ssize_t ads7846_disable_store(struct device *dev,
 
 static DEVICE_ATTR(disable, 0664, ads7846_disable_show, ads7846_disable_store);
 
-static struct attribute *ads784x_attributes[] = {
+static struct attribute *ads784x_attrs[] = {
        &dev_attr_pen_down.attr,
        &dev_attr_disable.attr,
        NULL,
 };
-
-static const struct attribute_group ads784x_attr_group = {
-       .attrs = ads784x_attributes,
-};
+ATTRIBUTE_GROUPS(ads784x);
 
 /*--------------------------------------------------------------------------*/
 
@@ -1357,10 +1354,6 @@ static int ads7846_probe(struct spi_device *spi)
        else
                (void) ads7846_read12_ser(dev, READ_12BIT_SER(vaux));
 
-       err = devm_device_add_group(dev, &ads784x_attr_group);
-       if (err)
-               return err;
-
        err = input_register_device(input_dev);
        if (err)
                return err;
@@ -1386,9 +1379,10 @@ static void ads7846_remove(struct spi_device *spi)
 
 static struct spi_driver ads7846_driver = {
        .driver = {
-               .name   = "ads7846",
-               .pm     = pm_sleep_ptr(&ads7846_pm),
-               .of_match_table = ads7846_dt_ids,
+               .name           = "ads7846",
+               .dev_groups     = ads784x_groups,
+               .pm             = pm_sleep_ptr(&ads7846_pm),
+               .of_match_table = ads7846_dt_ids,
        },
        .probe          = ads7846_probe,
        .remove         = ads7846_remove,
index db5a885ecd728573edc566eb2d57f10d26e7e33a..68527ede5c0eae1393fdec5ce76150fb92ac7675 100644 (file)
@@ -207,7 +207,7 @@ struct cyttsp5 {
        int num_prv_rec;
        struct regmap *regmap;
        struct touchscreen_properties prop;
-       struct regulator *vdd;
+       struct regulator_bulk_data supplies[2];
 };
 
 /*
@@ -817,7 +817,7 @@ static void cyttsp5_cleanup(void *data)
 {
        struct cyttsp5 *ts = data;
 
-       regulator_disable(ts->vdd);
+       regulator_bulk_disable(ARRAY_SIZE(ts->supplies), ts->supplies);
 }
 
 static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq,
@@ -840,9 +840,12 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq,
        init_completion(&ts->cmd_done);
 
        /* Power up the device */
-       ts->vdd = devm_regulator_get(dev, "vdd");
-       if (IS_ERR(ts->vdd)) {
-               error = PTR_ERR(ts->vdd);
+       ts->supplies[0].supply = "vdd";
+       ts->supplies[1].supply = "vddio";
+       error = devm_regulator_bulk_get(dev, ARRAY_SIZE(ts->supplies),
+                                       ts->supplies);
+       if (error) {
+               dev_err(ts->dev, "Failed to get regulators, error %d\n", error);
                return error;
        }
 
@@ -850,9 +853,11 @@ static int cyttsp5_probe(struct device *dev, struct regmap *regmap, int irq,
        if (error)
                return error;
 
-       error = regulator_enable(ts->vdd);
-       if (error)
+       error = regulator_bulk_enable(ARRAY_SIZE(ts->supplies), ts->supplies);
+       if (error) {
+               dev_err(ts->dev, "Failed to enable regulators, error %d\n", error);
                return error;
+       }
 
        ts->input = devm_input_allocate_device(dev);
        if (!ts->input) {
index f91d0e02ddaee1ddc01eecdad1d61372cbd6fd07..d71690ce64633ff90f83cd2875439e3911718aec 100644 (file)
@@ -311,7 +311,7 @@ err_free_mem:
        return error;
 }
 
-static int  da9052_ts_remove(struct platform_device *pdev)
+static void da9052_ts_remove(struct platform_device *pdev)
 {
        struct da9052_tsi *tsi = platform_get_drvdata(pdev);
 
@@ -322,13 +322,11 @@ static int  da9052_ts_remove(struct platform_device *pdev)
 
        input_unregister_device(tsi->dev);
        kfree(tsi);
-
-       return 0;
 }
 
 static struct platform_driver da9052_tsi_driver = {
        .probe  = da9052_ts_probe,
-       .remove = da9052_ts_remove,
+       .remove_new = da9052_ts_remove,
        .driver = {
                .name   = "da9052-tsi",
        },
index 457d53337fbb399191efd37f7f822a53e204b981..3e102bcc4a1c7120428ca4c8a40d396797ae9980 100644 (file)
@@ -580,10 +580,7 @@ static struct attribute *edt_ft5x06_attrs[] = {
        &dev_attr_crc_errors.attr,
        NULL
 };
-
-static const struct attribute_group edt_ft5x06_attr_group = {
-       .attrs = edt_ft5x06_attrs,
-};
+ATTRIBUTE_GROUPS(edt_ft5x06);
 
 static void edt_ft5x06_restore_reg_parameters(struct edt_ft5x06_ts_data *tsdata)
 {
@@ -1330,10 +1327,6 @@ static int edt_ft5x06_ts_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(&client->dev, &edt_ft5x06_attr_group);
-       if (error)
-               return error;
-
        error = input_register_device(input);
        if (error)
                return error;
@@ -1502,6 +1495,7 @@ MODULE_DEVICE_TABLE(of, edt_ft5x06_of_match);
 static struct i2c_driver edt_ft5x06_ts_driver = {
        .driver = {
                .name = "edt_ft5x06",
+               .dev_groups = edt_ft5x06_groups,
                .of_match_table = edt_ft5x06_of_match,
                .pm = pm_sleep_ptr(&edt_ft5x06_ts_pm_ops),
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
index a1af3de9f3109ed6c6887d6b6575a33facf44669..365765d40e627880df9753cc27771345ff31a790 100644 (file)
@@ -1299,7 +1299,7 @@ static ELANTS_VERSION_ATTR(solution_version);
 static ELANTS_VERSION_ATTR(bc_version);
 static ELANTS_VERSION_ATTR(iap_version);
 
-static struct attribute *elants_attributes[] = {
+static struct attribute *elants_i2c_attrs[] = {
        &dev_attr_calibrate.attr,
        &dev_attr_update_fw.attr,
        &dev_attr_iap_mode.attr,
@@ -1313,10 +1313,7 @@ static struct attribute *elants_attributes[] = {
        &elants_ver_attr_iap_version.dattr.attr,
        NULL
 };
-
-static const struct attribute_group elants_attribute_group = {
-       .attrs = elants_attributes,
-};
+ATTRIBUTE_GROUPS(elants_i2c);
 
 static int elants_i2c_power_on(struct elants_data *ts)
 {
@@ -1552,13 +1549,6 @@ static int elants_i2c_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(&client->dev, &elants_attribute_group);
-       if (error) {
-               dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
-                       error);
-               return error;
-       }
-
        return 0;
 }
 
@@ -1667,6 +1657,7 @@ static struct i2c_driver elants_i2c_driver = {
        .id_table = elants_i2c_id,
        .driver = {
                .name = DEVICE_NAME,
+               .dev_groups = elants_i2c_groups,
                .pm = pm_sleep_ptr(&elants_i2c_pm_ops),
                .acpi_match_table = ACPI_PTR(elants_acpi_id),
                .of_match_table = of_match_ptr(elants_of_match),
index 4c0d99aae9e0346be665ac679c17925e2291e332..a4030cc9ff60d7031e4f988f7446491dafcfc0ea 100644 (file)
@@ -325,16 +325,13 @@ static ssize_t type_show(struct device *dev,
 }
 static DEVICE_ATTR_RO(type);
 
-static struct attribute *sysfs_attrs[] = {
+static struct attribute *exc3000_attrs[] = {
        &dev_attr_fw_version.attr,
        &dev_attr_model.attr,
        &dev_attr_type.attr,
        NULL
 };
-
-static struct attribute_group exc3000_attribute_group = {
-       .attrs = sysfs_attrs
-};
+ATTRIBUTE_GROUPS(exc3000);
 
 static int exc3000_probe(struct i2c_client *client)
 {
@@ -437,10 +434,6 @@ static int exc3000_probe(struct i2c_client *client)
 
        i2c_set_clientdata(client, data);
 
-       error = devm_device_add_group(&client->dev, &exc3000_attribute_group);
-       if (error)
-               return error;
-
        return 0;
 }
 
@@ -473,6 +466,7 @@ MODULE_DEVICE_TABLE(acpi, exc3000_acpi_match);
 static struct i2c_driver exc3000_driver = {
        .driver = {
                .name   = "exc3000",
+               .dev_groups = exc3000_groups,
                .of_match_table = of_match_ptr(exc3000_of_match),
                .acpi_match_table = ACPI_PTR(exc3000_acpi_match),
        },
index 404153338df718d2d62874bc02c9b8948396cd2a..0f58258306bfc594e84625f51e56cf7cd963615d 100644 (file)
@@ -954,16 +954,13 @@ static DEVICE_ATTR(version, 0664, hideep_fw_version_show, NULL);
 static DEVICE_ATTR(product_id, 0664, hideep_product_id_show, NULL);
 static DEVICE_ATTR(update_fw, 0664, NULL, hideep_update_fw);
 
-static struct attribute *hideep_ts_sysfs_entries[] = {
+static struct attribute *hideep_ts_attrs[] = {
        &dev_attr_version.attr,
        &dev_attr_product_id.attr,
        &dev_attr_update_fw.attr,
        NULL,
 };
-
-static const struct attribute_group hideep_ts_attr_group = {
-       .attrs = hideep_ts_sysfs_entries,
-};
+ATTRIBUTE_GROUPS(hideep_ts);
 
 static void hideep_set_work_mode(struct hideep_ts *ts)
 {
@@ -1096,13 +1093,6 @@ static int hideep_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(&client->dev, &hideep_ts_attr_group);
-       if (error) {
-               dev_err(&client->dev,
-                       "failed to add sysfs attributes: %d\n", error);
-               return error;
-       }
-
        return 0;
 }
 
@@ -1131,6 +1121,7 @@ MODULE_DEVICE_TABLE(of, hideep_match_table);
 static struct i2c_driver hideep_driver = {
        .driver = {
                .name                   = HIDEEP_I2C_NAME,
+               .dev_groups             = hideep_ts_groups,
                .of_match_table         = of_match_ptr(hideep_match_table),
                .acpi_match_table       = ACPI_PTR(hideep_acpi_id),
                .pm                     = pm_sleep_ptr(&hideep_pm_ops),
index 2450cfa14de9dfe67657657399588241d4dfc636..d0f257989fd6b728091ff3ee1add1d00777507de 100644 (file)
@@ -274,10 +274,7 @@ static struct attribute *hycon_hy46xx_attrs[] = {
        &hycon_hy46xx_attr_bootloader_version.dattr.attr,
        NULL
 };
-
-static const struct attribute_group hycon_hy46xx_attr_group = {
-       .attrs = hycon_hy46xx_attrs,
-};
+ATTRIBUTE_GROUPS(hycon_hy46xx);
 
 static void hycon_hy46xx_get_defaults(struct device *dev, struct hycon_hy46xx_data *tsdata)
 {
@@ -535,10 +532,6 @@ static int hycon_hy46xx_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(&client->dev, &hycon_hy46xx_attr_group);
-       if (error)
-               return error;
-
        error = input_register_device(input);
        if (error)
                return error;
@@ -576,6 +569,7 @@ MODULE_DEVICE_TABLE(of, hycon_hy46xx_of_match);
 static struct i2c_driver hycon_hy46xx_driver = {
        .driver = {
                .name = "hycon_hy46xx",
+               .dev_groups = hycon_hy46xx_groups,
                .of_match_table = hycon_hy46xx_of_match,
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
        },
index ad6828e4f2e2df579b3f084c17405324eee662d4..31ffdc2a93f35aaf72bb67a3931f5cc0441ab3f7 100644 (file)
@@ -876,7 +876,7 @@ exit:
 
 static DEVICE_ATTR(firmware_update, 0200, NULL, ili210x_firmware_update_store);
 
-static struct attribute *ili210x_attributes[] = {
+static struct attribute *ili210x_attrs[] = {
        &dev_attr_calibrate.attr,
        &dev_attr_firmware_update.attr,
        &dev_attr_firmware_version.attr,
@@ -904,10 +904,11 @@ static umode_t ili210x_attributes_visible(struct kobject *kobj,
        return attr->mode;
 }
 
-static const struct attribute_group ili210x_attr_group = {
-       .attrs = ili210x_attributes,
+static const struct attribute_group ili210x_group = {
+       .attrs = ili210x_attrs,
        .is_visible = ili210x_attributes_visible,
 };
+__ATTRIBUTE_GROUPS(ili210x);
 
 static void ili210x_power_down(void *data)
 {
@@ -1013,13 +1014,6 @@ static int ili210x_i2c_probe(struct i2c_client *client)
        if (error)
                return error;
 
-       error = devm_device_add_group(dev, &ili210x_attr_group);
-       if (error) {
-               dev_err(dev, "Unable to create sysfs attributes, err: %d\n",
-                       error);
-               return error;
-       }
-
        error = input_register_device(priv->input);
        if (error) {
                dev_err(dev, "Cannot register input device, err: %d\n", error);
@@ -1050,6 +1044,7 @@ MODULE_DEVICE_TABLE(of, ili210x_dt_ids);
 static struct i2c_driver ili210x_ts_driver = {
        .driver = {
                .name = "ili210x_i2c",
+               .dev_groups = ili210x_groups,
                .of_match_table = ili210x_dt_ids,
        },
        .id_table = ili210x_i2c_id,
index 2f872e95fbbadee2df4304c30120a04613bfada3..90c4934e750a3a3dda59e0688388f8d1f0dcf4b4 100644 (file)
@@ -537,10 +537,7 @@ static struct attribute *ilitek_sysfs_attrs[] = {
        &dev_attr_product_id.attr,
        NULL
 };
-
-static struct attribute_group ilitek_attrs_group = {
-       .attrs = ilitek_sysfs_attrs,
-};
+ATTRIBUTE_GROUPS(ilitek_sysfs);
 
 static int ilitek_ts_i2c_probe(struct i2c_client *client)
 {
@@ -595,12 +592,6 @@ static int ilitek_ts_i2c_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(dev, &ilitek_attrs_group);
-       if (error) {
-               dev_err(dev, "sysfs create group failed: %d\n", error);
-               return error;
-       }
-
        return 0;
 }
 
@@ -675,6 +666,7 @@ MODULE_DEVICE_TABLE(of, ilitek_ts_i2c_match);
 static struct i2c_driver ilitek_ts_i2c_driver = {
        .driver = {
                .name = ILITEK_TS_NAME,
+               .dev_groups = ilitek_sysfs_groups,
                .pm = pm_sleep_ptr(&ilitek_pm_ops),
                .of_match_table = of_match_ptr(ilitek_ts_i2c_match),
                .acpi_match_table = ACPI_PTR(ilitekts_acpi_id),
index b4768b66eb1012d6c669d7eaeb8848e1f91fc969..a3f4fb85bee58bf1e5f73861bd59637eed4e565f 100644 (file)
@@ -974,10 +974,11 @@ static umode_t iqs5xx_attr_is_visible(struct kobject *kobj,
        return attr->mode;
 }
 
-static const struct attribute_group iqs5xx_attr_group = {
+static const struct attribute_group iqs5xx_group = {
        .is_visible = iqs5xx_attr_is_visible,
        .attrs = iqs5xx_attrs,
 };
+__ATTRIBUTE_GROUPS(iqs5xx);
 
 static int iqs5xx_suspend(struct device *dev)
 {
@@ -1053,12 +1054,6 @@ static int iqs5xx_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(&client->dev, &iqs5xx_attr_group);
-       if (error) {
-               dev_err(&client->dev, "Failed to add attributes: %d\n", error);
-               return error;
-       }
-
        if (iqs5xx->input) {
                error = input_register_device(iqs5xx->input);
                if (error)
@@ -1089,6 +1084,7 @@ MODULE_DEVICE_TABLE(of, iqs5xx_of_match);
 static struct i2c_driver iqs5xx_i2c_driver = {
        .driver = {
                .name           = "iqs5xx",
+               .dev_groups     = iqs5xx_groups,
                .of_match_table = iqs5xx_of_match,
                .pm             = pm_sleep_ptr(&iqs5xx_pm),
        },
index 85b95ed461e7d0c21cee81f069b86d2865d6b9c4..bfbebe245040c1d370c6a393161af1320ae8273f 100644 (file)
@@ -252,18 +252,16 @@ static int mainstone_wm97xx_probe(struct platform_device *pdev)
        return wm97xx_register_mach_ops(wm, &mainstone_mach_ops);
 }
 
-static int mainstone_wm97xx_remove(struct platform_device *pdev)
+static void mainstone_wm97xx_remove(struct platform_device *pdev)
 {
        struct wm97xx *wm = platform_get_drvdata(pdev);
 
        wm97xx_unregister_mach_ops(wm);
-
-       return 0;
 }
 
 static struct platform_driver mainstone_wm97xx_driver = {
        .probe  = mainstone_wm97xx_probe,
-       .remove = mainstone_wm97xx_remove,
+       .remove_new = mainstone_wm97xx_remove,
        .driver = {
                .name   = "wm97xx-touch",
        },
index ae0d978c83bfa4754eb5b6e9901da6f9622ca578..cbcd6e34efb7d14eb30d7b4e9de0f42116d3ce59 100644 (file)
@@ -217,18 +217,16 @@ err_free_mem:
        return ret;
 }
 
-static int mc13783_ts_remove(struct platform_device *pdev)
+static void mc13783_ts_remove(struct platform_device *pdev)
 {
        struct mc13783_ts_priv *priv = platform_get_drvdata(pdev);
 
        input_unregister_device(priv->idev);
        kfree(priv);
-
-       return 0;
 }
 
 static struct platform_driver mc13783_ts_driver = {
-       .remove         = mc13783_ts_remove,
+       .remove_new     = mc13783_ts_remove,
        .driver         = {
                .name   = MC13783_TS_NAME,
        },
index 2ac4483fbc2588b552ef9c76f40caa7f51978036..aa325486f61825e2e30d36a432fffafd8ddb2cbb 100644 (file)
@@ -1419,10 +1419,7 @@ static struct attribute *mip4_attrs[] = {
        &dev_attr_update_fw.attr,
        NULL,
 };
-
-static const struct attribute_group mip4_attr_group = {
-       .attrs = mip4_attrs,
-};
+ATTRIBUTE_GROUPS(mip4);
 
 static int mip4_probe(struct i2c_client *client)
 {
@@ -1514,13 +1511,6 @@ static int mip4_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(&client->dev, &mip4_attr_group);
-       if (error) {
-               dev_err(&client->dev,
-                       "Failed to create sysfs attribute group: %d\n", error);
-               return error;
-       }
-
        return 0;
 }
 
@@ -1589,6 +1579,7 @@ static struct i2c_driver mip4_driver = {
        .probe = mip4_probe,
        .driver = {
                .name = MIP4_DEVICE_NAME,
+               .dev_groups = mip4_groups,
                .of_match_table = of_match_ptr(mip4_of_match),
                .acpi_match_table = ACPI_PTR(mip4_acpi_match),
                .pm = pm_sleep_ptr(&mip4_pm_ops),
index b2da0194e02ab1e845a1aae978cdbae277daebca..821245019feaae9b81ffdbddb04f889dfa9b1d55 100644 (file)
@@ -197,7 +197,7 @@ fail:
        return err;
 }
 
-static int pcap_ts_remove(struct platform_device *pdev)
+static void pcap_ts_remove(struct platform_device *pdev)
 {
        struct pcap_ts *pcap_ts = platform_get_drvdata(pdev);
 
@@ -207,8 +207,6 @@ static int pcap_ts_remove(struct platform_device *pdev)
        input_unregister_device(pcap_ts->input);
 
        kfree(pcap_ts);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM
@@ -240,7 +238,7 @@ static const struct dev_pm_ops pcap_ts_pm_ops = {
 
 static struct platform_driver pcap_ts_driver = {
        .probe          = pcap_ts_probe,
-       .remove         = pcap_ts_remove,
+       .remove_new     = pcap_ts_remove,
        .driver         = {
                .name   = "pcap-ts",
                .pm     = PCAP_TS_PM_OPS,
index 78dd3059d585ac21ecfd5d1fe502466b111ed984..13c500e776f6d2c20d307b1ca2e46cfb17abb3a6 100644 (file)
@@ -1004,7 +1004,7 @@ static DEVICE_ATTR(boot_mode, S_IRUGO, raydium_i2c_boot_mode_show, NULL);
 static DEVICE_ATTR(update_fw, S_IWUSR, NULL, raydium_i2c_update_fw_store);
 static DEVICE_ATTR(calibrate, S_IWUSR, NULL, raydium_i2c_calibrate_store);
 
-static struct attribute *raydium_i2c_attributes[] = {
+static struct attribute *raydium_i2c_attrs[] = {
        &dev_attr_update_fw.attr,
        &dev_attr_boot_mode.attr,
        &dev_attr_fw_version.attr,
@@ -1012,10 +1012,7 @@ static struct attribute *raydium_i2c_attributes[] = {
        &dev_attr_calibrate.attr,
        NULL
 };
-
-static const struct attribute_group raydium_i2c_attribute_group = {
-       .attrs = raydium_i2c_attributes,
-};
+ATTRIBUTE_GROUPS(raydium_i2c);
 
 static int raydium_i2c_power_on(struct raydium_data *ts)
 {
@@ -1174,14 +1171,6 @@ static int raydium_i2c_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(&client->dev,
-                                  &raydium_i2c_attribute_group);
-       if (error) {
-               dev_err(&client->dev, "failed to create sysfs attributes: %d\n",
-                       error);
-               return error;
-       }
-
        return 0;
 }
 
@@ -1265,6 +1254,7 @@ static struct i2c_driver raydium_i2c_driver = {
        .id_table = raydium_i2c_id,
        .driver = {
                .name = "raydium_ts",
+               .dev_groups = raydium_i2c_groups,
                .pm = pm_sleep_ptr(&raydium_i2c_pm_ops),
                .acpi_match_table = ACPI_PTR(raydium_acpi_id),
                .of_match_table = of_match_ptr(raydium_of_match),
index 240424f06b98fccea7fa18161d9524d644e042c5..4493ad0c93221081a3050e75c35fbc265468082c 100644 (file)
@@ -854,10 +854,7 @@ static struct attribute *rohm_ts_attrs[] = {
        &dev_attr_inv_y.attr,
        NULL,
 };
-
-static const struct attribute_group rohm_ts_attr_group = {
-       .attrs = rohm_ts_attrs,
-};
+ATTRIBUTE_GROUPS(rohm_ts);
 
 static int rohm_ts_device_init(struct i2c_client *client, u8 setup2)
 {
@@ -1164,12 +1161,6 @@ static int rohm_bu21023_i2c_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(dev, &rohm_ts_attr_group);
-       if (error) {
-               dev_err(dev, "failed to create sysfs group: %d\n", error);
-               return error;
-       }
-
        return error;
 }
 
@@ -1182,6 +1173,7 @@ MODULE_DEVICE_TABLE(i2c, rohm_bu21023_i2c_id);
 static struct i2c_driver rohm_bu21023_i2c_driver = {
        .driver = {
                .name = BU21023_NAME,
+               .dev_groups = rohm_ts_groups,
        },
        .probe = rohm_bu21023_i2c_probe,
        .id_table = rohm_bu21023_i2c_id,
index 998d99d189111f8e1671a28167b517e8b5157575..149cc2c4925e91f3eb964c1ce193733e69828abd 100644 (file)
@@ -286,10 +286,7 @@ static struct attribute *s6sy761_sysfs_attrs[] = {
        &dev_attr_devid.attr,
        NULL
 };
-
-static struct attribute_group s6sy761_attribute_group = {
-       .attrs = s6sy761_sysfs_attrs
-};
+ATTRIBUTE_GROUPS(s6sy761_sysfs);
 
 static int s6sy761_power_on(struct s6sy761_data *sdata)
 {
@@ -465,10 +462,6 @@ static int s6sy761_probe(struct i2c_client *client)
        if (err)
                return err;
 
-       err = devm_device_add_group(&client->dev, &s6sy761_attribute_group);
-       if (err)
-               return err;
-
        pm_runtime_enable(&client->dev);
 
        return 0;
@@ -535,6 +528,7 @@ MODULE_DEVICE_TABLE(i2c, s6sy761_id);
 static struct i2c_driver s6sy761_driver = {
        .driver = {
                .name = S6SY761_DEV_NAME,
+               .dev_groups = s6sy761_sysfs_groups,
                .of_match_table = of_match_ptr(s6sy761_of_match),
                .pm = pm_ptr(&s6sy761_pm_ops),
        },
index 56e371fd88fa1082029cb7432def580dd6a3379b..85010fa079082f4085637065b9d0ef2682af567b 100644 (file)
@@ -517,10 +517,7 @@ static struct attribute *stmfts_sysfs_attrs[] = {
        &dev_attr_hover_enable.attr,
        NULL
 };
-
-static struct attribute_group stmfts_attribute_group = {
-       .attrs = stmfts_sysfs_attrs
-};
+ATTRIBUTE_GROUPS(stmfts_sysfs);
 
 static int stmfts_power_on(struct stmfts_data *sdata)
 {
@@ -727,10 +724,6 @@ static int stmfts_probe(struct i2c_client *client)
                }
        }
 
-       err = devm_device_add_group(&client->dev, &stmfts_attribute_group);
-       if (err)
-               return err;
-
        pm_runtime_enable(&client->dev);
        device_enable_async_suspend(&client->dev);
 
@@ -804,6 +797,7 @@ MODULE_DEVICE_TABLE(i2c, stmfts_id);
 static struct i2c_driver stmfts_driver = {
        .driver = {
                .name = STMFTS_DEV_NAME,
+               .dev_groups = stmfts_sysfs_groups,
                .of_match_table = of_match_ptr(stmfts_of_match),
                .pm = pm_ptr(&stmfts_pm_ops),
                .probe_type = PROBE_PREFER_ASYNCHRONOUS,
index 25c45c3a35615c76eb7b1bc1d02483248fbf0bf1..b204fdb2d22c61131155c451962dc10e058eab05 100644 (file)
@@ -350,13 +350,11 @@ static int stmpe_input_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int stmpe_ts_remove(struct platform_device *pdev)
+static void stmpe_ts_remove(struct platform_device *pdev)
 {
        struct stmpe_touch *ts = platform_get_drvdata(pdev);
 
        stmpe_disable(ts->stmpe, STMPE_BLOCK_TOUCHSCREEN);
-
-       return 0;
 }
 
 static struct platform_driver stmpe_ts_driver = {
@@ -364,7 +362,7 @@ static struct platform_driver stmpe_ts_driver = {
                .name = STMPE_TS_NAME,
        },
        .probe = stmpe_input_probe,
-       .remove = stmpe_ts_remove,
+       .remove_new = stmpe_ts_remove,
 };
 module_platform_driver(stmpe_ts_driver);
 
index bb3c6072fc82797536da2f5e346f88aed525228b..92b2b840b4b7b7b6a281041b652f3211cc139327 100644 (file)
@@ -375,7 +375,7 @@ static int sun4i_ts_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int sun4i_ts_remove(struct platform_device *pdev)
+static void sun4i_ts_remove(struct platform_device *pdev)
 {
        struct sun4i_ts_data *ts = platform_get_drvdata(pdev);
 
@@ -385,8 +385,6 @@ static int sun4i_ts_remove(struct platform_device *pdev)
 
        /* Deactivate all IRQs */
        writel(0, ts->base + TP_INT_FIFOC);
-
-       return 0;
 }
 
 static const struct of_device_id sun4i_ts_of_match[] = {
@@ -403,7 +401,7 @@ static struct platform_driver sun4i_ts_driver = {
                .of_match_table = sun4i_ts_of_match,
        },
        .probe  = sun4i_ts_probe,
-       .remove = sun4i_ts_remove,
+       .remove_new = sun4i_ts_remove,
 };
 
 module_platform_driver(sun4i_ts_driver);
index 9aa4e35fb4f5aaf083341f93703457ead0d1deb2..34324f8512ac2d40d15321551ef2dee0cca00d74 100644 (file)
@@ -491,7 +491,7 @@ err_free_mem:
        return err;
 }
 
-static int titsc_remove(struct platform_device *pdev)
+static void titsc_remove(struct platform_device *pdev)
 {
        struct titsc *ts_dev = platform_get_drvdata(pdev);
        u32 steps;
@@ -508,7 +508,6 @@ static int titsc_remove(struct platform_device *pdev)
        input_unregister_device(ts_dev->input);
 
        kfree(ts_dev);
-       return 0;
 }
 
 static int titsc_suspend(struct device *dev)
@@ -552,7 +551,7 @@ MODULE_DEVICE_TABLE(of, ti_tsc_dt_ids);
 
 static struct platform_driver ti_tsc_driver = {
        .probe  = titsc_probe,
-       .remove = titsc_remove,
+       .remove_new = titsc_remove,
        .driver = {
                .name   = "TI-am335x-tsc",
                .pm     = pm_sleep_ptr(&titsc_pm_ops),
index b5e904c5b7c49e8d7e8d2a8ada90950aae58ef39..89c5248f66f6fcbcd6e410f009340f7cd94898e4 100644 (file)
@@ -63,9 +63,10 @@ MODULE_DEVICE_TABLE(of, tsc2004_of_match);
 
 static struct i2c_driver tsc2004_driver = {
        .driver = {
-               .name   = "tsc2004",
-               .of_match_table = of_match_ptr(tsc2004_of_match),
-               .pm     = pm_sleep_ptr(&tsc200x_pm_ops),
+               .name           = "tsc2004",
+               .dev_groups     = tsc200x_groups,
+               .of_match_table = of_match_ptr(tsc2004_of_match),
+               .pm             = pm_sleep_ptr(&tsc200x_pm_ops),
        },
        .id_table       = tsc2004_idtable,
        .probe          = tsc2004_probe,
index b6dfbcfc8c19b21ef59fb5d0fed0256105171db5..1b40ce0ca1b9910c60a38cd207f8d9de5d16fc57 100644 (file)
@@ -79,9 +79,10 @@ MODULE_DEVICE_TABLE(of, tsc2005_of_match);
 
 static struct spi_driver tsc2005_driver = {
        .driver = {
-               .name   = "tsc2005",
-               .of_match_table = of_match_ptr(tsc2005_of_match),
-               .pm     = pm_sleep_ptr(&tsc200x_pm_ops),
+               .name           = "tsc2005",
+               .dev_groups     = tsc200x_groups,
+               .of_match_table = of_match_ptr(tsc2005_of_match),
+               .pm             = pm_sleep_ptr(&tsc200x_pm_ops),
        },
        .probe  = tsc2005_probe,
        .remove = tsc2005_remove,
index b799f26fcf8facbb090105a7a57cf5949ab60d1c..a4c0e9db9bb94d0abb47aadc793d11838033e3c7 100644 (file)
@@ -356,6 +356,12 @@ static const struct attribute_group tsc200x_attr_group = {
        .attrs          = tsc200x_attrs,
 };
 
+const struct attribute_group *tsc200x_groups[] = {
+       &tsc200x_attr_group,
+       NULL
+};
+EXPORT_SYMBOL_GPL(tsc200x_groups);
+
 static void tsc200x_esd_work(struct work_struct *work)
 {
        struct tsc200x *ts = container_of(work, struct tsc200x, esd_work.work);
@@ -553,25 +559,17 @@ int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
                return error;
 
        dev_set_drvdata(dev, ts);
-       error = sysfs_create_group(&dev->kobj, &tsc200x_attr_group);
-       if (error) {
-               dev_err(dev,
-                       "Failed to create sysfs attributes, err: %d\n", error);
-               goto disable_regulator;
-       }
 
        error = input_register_device(ts->idev);
        if (error) {
                dev_err(dev,
                        "Failed to register input device, err: %d\n", error);
-               goto err_remove_sysfs;
+               goto disable_regulator;
        }
 
        irq_set_irq_wake(irq, 1);
        return 0;
 
-err_remove_sysfs:
-       sysfs_remove_group(&dev->kobj, &tsc200x_attr_group);
 disable_regulator:
        regulator_disable(ts->vio);
        return error;
@@ -582,8 +580,6 @@ void tsc200x_remove(struct device *dev)
 {
        struct tsc200x *ts = dev_get_drvdata(dev);
 
-       sysfs_remove_group(&dev->kobj, &tsc200x_attr_group);
-
        regulator_disable(ts->vio);
 }
 EXPORT_SYMBOL_GPL(tsc200x_remove);
index 4ded34425b21eef7bbbb8270eb22c1a2e7e823e6..37de91efd78ea3cae52bb785d52c8517e5a9b653 100644 (file)
@@ -70,6 +70,7 @@
 
 extern const struct regmap_config tsc200x_regmap_config;
 extern const struct dev_pm_ops tsc200x_pm_ops;
+extern const struct attribute_group *tsc200x_groups[];
 
 int tsc200x_probe(struct device *dev, int irq, const struct input_id *tsc_id,
                  struct regmap *regmap,
index cbc4750c53f9a9bdab1a3abd4b6f706d2de94eb2..128341a6696bc01943c1b7eb7eb42ecf93417183 100644 (file)
@@ -944,10 +944,7 @@ static struct attribute *wdt87xx_attrs[] = {
        &dev_attr_update_fw.attr,
        NULL
 };
-
-static const struct attribute_group wdt87xx_attr_group = {
-       .attrs = wdt87xx_attrs,
-};
+ATTRIBUTE_GROUPS(wdt87xx);
 
 static void wdt87xx_report_contact(struct input_dev *input,
                                   struct wdt87xx_sys_param *param,
@@ -1104,12 +1101,6 @@ static int wdt87xx_ts_probe(struct i2c_client *client)
                return error;
        }
 
-       error = devm_device_add_group(&client->dev, &wdt87xx_attr_group);
-       if (error) {
-               dev_err(&client->dev, "create sysfs failed: %d\n", error);
-               return error;
-       }
-
        return 0;
 }
 
@@ -1172,8 +1163,9 @@ static struct i2c_driver wdt87xx_driver = {
        .probe          = wdt87xx_ts_probe,
        .id_table       = wdt87xx_dev_id,
        .driver = {
-               .name   = WDT87XX_NAME,
-               .pm     = pm_sleep_ptr(&wdt87xx_pm_ops),
+               .name = WDT87XX_NAME,
+               .dev_groups = wdt87xx_groups,
+               .pm = pm_sleep_ptr(&wdt87xx_pm_ops),
                .acpi_match_table = ACPI_PTR(wdt87xx_acpi_id),
        },
 };
index 319f57fb9af52805bd143ba25f55e1650cd05c7a..9cee26b63341df1403f5a966c898cccab4e75ff5 100644 (file)
@@ -374,14 +374,12 @@ err_alloc:
        return error;
 }
 
-static int wm831x_ts_remove(struct platform_device *pdev)
+static void wm831x_ts_remove(struct platform_device *pdev)
 {
        struct wm831x_ts *wm831x_ts = platform_get_drvdata(pdev);
 
        free_irq(wm831x_ts->pd_irq, wm831x_ts);
        free_irq(wm831x_ts->data_irq, wm831x_ts);
-
-       return 0;
 }
 
 static struct platform_driver wm831x_ts_driver = {
@@ -389,7 +387,7 @@ static struct platform_driver wm831x_ts_driver = {
                .name = "wm831x-touch",
        },
        .probe = wm831x_ts_probe,
-       .remove = wm831x_ts_remove,
+       .remove_new = wm831x_ts_remove,
 };
 module_platform_driver(wm831x_ts_driver);
 
index ac3b3dd59488ad72b93ffcc6b96123d352f06e37..f01f6cc9b59fa8a98dc389c8a6fc6d7ec8b9513e 100644 (file)
@@ -756,11 +756,9 @@ batt_err:
        return ret;
 }
 
-static int wm97xx_mfd_remove(struct platform_device *pdev)
+static void wm97xx_mfd_remove(struct platform_device *pdev)
 {
        wm97xx_remove(&pdev->dev);
-
-       return 0;
 }
 
 static int wm97xx_suspend(struct device *dev)
@@ -878,7 +876,7 @@ static struct platform_driver wm97xx_mfd_driver = {
                .pm =           pm_sleep_ptr(&wm97xx_pm_ops),
        },
        .probe =        wm97xx_mfd_probe,
-       .remove =       wm97xx_mfd_remove,
+       .remove_new =   wm97xx_mfd_remove,
 };
 
 static int __init wm97xx_init(void)
index ee9e2a2edbf563efa22ad2902ac9d5d23077c073..7673bb82945b6cbf08ee1d4d44c196dc46875c5a 100644 (file)
@@ -95,7 +95,7 @@ config IOMMU_DEBUGFS
 choice
        prompt "IOMMU default domain type"
        depends on IOMMU_API
-       default IOMMU_DEFAULT_DMA_LAZY if X86
+       default IOMMU_DEFAULT_DMA_LAZY if X86 || S390
        default IOMMU_DEFAULT_DMA_STRICT
        help
          Choose the type of IOMMU domain used to manage DMA API usage by
@@ -150,7 +150,7 @@ config OF_IOMMU
 
 # IOMMU-agnostic DMA-mapping layer
 config IOMMU_DMA
-       def_bool ARM64 || X86
+       def_bool ARM64 || X86 || S390
        select DMA_OPS
        select IOMMU_API
        select IOMMU_IOVA
@@ -240,17 +240,6 @@ config SUN50I_IOMMU
        help
          Support for the IOMMU introduced in the Allwinner H6 SoCs.
 
-config TEGRA_IOMMU_GART
-       bool "Tegra GART IOMMU Support"
-       depends on ARCH_TEGRA_2x_SOC
-       depends on TEGRA_MC
-       select IOMMU_API
-       help
-         Enables support for remapping discontiguous physical memory
-         shared with the operating system into contiguous I/O virtual
-         space through the GART (Graphics Address Relocation Table)
-         hardware included on Tegra SoCs.
-
 config TEGRA_IOMMU_SMMU
        bool "NVIDIA Tegra SMMU Support"
        depends on ARCH_TEGRA
index 769e43d780ce89810033064bfd3baa8420889bed..95ad9dbfbda022d60e8a3e203e9177921d74dbd4 100644 (file)
@@ -20,7 +20,6 @@ obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
 obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
 obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
 obj-$(CONFIG_SUN50I_IOMMU) += sun50i-iommu.o
-obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
 obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
 obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
 obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
index 8bd4c3b183ec6e475b58a1990d7b5c33ab141120..443b2c13c37b5979ae574c6c7ab13ac2ce6b60f1 100644 (file)
@@ -23,15 +23,6 @@ config AMD_IOMMU
          your BIOS for an option to enable it or if you have an IVRS ACPI
          table.
 
-config AMD_IOMMU_V2
-       tristate "AMD IOMMU Version 2 driver"
-       depends on AMD_IOMMU
-       select MMU_NOTIFIER
-       help
-         This option enables support for the AMD IOMMUv2 features of the IOMMU
-         hardware. Select this option if you want to use devices that support
-         the PCI PRI and PASID interface.
-
 config AMD_IOMMU_DEBUGFS
        bool "Enable AMD IOMMU internals in DebugFS"
        depends on AMD_IOMMU && IOMMU_DEBUGFS
index 773d8aa002837eebce6d25c3cfea5e9b28bb85c3..f454fbb1569eb9d792338178faf9611ccb870635 100644 (file)
@@ -1,4 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o io_pgtable.o io_pgtable_v2.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o
-obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o
index e2857109e966015e94efb919f28723c8ed151fa4..86be1edd50ee9afe027addc7c0c1fcbb4f536de5 100644 (file)
@@ -38,9 +38,6 @@ extern int amd_iommu_guest_ir;
 extern enum io_pgtable_fmt amd_iommu_pgtable;
 extern int amd_iommu_gpt_level;
 
-/* IOMMUv2 specific functions */
-struct iommu_domain;
-
 bool amd_iommu_v2_supported(void);
 struct amd_iommu *get_amd_iommu(unsigned int idx);
 u8 amd_iommu_pc_get_max_banks(unsigned int idx);
@@ -51,10 +48,10 @@ int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
 int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr,
                         u8 fxn, u64 *value);
 
-int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
-int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
-void amd_iommu_domain_direct_map(struct iommu_domain *dom);
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+/* Device capabilities */
+int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev);
+void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev);
+
 int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
 void amd_iommu_update_and_flush_device_table(struct protection_domain *domain);
 void amd_iommu_domain_update(struct protection_domain *domain);
@@ -87,9 +84,25 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev)
               (pdev->device == PCI_DEVICE_ID_RD890_IOMMU);
 }
 
-static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask)
+static inline bool check_feature(u64 mask)
+{
+       return (amd_iommu_efr & mask);
+}
+
+static inline bool check_feature2(u64 mask)
+{
+       return (amd_iommu_efr2 & mask);
+}
+
+static inline int check_feature_gpt_level(void)
+{
+       return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
+}
+
+static inline bool amd_iommu_gt_ppr_supported(void)
 {
-       return !!(iommu->features & mask);
+       return (check_feature(FEATURE_GT) &&
+               check_feature(FEATURE_PPR));
 }
 
 static inline u64 iommu_virt_to_phys(void *vaddr)
@@ -105,7 +118,6 @@ static inline void *iommu_phys_to_virt(unsigned long paddr)
 static inline
 void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root)
 {
-       atomic64_set(&domain->iop.pt_root, root);
        domain->iop.root = (u64 *)(root & PAGE_MASK);
        domain->iop.mode = root & 7; /* lowest 3 bits encode pgtable mode */
 }
@@ -146,8 +158,5 @@ void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
                                  u64 *root, int mode);
 struct dev_table_entry *get_dev_table(struct amd_iommu *iommu);
 
-extern u64 amd_iommu_efr;
-extern u64 amd_iommu_efr2;
-
 extern bool amd_iommu_snp_en;
 #endif
index dec4e5c2b66b8236fcd6faeb8497fdc9b42dfe20..90b7d7950a9efa032f116a448db3c96fb7570004 100644 (file)
 #define PD_IOMMUV2_MASK                BIT(3) /* domain has gcr3 table */
 #define PD_GIOV_MASK           BIT(4) /* domain enable GIOV support */
 
+/* Timeout stuff */
+#define LOOP_TIMEOUT           100000
+#define MMIO_STATUS_TIMEOUT    2000000
+
 extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...)                            \
        do {                                                    \
@@ -516,19 +520,6 @@ extern struct kmem_cache *amd_iommu_irq_cache;
 #define APERTURE_RANGE_INDEX(a)        ((a) >> APERTURE_RANGE_SHIFT)
 #define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
 
-/*
- * This struct is used to pass information about
- * incoming PPR faults around.
- */
-struct amd_iommu_fault {
-       u64 address;    /* IO virtual address of the fault*/
-       u32 pasid;      /* Address space identifier */
-       u32 sbdf;       /* Originating PCI device id */
-       u16 tag;        /* PPR tag */
-       u16 flags;      /* Fault flags */
-
-};
-
 
 struct amd_iommu;
 struct iommu_domain;
@@ -555,7 +546,6 @@ struct amd_io_pgtable {
        struct io_pgtable       iop;
        int                     mode;
        u64                     *root;
-       atomic64_t              pt_root;        /* pgtable root and pgtable mode */
        u64                     *pgd;           /* v2 pgtable pgd pointer */
 };
 
@@ -688,9 +678,6 @@ struct amd_iommu {
        /* Extended features 2 */
        u64 features2;
 
-       /* IOMMUv2 */
-       bool is_iommu_v2;
-
        /* PCI device id of the IOMMU device */
        u16 devid;
 
@@ -811,6 +798,14 @@ struct devid_map {
        bool cmd_line;
 };
 
+#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP     0x1    /* ATS feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP     0x2    /* PRI feature supported */
+#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP   0x4    /* PASID context supported */
+/* Device may request execution on memory pages */
+#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP    0x8
+/* Device may request super-user privileges */
+#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP   0x10
+
 /*
  * This struct contains device specific data for the IOMMU
  */
@@ -823,13 +818,15 @@ struct iommu_dev_data {
        struct protection_domain *domain; /* Domain the device is bound to */
        struct device *dev;
        u16 devid;                        /* PCI Device ID */
-       bool iommu_v2;                    /* Device can make use of IOMMUv2 */
-       struct {
-               bool enabled;
-               int qdep;
-       } ats;                            /* ATS state */
-       bool pri_tlp;                     /* PASID TLB required for
+
+       u32 flags;                        /* Holds AMD_IOMMU_DEVICE_FLAG_<*> */
+       int ats_qdep;
+       u8 ats_enabled  :1;               /* ATS state */
+       u8 pri_enabled  :1;               /* PRI state */
+       u8 pasid_enabled:1;               /* PASID state */
+       u8 pri_tlp      :1;               /* PASID TLB required for
                                             PPR completions */
+       u8 ppr          :1;               /* Enable device PPR support */
        bool use_vapic;                   /* Enable device to use vapic mode */
        bool defer_attach;
 
@@ -896,16 +893,15 @@ extern unsigned amd_iommu_aperture_order;
 /* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
-/* Smallest max PASID supported by any IOMMU in the system */
-extern u32 amd_iommu_max_pasid;
-
-extern bool amd_iommu_v2_present;
-
 extern bool amd_iommu_force_isolation;
 
 /* Max levels of glxval supported */
 extern int amd_iommu_max_glx_val;
 
+/* Global EFR and EFR2 registers */
+extern u64 amd_iommu_efr;
+extern u64 amd_iommu_efr2;
+
 /*
  * This function flushes all internal caches of
  * the IOMMU used by this driver.
index 45efb7e5d725460b39de534c67b5fb5be0d31d1d..64bcf3df37ee5e3813f2bc71d19c6c18e9540cd2 100644 (file)
@@ -83,8 +83,6 @@
 #define ACPI_DEVFLAG_LINT1              0x80
 #define ACPI_DEVFLAG_ATSDIS             0x10000000
 
-#define LOOP_TIMEOUT   2000000
-
 #define IVRS_GET_SBDF_ID(seg, bus, dev, fn)    (((seg & 0xffff) << 16) | ((bus & 0xff) << 8) \
                                                 | ((dev & 0x1f) << 3) | (fn & 0x7))
 
@@ -187,9 +185,6 @@ static int amd_iommus_present;
 bool amd_iommu_np_cache __read_mostly;
 bool amd_iommu_iotlb_sup __read_mostly = true;
 
-u32 amd_iommu_max_pasid __read_mostly = ~0;
-
-bool amd_iommu_v2_present __read_mostly;
 static bool amd_iommu_pc_present __read_mostly;
 bool amdr_ivrs_remap_support __read_mostly;
 
@@ -272,7 +267,7 @@ int amd_iommu_get_num_iommus(void)
  * Iterate through all the IOMMUs to get common EFR
  * masks among all IOMMUs and warn if found inconsistency.
  */
-static void get_global_efr(void)
+static __init void get_global_efr(void)
 {
        struct amd_iommu *iommu;
 
@@ -304,16 +299,6 @@ static void get_global_efr(void)
        pr_info("Using global IVHD EFR:%#llx, EFR2:%#llx\n", amd_iommu_efr, amd_iommu_efr2);
 }
 
-static bool check_feature_on_all_iommus(u64 mask)
-{
-       return !!(amd_iommu_efr & mask);
-}
-
-static inline int check_feature_gpt_level(void)
-{
-       return ((amd_iommu_efr >> FEATURE_GATS_SHIFT) & FEATURE_GATS_MASK);
-}
-
 /*
  * For IVHD type 0x11/0x40, EFR is also available via IVHD.
  * Default to IVHD EFR since it is available sooner
@@ -399,7 +384,7 @@ static void iommu_set_cwwb_range(struct amd_iommu *iommu)
        u64 start = iommu_virt_to_phys((void *)iommu->cmd_sem);
        u64 entry = start & PM_ADDR_MASK;
 
-       if (!check_feature_on_all_iommus(FEATURE_SNP))
+       if (!check_feature(FEATURE_SNP))
                return;
 
        /* Note:
@@ -869,7 +854,7 @@ static void *__init iommu_alloc_4k_pages(struct amd_iommu *iommu,
        void *buf = (void *)__get_free_pages(gfp, order);
 
        if (buf &&
-           check_feature_on_all_iommus(FEATURE_SNP) &&
+           check_feature(FEATURE_SNP) &&
            set_memory_4k((unsigned long)buf, (1 << order))) {
                free_pages((unsigned long)buf, order);
                buf = NULL;
@@ -985,14 +970,14 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
        iommu_feature_enable(iommu, CONTROL_GAINT_EN);
        iommu_feature_enable(iommu, CONTROL_GALOG_EN);
 
-       for (i = 0; i < LOOP_TIMEOUT; ++i) {
+       for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
                status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
                if (status & (MMIO_STATUS_GALOG_RUN_MASK))
                        break;
                udelay(10);
        }
 
-       if (WARN_ON(i >= LOOP_TIMEOUT))
+       if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
                return -EINVAL;
 
        return 0;
@@ -1048,7 +1033,7 @@ static void iommu_enable_xt(struct amd_iommu *iommu)
 
 static void iommu_enable_gt(struct amd_iommu *iommu)
 {
-       if (!iommu_feature(iommu, FEATURE_GT))
+       if (!check_feature(FEATURE_GT))
                return;
 
        iommu_feature_enable(iommu, CONTROL_GT_EN);
@@ -1987,7 +1972,7 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
        u64 val;
        struct pci_dev *pdev = iommu->dev;
 
-       if (!iommu_feature(iommu, FEATURE_PC))
+       if (!check_feature(FEATURE_PC))
                return;
 
        amd_iommu_pc_present = true;
@@ -2014,8 +1999,7 @@ static ssize_t amd_iommu_show_features(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
 {
-       struct amd_iommu *iommu = dev_to_amd_iommu(dev);
-       return sysfs_emit(buf, "%llx:%llx\n", iommu->features2, iommu->features);
+       return sysfs_emit(buf, "%llx:%llx\n", amd_iommu_efr, amd_iommu_efr2);
 }
 static DEVICE_ATTR(features, S_IRUGO, amd_iommu_show_features, NULL);
 
@@ -2051,9 +2035,9 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
        features = readq(iommu->mmio_base + MMIO_EXT_FEATURES);
        features2 = readq(iommu->mmio_base + MMIO_EXT_FEATURES2);
 
-       if (!iommu->features) {
-               iommu->features = features;
-               iommu->features2 = features2;
+       if (!amd_iommu_efr) {
+               amd_iommu_efr = features;
+               amd_iommu_efr2 = features2;
                return;
        }
 
@@ -2061,12 +2045,12 @@ static void __init late_iommu_features_init(struct amd_iommu *iommu)
         * Sanity check and warn if EFR values from
         * IVHD and MMIO conflict.
         */
-       if (features != iommu->features ||
-           features2 != iommu->features2) {
+       if (features != amd_iommu_efr ||
+           features2 != amd_iommu_efr2) {
                pr_warn(FW_WARN
                        "EFR mismatch. Use IVHD EFR (%#llx : %#llx), EFR2 (%#llx : %#llx).\n",
-                       features, iommu->features,
-                       features2, iommu->features2);
+                       features, amd_iommu_efr,
+                       features2, amd_iommu_efr2);
        }
 }
 
@@ -2092,20 +2076,17 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 
        late_iommu_features_init(iommu);
 
-       if (iommu_feature(iommu, FEATURE_GT)) {
+       if (check_feature(FEATURE_GT)) {
                int glxval;
-               u32 max_pasid;
                u64 pasmax;
 
-               pasmax = iommu->features & FEATURE_PASID_MASK;
+               pasmax = amd_iommu_efr & FEATURE_PASID_MASK;
                pasmax >>= FEATURE_PASID_SHIFT;
-               max_pasid  = (1 << (pasmax + 1)) - 1;
+               iommu->iommu.max_pasids = (1 << (pasmax + 1)) - 1;
 
-               amd_iommu_max_pasid = min(amd_iommu_max_pasid, max_pasid);
+               BUG_ON(iommu->iommu.max_pasids & ~PASID_MASK);
 
-               BUG_ON(amd_iommu_max_pasid & ~PASID_MASK);
-
-               glxval   = iommu->features & FEATURE_GLXVAL_MASK;
+               glxval   = amd_iommu_efr & FEATURE_GLXVAL_MASK;
                glxval >>= FEATURE_GLXVAL_SHIFT;
 
                if (amd_iommu_max_glx_val == -1)
@@ -2114,13 +2095,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
                        amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
        }
 
-       if (iommu_feature(iommu, FEATURE_GT) &&
-           iommu_feature(iommu, FEATURE_PPR)) {
-               iommu->is_iommu_v2   = true;
-               amd_iommu_v2_present = true;
-       }
-
-       if (iommu_feature(iommu, FEATURE_PPR) && alloc_ppr_log(iommu))
+       if (check_feature(FEATURE_PPR) && alloc_ppr_log(iommu))
                return -ENOMEM;
 
        if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE)) {
@@ -2132,13 +2107,10 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
        init_iommu_perf_ctr(iommu);
 
        if (amd_iommu_pgtable == AMD_IOMMU_V2) {
-               if (!iommu_feature(iommu, FEATURE_GIOSUP) ||
-                   !iommu_feature(iommu, FEATURE_GT)) {
+               if (!check_feature(FEATURE_GIOSUP) ||
+                   !check_feature(FEATURE_GT)) {
                        pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
                        amd_iommu_pgtable = AMD_IOMMU_V1;
-               } else if (iommu_default_passthrough()) {
-                       pr_warn("V2 page table doesn't support passthrough mode. Fallback to v1.\n");
-                       amd_iommu_pgtable = AMD_IOMMU_V1;
                }
        }
 
@@ -2186,35 +2158,29 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
 
 static void print_iommu_info(void)
 {
+       int i;
        static const char * const feat_str[] = {
                "PreF", "PPR", "X2APIC", "NX", "GT", "[5]",
                "IA", "GA", "HE", "PC"
        };
-       struct amd_iommu *iommu;
-
-       for_each_iommu(iommu) {
-               struct pci_dev *pdev = iommu->dev;
-               int i;
 
-               pci_info(pdev, "Found IOMMU cap 0x%x\n", iommu->cap_ptr);
+       if (amd_iommu_efr) {
+               pr_info("Extended features (%#llx, %#llx):", amd_iommu_efr, amd_iommu_efr2);
 
-               if (iommu->cap & (1 << IOMMU_CAP_EFR)) {
-                       pr_info("Extended features (%#llx, %#llx):", iommu->features, iommu->features2);
-
-                       for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
-                               if (iommu_feature(iommu, (1ULL << i)))
-                                       pr_cont(" %s", feat_str[i]);
-                       }
+               for (i = 0; i < ARRAY_SIZE(feat_str); ++i) {
+                       if (check_feature(1ULL << i))
+                               pr_cont(" %s", feat_str[i]);
+               }
 
-                       if (iommu->features & FEATURE_GAM_VAPIC)
-                               pr_cont(" GA_vAPIC");
+               if (check_feature(FEATURE_GAM_VAPIC))
+                       pr_cont(" GA_vAPIC");
 
-                       if (iommu->features & FEATURE_SNP)
-                               pr_cont(" SNP");
+               if (check_feature(FEATURE_SNP))
+                       pr_cont(" SNP");
 
-                       pr_cont("\n");
-               }
+               pr_cont("\n");
        }
+
        if (irq_remapping_enabled) {
                pr_info("Interrupt remapping enabled\n");
                if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
@@ -2900,19 +2866,19 @@ static void enable_iommus_vapic(void)
                 * Need to set and poll check the GALOGRun bit to zero before
                 * we can set/ modify GA Log registers safely.
                 */
-               for (i = 0; i < LOOP_TIMEOUT; ++i) {
+               for (i = 0; i < MMIO_STATUS_TIMEOUT; ++i) {
                        status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
                        if (!(status & MMIO_STATUS_GALOG_RUN_MASK))
                                break;
                        udelay(10);
                }
 
-               if (WARN_ON(i >= LOOP_TIMEOUT))
+               if (WARN_ON(i >= MMIO_STATUS_TIMEOUT))
                        return;
        }
 
        if (AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir) &&
-           !check_feature_on_all_iommus(FEATURE_GAM_VAPIC)) {
+           !check_feature(FEATURE_GAM_VAPIC)) {
                amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
                return;
        }
@@ -3698,9 +3664,8 @@ bool amd_iommu_v2_supported(void)
         * (i.e. EFR[SNPSup]=1), IOMMUv2 page table cannot be used without
         * setting up IOMMUv1 page table.
         */
-       return amd_iommu_v2_present && !amd_iommu_snp_en;
+       return amd_iommu_gt_ppr_supported() && !amd_iommu_snp_en;
 }
-EXPORT_SYMBOL(amd_iommu_v2_supported);
 
 struct amd_iommu *get_amd_iommu(unsigned int idx)
 {
@@ -3824,7 +3789,7 @@ int amd_iommu_snp_enable(void)
                return -EINVAL;
        }
 
-       amd_iommu_snp_en = check_feature_on_all_iommus(FEATURE_SNP);
+       amd_iommu_snp_en = check_feature(FEATURE_SNP);
        if (!amd_iommu_snp_en)
                return -EINVAL;
 
index e9ef2e0a62f670096cc317876180f1d888fd6e07..f818a7e254d42627ebbd2d3154e290263af37421 100644 (file)
@@ -363,10 +363,10 @@ static void v2_free_pgtable(struct io_pgtable *iop)
        if (!(pdom->flags & PD_IOMMUV2_MASK))
                return;
 
-       /*
-        * Make changes visible to IOMMUs. No need to clear gcr3 entry
-        * as gcr3 table is already freed.
-        */
+       /* Clear gcr3 entry */
+       amd_iommu_domain_clear_gcr3(&pdom->domain, 0);
+
+       /* Make changes visible to IOMMUs */
        amd_iommu_domain_update(pdom);
 
        /* Free page table */
index b399c57413784688e69beaa54f2fed5b23d0462c..fcc987f5d4edc3ae87335fceed57eb26d7ba9b00 100644 (file)
@@ -45,8 +45,6 @@
 
 #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28))
 
-#define LOOP_TIMEOUT   100000
-
 /* IO virtual address start page frame number */
 #define IOVA_START_PFN         (1)
 #define IOVA_PFN(addr)         ((addr) >> PAGE_SHIFT)
@@ -68,7 +66,6 @@ LIST_HEAD(acpihid_map);
 const struct iommu_ops amd_iommu_ops;
 const struct iommu_dirty_ops amd_dirty_ops;
 
-static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
 int amd_iommu_max_glx_val = -1;
 
 /*
@@ -81,7 +78,6 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void detach_device(struct device *dev);
-static int domain_enable_v2(struct protection_domain *domain, int pasids);
 
 /****************************************************************************
  *
@@ -324,24 +320,141 @@ static struct iommu_group *acpihid_device_group(struct device *dev)
        return entry->group;
 }
 
-static bool pci_iommuv2_capable(struct pci_dev *pdev)
+static inline bool pdev_pasid_supported(struct iommu_dev_data *dev_data)
 {
-       static const int caps[] = {
-               PCI_EXT_CAP_ID_PRI,
-               PCI_EXT_CAP_ID_PASID,
-       };
-       int i, pos;
+       return (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP);
+}
 
-       if (!pci_ats_supported(pdev))
-               return false;
+static u32 pdev_get_caps(struct pci_dev *pdev)
+{
+       int features;
+       u32 flags = 0;
+
+       if (pci_ats_supported(pdev))
+               flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+
+       if (pci_pri_supported(pdev))
+               flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
+
+       features = pci_pasid_features(pdev);
+       if (features >= 0) {
+               flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+
+               if (features & PCI_PASID_CAP_EXEC)
+                       flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
 
-       for (i = 0; i < 2; ++i) {
-               pos = pci_find_ext_capability(pdev, caps[i]);
-               if (pos == 0)
-                       return false;
+               if (features & PCI_PASID_CAP_PRIV)
+                       flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
        }
 
-       return true;
+       return flags;
+}
+
+static inline int pdev_enable_cap_ats(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+       int ret = -EINVAL;
+
+       if (dev_data->ats_enabled)
+               return 0;
+
+       if (amd_iommu_iotlb_sup &&
+           (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP)) {
+               ret = pci_enable_ats(pdev, PAGE_SHIFT);
+               if (!ret) {
+                       dev_data->ats_enabled = 1;
+                       dev_data->ats_qdep    = pci_ats_queue_depth(pdev);
+               }
+       }
+
+       return ret;
+}
+
+static inline void pdev_disable_cap_ats(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+
+       if (dev_data->ats_enabled) {
+               pci_disable_ats(pdev);
+               dev_data->ats_enabled = 0;
+       }
+}
+
+int amd_iommu_pdev_enable_cap_pri(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+       int ret = -EINVAL;
+
+       if (dev_data->pri_enabled)
+               return 0;
+
+       if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) {
+               /*
+                * First reset the PRI state of the device.
+                * FIXME: Hardcode number of outstanding requests for now
+                */
+               if (!pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32)) {
+                       dev_data->pri_enabled = 1;
+                       dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
+
+                       ret = 0;
+               }
+       }
+
+       return ret;
+}
+
+void amd_iommu_pdev_disable_cap_pri(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+
+       if (dev_data->pri_enabled) {
+               pci_disable_pri(pdev);
+               dev_data->pri_enabled = 0;
+       }
+}
+
+static inline int pdev_enable_cap_pasid(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+       int ret = -EINVAL;
+
+       if (dev_data->pasid_enabled)
+               return 0;
+
+       if (dev_data->flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) {
+               /* Only allow access to user-accessible pages */
+               ret = pci_enable_pasid(pdev, 0);
+               if (!ret)
+                       dev_data->pasid_enabled = 1;
+       }
+
+       return ret;
+}
+
+static inline void pdev_disable_cap_pasid(struct pci_dev *pdev)
+{
+       struct iommu_dev_data *dev_data = dev_iommu_priv_get(&pdev->dev);
+
+       if (dev_data->pasid_enabled) {
+               pci_disable_pasid(pdev);
+               dev_data->pasid_enabled = 0;
+       }
+}
+
+static void pdev_enable_caps(struct pci_dev *pdev)
+{
+       pdev_enable_cap_ats(pdev);
+       pdev_enable_cap_pasid(pdev);
+       amd_iommu_pdev_enable_cap_pri(pdev);
+
+}
+
+static void pdev_disable_caps(struct pci_dev *pdev)
+{
+       pdev_disable_cap_ats(pdev);
+       pdev_disable_cap_pasid(pdev);
+       amd_iommu_pdev_disable_cap_pri(pdev);
 }
 
 /*
@@ -401,8 +514,8 @@ static int iommu_init_device(struct amd_iommu *iommu, struct device *dev)
         * it'll be forced to go into translation mode.
         */
        if ((iommu_default_passthrough() || !amd_iommu_force_isolation) &&
-           dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
-               dev_data->iommu_v2 = iommu->is_iommu_v2;
+           dev_is_pci(dev) && amd_iommu_gt_ppr_supported()) {
+               dev_data->flags = pdev_get_caps(to_pci_dev(dev));
        }
 
        dev_iommu_priv_set(dev, dev_data);
@@ -703,24 +816,6 @@ static void iommu_poll_events(struct amd_iommu *iommu)
        writel(head, iommu->mmio_base + MMIO_EVT_HEAD_OFFSET);
 }
 
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
-{
-       struct amd_iommu_fault fault;
-
-       if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
-               pr_err_ratelimited("Unknown PPR request received\n");
-               return;
-       }
-
-       fault.address   = raw[1];
-       fault.pasid     = PPR_PASID(raw[0]);
-       fault.sbdf      = PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, PPR_DEVID(raw[0]));
-       fault.tag       = PPR_TAG(raw[0]);
-       fault.flags     = PPR_FLAGS(raw[0]);
-
-       atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
-}
-
 static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 {
        u32 head, tail;
@@ -766,8 +861,7 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
                head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
                writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
 
-               /* Handle PPR entry */
-               iommu_handle_ppr_entry(iommu, entry);
+               /* TODO: PPR Handler will be added when we add IOPF support */
 
                /* Refresh ring-buffer information */
                head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
@@ -1096,7 +1190,7 @@ static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, u32 pasid,
 }
 
 static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, u32 pasid,
-                              int status, int tag, bool gn)
+                              int status, int tag, u8 gn)
 {
        memset(cmd, 0, sizeof(*cmd));
 
@@ -1300,7 +1394,7 @@ static void amd_iommu_flush_irt_all(struct amd_iommu *iommu)
 
 void iommu_flush_all_caches(struct amd_iommu *iommu)
 {
-       if (iommu_feature(iommu, FEATURE_IA)) {
+       if (check_feature(FEATURE_IA)) {
                amd_iommu_flush_all(iommu);
        } else {
                amd_iommu_flush_dte_all(iommu);
@@ -1319,7 +1413,7 @@ static int device_flush_iotlb(struct iommu_dev_data *dev_data,
        struct iommu_cmd cmd;
        int qdep;
 
-       qdep     = dev_data->ats.qdep;
+       qdep     = dev_data->ats_qdep;
        iommu    = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
                return -EINVAL;
@@ -1370,7 +1464,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
                        return ret;
        }
 
-       if (dev_data->ats.enabled)
+       if (dev_data->ats_enabled)
                ret = device_flush_iotlb(dev_data, 0, ~0UL);
 
        return ret;
@@ -1403,7 +1497,7 @@ static void __domain_flush_pages(struct protection_domain *domain,
 
        list_for_each_entry(dev_data, &domain->dev_list, list) {
 
-               if (!dev_data->ats.enabled)
+               if (!dev_data->ats_enabled)
                        continue;
 
                ret |= device_flush_iotlb(dev_data, address, size);
@@ -1579,6 +1673,42 @@ static void free_gcr3_table(struct protection_domain *domain)
        free_page((unsigned long)domain->gcr3_tbl);
 }
 
+/*
+ * Number of GCR3 table levels required. Level must be 4-Kbyte
+ * page and can contain up to 512 entries.
+ */
+static int get_gcr3_levels(int pasids)
+{
+       int levels;
+
+       if (pasids == -1)
+               return amd_iommu_max_glx_val;
+
+       levels = get_count_order(pasids);
+
+       return levels ? (DIV_ROUND_UP(levels, 9) - 1) : levels;
+}
+
+/* Note: This function expects iommu_domain->lock to be held prior calling the function. */
+static int setup_gcr3_table(struct protection_domain *domain, int pasids)
+{
+       int levels = get_gcr3_levels(pasids);
+
+       if (levels > amd_iommu_max_glx_val)
+               return -EINVAL;
+
+       domain->gcr3_tbl = alloc_pgtable_page(domain->nid, GFP_ATOMIC);
+       if (domain->gcr3_tbl == NULL)
+               return -ENOMEM;
+
+       domain->glx      = levels;
+       domain->flags   |= PD_IOMMUV2_MASK;
+
+       amd_iommu_domain_update(domain);
+
+       return 0;
+}
+
 static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
                          struct protection_domain *domain, bool ats, bool ppr)
 {
@@ -1607,10 +1737,8 @@ static void set_dte_entry(struct amd_iommu *iommu, u16 devid,
        if (ats)
                flags |= DTE_FLAG_IOTLB;
 
-       if (ppr) {
-               if (iommu_feature(iommu, FEATURE_EPHSUP))
-                       pte_root |= 1ULL << DEV_ENTRY_PPR;
-       }
+       if (ppr)
+               pte_root |= 1ULL << DEV_ENTRY_PPR;
 
        if (domain->dirty_tracking)
                pte_root |= DTE_FLAG_HAD;
@@ -1690,7 +1818,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
        iommu = rlookup_amd_iommu(dev_data->dev);
        if (!iommu)
                return;
-       ats   = dev_data->ats.enabled;
+       ats   = dev_data->ats_enabled;
 
        /* Update data structures */
        dev_data->domain = domain;
@@ -1706,7 +1834,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
 
        /* Update device table */
        set_dte_entry(iommu, dev_data->devid, domain,
-                     ats, dev_data->iommu_v2);
+                     ats, dev_data->ppr);
        clone_aliases(iommu, dev_data->dev);
 
        device_flush_dte(dev_data);
@@ -1741,48 +1869,6 @@ static void do_detach(struct iommu_dev_data *dev_data)
        domain->dev_cnt                 -= 1;
 }
 
-static void pdev_iommuv2_disable(struct pci_dev *pdev)
-{
-       pci_disable_ats(pdev);
-       pci_disable_pri(pdev);
-       pci_disable_pasid(pdev);
-}
-
-static int pdev_pri_ats_enable(struct pci_dev *pdev)
-{
-       int ret;
-
-       /* Only allow access to user-accessible pages */
-       ret = pci_enable_pasid(pdev, 0);
-       if (ret)
-               return ret;
-
-       /* First reset the PRI state of the device */
-       ret = pci_reset_pri(pdev);
-       if (ret)
-               goto out_err_pasid;
-
-       /* Enable PRI */
-       /* FIXME: Hardcode number of outstanding requests for now */
-       ret = pci_enable_pri(pdev, 32);
-       if (ret)
-               goto out_err_pasid;
-
-       ret = pci_enable_ats(pdev, PAGE_SHIFT);
-       if (ret)
-               goto out_err_pri;
-
-       return 0;
-
-out_err_pri:
-       pci_disable_pri(pdev);
-
-out_err_pasid:
-       pci_disable_pasid(pdev);
-
-       return ret;
-}
-
 /*
  * If a device is not yet associated with a domain, this function makes the
  * device visible in the domain
@@ -1791,9 +1877,8 @@ static int attach_device(struct device *dev,
                         struct protection_domain *domain)
 {
        struct iommu_dev_data *dev_data;
-       struct pci_dev *pdev;
        unsigned long flags;
-       int ret;
+       int ret = 0;
 
        spin_lock_irqsave(&domain->lock, flags);
 
@@ -1801,45 +1886,13 @@ static int attach_device(struct device *dev,
 
        spin_lock(&dev_data->lock);
 
-       ret = -EBUSY;
-       if (dev_data->domain != NULL)
+       if (dev_data->domain != NULL) {
+               ret = -EBUSY;
                goto out;
-
-       if (!dev_is_pci(dev))
-               goto skip_ats_check;
-
-       pdev = to_pci_dev(dev);
-       if (domain->flags & PD_IOMMUV2_MASK) {
-               struct iommu_domain *def_domain = iommu_get_dma_domain(dev);
-
-               ret = -EINVAL;
-
-               /*
-                * In case of using AMD_IOMMU_V1 page table mode and the device
-                * is enabling for PPR/ATS support (using v2 table),
-                * we need to make sure that the domain type is identity map.
-                */
-               if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
-                   def_domain->type != IOMMU_DOMAIN_IDENTITY) {
-                       goto out;
-               }
-
-               if (dev_data->iommu_v2) {
-                       if (pdev_pri_ats_enable(pdev) != 0)
-                               goto out;
-
-                       dev_data->ats.enabled = true;
-                       dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
-                       dev_data->pri_tlp     = pci_prg_resp_pasid_required(pdev);
-               }
-       } else if (amd_iommu_iotlb_sup &&
-                  pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
-               dev_data->ats.enabled = true;
-               dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
        }
 
-skip_ats_check:
-       ret = 0;
+       if (dev_is_pci(dev))
+               pdev_enable_caps(to_pci_dev(dev));
 
        do_attach(dev_data, domain);
 
@@ -1887,15 +1940,8 @@ static void detach_device(struct device *dev)
 
        do_detach(dev_data);
 
-       if (!dev_is_pci(dev))
-               goto out;
-
-       if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
-               pdev_iommuv2_disable(to_pci_dev(dev));
-       else if (dev_data->ats.enabled)
-               pci_disable_ats(to_pci_dev(dev));
-
-       dev_data->ats.enabled = false;
+       if (dev_is_pci(dev))
+               pdev_disable_caps(to_pci_dev(dev));
 
 out:
        spin_unlock(&dev_data->lock);
@@ -1985,7 +2031,7 @@ static void update_device_table(struct protection_domain *domain)
                if (!iommu)
                        continue;
                set_dte_entry(iommu, dev_data->devid, domain,
-                             dev_data->ats.enabled, dev_data->iommu_v2);
+                             dev_data->ats_enabled, dev_data->ppr);
                clone_aliases(iommu, dev_data->dev);
        }
 }
@@ -2019,9 +2065,11 @@ void amd_iommu_domain_update(struct protection_domain *domain)
 static void cleanup_domain(struct protection_domain *domain)
 {
        struct iommu_dev_data *entry;
-       unsigned long flags;
 
-       spin_lock_irqsave(&domain->lock, flags);
+       lockdep_assert_held(&domain->lock);
+
+       if (!domain->dev_cnt)
+               return;
 
        while (!list_empty(&domain->dev_list)) {
                entry = list_first_entry(&domain->dev_list,
@@ -2029,8 +2077,7 @@ static void cleanup_domain(struct protection_domain *domain)
                BUG_ON(!entry->domain);
                do_detach(entry);
        }
-
-       spin_unlock_irqrestore(&domain->lock, flags);
+       WARN_ON(domain->dev_cnt != 0);
 }
 
 static void protection_domain_free(struct protection_domain *domain)
@@ -2041,6 +2088,12 @@ static void protection_domain_free(struct protection_domain *domain)
        if (domain->iop.pgtbl_cfg.tlb)
                free_io_pgtable_ops(&domain->iop.iop.ops);
 
+       if (domain->flags & PD_IOMMUV2_MASK)
+               free_gcr3_table(domain);
+
+       if (domain->iop.root)
+               free_page((unsigned long)domain->iop.root);
+
        if (domain->id)
                domain_id_free(domain->id);
 
@@ -2053,18 +2106,10 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
 
        BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL);
 
-       spin_lock_init(&domain->lock);
-       domain->id = domain_id_alloc();
-       if (!domain->id)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&domain->dev_list);
-
        if (mode != PAGE_MODE_NONE) {
                pt_root = (void *)get_zeroed_page(GFP_KERNEL);
-               if (!pt_root) {
-                       domain_id_free(domain->id);
+               if (!pt_root)
                        return -ENOMEM;
-               }
        }
 
        amd_iommu_domain_set_pgtable(domain, pt_root, mode);
@@ -2074,20 +2119,12 @@ static int protection_domain_init_v1(struct protection_domain *domain, int mode)
 
 static int protection_domain_init_v2(struct protection_domain *domain)
 {
-       spin_lock_init(&domain->lock);
-       domain->id = domain_id_alloc();
-       if (!domain->id)
-               return -ENOMEM;
-       INIT_LIST_HEAD(&domain->dev_list);
-
        domain->flags |= PD_GIOV_MASK;
 
        domain->domain.pgsize_bitmap = AMD_IOMMU_PGSIZES_V2;
 
-       if (domain_enable_v2(domain, 1)) {
-               domain_id_free(domain->id);
+       if (setup_gcr3_table(domain, 1))
                return -ENOMEM;
-       }
 
        return 0;
 }
@@ -2097,57 +2134,60 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
        struct io_pgtable_ops *pgtbl_ops;
        struct protection_domain *domain;
        int pgtable;
-       int mode = DEFAULT_PGTABLE_LEVEL;
        int ret;
 
+       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+       if (!domain)
+               return NULL;
+
+       domain->id = domain_id_alloc();
+       if (!domain->id)
+               goto out_err;
+
+       spin_lock_init(&domain->lock);
+       INIT_LIST_HEAD(&domain->dev_list);
+       domain->nid = NUMA_NO_NODE;
+
+       switch (type) {
+       /* No need to allocate io pgtable ops in passthrough mode */
+       case IOMMU_DOMAIN_IDENTITY:
+               return domain;
+       case IOMMU_DOMAIN_DMA:
+               pgtable = amd_iommu_pgtable;
+               break;
        /*
-        * Force IOMMU v1 page table when iommu=pt and
-        * when allocating domain for pass-through devices.
+        * Force IOMMU v1 page table when allocating
+        * domain for pass-through devices.
         */
-       if (type == IOMMU_DOMAIN_IDENTITY) {
-               pgtable = AMD_IOMMU_V1;
-               mode = PAGE_MODE_NONE;
-       } else if (type == IOMMU_DOMAIN_UNMANAGED) {
+       case IOMMU_DOMAIN_UNMANAGED:
                pgtable = AMD_IOMMU_V1;
-       } else if (type == IOMMU_DOMAIN_DMA || type == IOMMU_DOMAIN_DMA_FQ) {
-               pgtable = amd_iommu_pgtable;
-       } else {
-               return NULL;
+               break;
+       default:
+               goto out_err;
        }
 
-       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-       if (!domain)
-               return NULL;
-
        switch (pgtable) {
        case AMD_IOMMU_V1:
-               ret = protection_domain_init_v1(domain, mode);
+               ret = protection_domain_init_v1(domain, DEFAULT_PGTABLE_LEVEL);
                break;
        case AMD_IOMMU_V2:
                ret = protection_domain_init_v2(domain);
                break;
        default:
                ret = -EINVAL;
+               break;
        }
 
        if (ret)
                goto out_err;
 
-       /* No need to allocate io pgtable ops in passthrough mode */
-       if (type == IOMMU_DOMAIN_IDENTITY)
-               return domain;
-
-       domain->nid = NUMA_NO_NODE;
-
        pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
-       if (!pgtbl_ops) {
-               domain_id_free(domain->id);
+       if (!pgtbl_ops)
                goto out_err;
-       }
 
        return domain;
 out_err:
-       kfree(domain);
+       protection_domain_free(domain);
        return NULL;
 }
 
@@ -2236,19 +2276,18 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
 static void amd_iommu_domain_free(struct iommu_domain *dom)
 {
        struct protection_domain *domain;
+       unsigned long flags;
 
-       domain = to_pdomain(dom);
+       if (!dom)
+               return;
 
-       if (domain->dev_cnt > 0)
-               cleanup_domain(domain);
+       domain = to_pdomain(dom);
 
-       BUG_ON(domain->dev_cnt != 0);
+       spin_lock_irqsave(&domain->lock, flags);
 
-       if (!dom)
-               return;
+       cleanup_domain(domain);
 
-       if (domain->flags & PD_IOMMUV2_MASK)
-               free_gcr3_table(domain);
+       spin_unlock_irqrestore(&domain->lock, flags);
 
        protection_domain_free(domain);
 }
@@ -2296,14 +2335,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
        return ret;
 }
 
-static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
-                                    unsigned long iova, size_t size)
+static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
+                                   unsigned long iova, size_t size)
 {
        struct protection_domain *domain = to_pdomain(dom);
        struct io_pgtable_ops *ops = &domain->iop.iop.ops;
 
        if (ops->map_pages)
                domain_flush_np_cache(domain, iova, size);
+       return 0;
 }
 
 static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
@@ -2541,7 +2581,6 @@ bool amd_iommu_is_attach_deferred(struct device *dev)
 
        return dev_data->defer_attach;
 }
-EXPORT_SYMBOL_GPL(amd_iommu_is_attach_deferred);
 
 static void amd_iommu_flush_iotlb_all(struct iommu_domain *domain)
 {
@@ -2581,7 +2620,7 @@ static int amd_iommu_def_domain_type(struct device *dev)
         *    and require remapping.
         *  - SNP is enabled, because it prohibits DTE[Mode]=0.
         */
-       if (dev_data->iommu_v2 &&
+       if (pdev_pasid_supported(dev_data) &&
            !cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
            !amd_iommu_snp_en) {
                return IOMMU_DOMAIN_IDENTITY;
@@ -2626,93 +2665,6 @@ const struct iommu_ops amd_iommu_ops = {
        }
 };
 
-/*****************************************************************************
- *
- * The next functions do a basic initialization of IOMMU for pass through
- * mode
- *
- * In passthrough mode the IOMMU is initialized and enabled but not used for
- * DMA-API translation.
- *
- *****************************************************************************/
-
-/* IOMMUv2 specific functions */
-int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_register(&ppr_notifier, nb);
-}
-EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
-
-int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
-{
-       return atomic_notifier_chain_unregister(&ppr_notifier, nb);
-}
-EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
-
-void amd_iommu_domain_direct_map(struct iommu_domain *dom)
-{
-       struct protection_domain *domain = to_pdomain(dom);
-       unsigned long flags;
-
-       spin_lock_irqsave(&domain->lock, flags);
-
-       if (domain->iop.pgtbl_cfg.tlb)
-               free_io_pgtable_ops(&domain->iop.iop.ops);
-
-       spin_unlock_irqrestore(&domain->lock, flags);
-}
-EXPORT_SYMBOL(amd_iommu_domain_direct_map);
-
-/* Note: This function expects iommu_domain->lock to be held prior calling the function. */
-static int domain_enable_v2(struct protection_domain *domain, int pasids)
-{
-       int levels;
-
-       /* Number of GCR3 table levels required */
-       for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
-               levels += 1;
-
-       if (levels > amd_iommu_max_glx_val)
-               return -EINVAL;
-
-       domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
-       if (domain->gcr3_tbl == NULL)
-               return -ENOMEM;
-
-       domain->glx      = levels;
-       domain->flags   |= PD_IOMMUV2_MASK;
-
-       amd_iommu_domain_update(domain);
-
-       return 0;
-}
-
-int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
-{
-       struct protection_domain *pdom = to_pdomain(dom);
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&pdom->lock, flags);
-
-       /*
-        * Save us all sanity checks whether devices already in the
-        * domain support IOMMUv2. Just force that the domain has no
-        * devices attached when it is switched into IOMMUv2 mode.
-        */
-       ret = -EBUSY;
-       if (pdom->dev_cnt > 0 || pdom->flags & PD_IOMMUV2_MASK)
-               goto out;
-
-       if (!pdom->gcr3_tbl)
-               ret = domain_enable_v2(pdom, pasids);
-
-out:
-       spin_unlock_irqrestore(&pdom->lock, flags);
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
-
 static int __flush_pasid(struct protection_domain *domain, u32 pasid,
                         u64 address, bool size)
 {
@@ -2750,10 +2702,10 @@ static int __flush_pasid(struct protection_domain *domain, u32 pasid,
                   There might be non-IOMMUv2 capable devices in an IOMMUv2
                 * domain.
                 */
-               if (!dev_data->ats.enabled)
+               if (!dev_data->ats_enabled)
                        continue;
 
-               qdep  = dev_data->ats.qdep;
+               qdep  = dev_data->ats_qdep;
                iommu = rlookup_amd_iommu(dev_data->dev);
                if (!iommu)
                        continue;
@@ -2794,7 +2746,6 @@ int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
 
        return ret;
 }
-EXPORT_SYMBOL(amd_iommu_flush_page);
 
 static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
 {
@@ -2814,7 +2765,6 @@ int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
 
        return ret;
 }
-EXPORT_SYMBOL(amd_iommu_flush_tlb);
 
 static u64 *__get_gcr3_pte(u64 *root, int level, u32 pasid, bool alloc)
 {
@@ -2894,7 +2844,6 @@ int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, u32 pasid,
 
        return ret;
 }
-EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
 
 int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
 {
@@ -2908,7 +2857,6 @@ int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, u32 pasid)
 
        return ret;
 }
-EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
 
 int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
                           int status, int tag)
@@ -2927,49 +2875,6 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
 
        return iommu_queue_command(iommu, &cmd);
 }
-EXPORT_SYMBOL(amd_iommu_complete_ppr);
-
-int amd_iommu_device_info(struct pci_dev *pdev,
-                          struct amd_iommu_device_info *info)
-{
-       int max_pasids;
-       int pos;
-
-       if (pdev == NULL || info == NULL)
-               return -EINVAL;
-
-       if (!amd_iommu_v2_supported())
-               return -EINVAL;
-
-       memset(info, 0, sizeof(*info));
-
-       if (pci_ats_supported(pdev))
-               info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
-
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
-       if (pos)
-               info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
-
-       pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
-       if (pos) {
-               int features;
-
-               max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
-               max_pasids = min(max_pasids, (1 << 20));
-
-               info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
-               info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
-
-               features = pci_pasid_features(pdev);
-               if (features & PCI_PASID_CAP_EXEC)
-                       info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
-               if (features & PCI_PASID_CAP_PRIV)
-                       info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(amd_iommu_device_info);
 
 #ifdef CONFIG_IRQ_REMAP
 
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
deleted file mode 100644 (file)
index 57c2fb1..0000000
+++ /dev/null
@@ -1,996 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
- * Author: Joerg Roedel <jroedel@suse.de>
- */
-
-#define pr_fmt(fmt)     "AMD-Vi: " fmt
-
-#include <linux/refcount.h>
-#include <linux/mmu_notifier.h>
-#include <linux/amd-iommu.h>
-#include <linux/mm_types.h>
-#include <linux/profile.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/sched/mm.h>
-#include <linux/wait.h>
-#include <linux/pci.h>
-#include <linux/gfp.h>
-#include <linux/cc_platform.h>
-
-#include "amd_iommu.h"
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
-
-#define PRI_QUEUE_SIZE         512
-
-struct pri_queue {
-       atomic_t inflight;
-       bool finish;
-       int status;
-};
-
-struct pasid_state {
-       struct list_head list;                  /* For global state-list */
-       refcount_t count;                               /* Reference count */
-       unsigned mmu_notifier_count;            /* Counting nested mmu_notifier
-                                                  calls */
-       struct mm_struct *mm;                   /* mm_struct for the faults */
-       struct mmu_notifier mn;                 /* mmu_notifier handle */
-       struct pri_queue pri[PRI_QUEUE_SIZE];   /* PRI tag states */
-       struct device_state *device_state;      /* Link to our device_state */
-       u32 pasid;                              /* PASID index */
-       bool invalid;                           /* Used during setup and
-                                                  teardown of the pasid */
-       spinlock_t lock;                        /* Protect pri_queues and
-                                                  mmu_notifer_count */
-       wait_queue_head_t wq;                   /* To wait for count == 0 */
-};
-
-struct device_state {
-       struct list_head list;
-       u32 sbdf;
-       atomic_t count;
-       struct pci_dev *pdev;
-       struct pasid_state **states;
-       struct iommu_domain *domain;
-       int pasid_levels;
-       int max_pasids;
-       amd_iommu_invalid_ppr_cb inv_ppr_cb;
-       amd_iommu_invalidate_ctx inv_ctx_cb;
-       spinlock_t lock;
-       wait_queue_head_t wq;
-};
-
-struct fault {
-       struct work_struct work;
-       struct device_state *dev_state;
-       struct pasid_state *state;
-       struct mm_struct *mm;
-       u64 address;
-       u32 pasid;
-       u16 tag;
-       u16 finish;
-       u16 flags;
-};
-
-static LIST_HEAD(state_list);
-static DEFINE_SPINLOCK(state_lock);
-
-static struct workqueue_struct *iommu_wq;
-
-static void free_pasid_states(struct device_state *dev_state);
-
-static struct device_state *__get_device_state(u32 sbdf)
-{
-       struct device_state *dev_state;
-
-       list_for_each_entry(dev_state, &state_list, list) {
-               if (dev_state->sbdf == sbdf)
-                       return dev_state;
-       }
-
-       return NULL;
-}
-
-static struct device_state *get_device_state(u32 sbdf)
-{
-       struct device_state *dev_state;
-       unsigned long flags;
-
-       spin_lock_irqsave(&state_lock, flags);
-       dev_state = __get_device_state(sbdf);
-       if (dev_state != NULL)
-               atomic_inc(&dev_state->count);
-       spin_unlock_irqrestore(&state_lock, flags);
-
-       return dev_state;
-}
-
-static void free_device_state(struct device_state *dev_state)
-{
-       struct iommu_group *group;
-
-       /* Get rid of any remaining pasid states */
-       free_pasid_states(dev_state);
-
-       /*
-        * Wait until the last reference is dropped before freeing
-        * the device state.
-        */
-       wait_event(dev_state->wq, !atomic_read(&dev_state->count));
-
-       /*
-        * First detach device from domain - No more PRI requests will arrive
-        * from that device after it is unbound from the IOMMUv2 domain.
-        */
-       group = iommu_group_get(&dev_state->pdev->dev);
-       if (WARN_ON(!group))
-               return;
-
-       iommu_detach_group(dev_state->domain, group);
-
-       iommu_group_put(group);
-
-       /* Everything is down now, free the IOMMUv2 domain */
-       iommu_domain_free(dev_state->domain);
-
-       /* Finally get rid of the device-state */
-       kfree(dev_state);
-}
-
-static void put_device_state(struct device_state *dev_state)
-{
-       if (atomic_dec_and_test(&dev_state->count))
-               wake_up(&dev_state->wq);
-}
-
-/* Must be called under dev_state->lock */
-static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
-                                                 u32 pasid, bool alloc)
-{
-       struct pasid_state **root, **ptr;
-       int level, index;
-
-       level = dev_state->pasid_levels;
-       root  = dev_state->states;
-
-       while (true) {
-
-               index = (pasid >> (9 * level)) & 0x1ff;
-               ptr   = &root[index];
-
-               if (level == 0)
-                       break;
-
-               if (*ptr == NULL) {
-                       if (!alloc)
-                               return NULL;
-
-                       *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
-                       if (*ptr == NULL)
-                               return NULL;
-               }
-
-               root   = (struct pasid_state **)*ptr;
-               level -= 1;
-       }
-
-       return ptr;
-}
-
-static int set_pasid_state(struct device_state *dev_state,
-                          struct pasid_state *pasid_state,
-                          u32 pasid)
-{
-       struct pasid_state **ptr;
-       unsigned long flags;
-       int ret;
-
-       spin_lock_irqsave(&dev_state->lock, flags);
-       ptr = __get_pasid_state_ptr(dev_state, pasid, true);
-
-       ret = -ENOMEM;
-       if (ptr == NULL)
-               goto out_unlock;
-
-       ret = -ENOMEM;
-       if (*ptr != NULL)
-               goto out_unlock;
-
-       *ptr = pasid_state;
-
-       ret = 0;
-
-out_unlock:
-       spin_unlock_irqrestore(&dev_state->lock, flags);
-
-       return ret;
-}
-
-static void clear_pasid_state(struct device_state *dev_state, u32 pasid)
-{
-       struct pasid_state **ptr;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_state->lock, flags);
-       ptr = __get_pasid_state_ptr(dev_state, pasid, true);
-
-       if (ptr == NULL)
-               goto out_unlock;
-
-       *ptr = NULL;
-
-out_unlock:
-       spin_unlock_irqrestore(&dev_state->lock, flags);
-}
-
-static struct pasid_state *get_pasid_state(struct device_state *dev_state,
-                                          u32 pasid)
-{
-       struct pasid_state **ptr, *ret = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev_state->lock, flags);
-       ptr = __get_pasid_state_ptr(dev_state, pasid, false);
-
-       if (ptr == NULL)
-               goto out_unlock;
-
-       ret = *ptr;
-       if (ret)
-               refcount_inc(&ret->count);
-
-out_unlock:
-       spin_unlock_irqrestore(&dev_state->lock, flags);
-
-       return ret;
-}
-
-static void free_pasid_state(struct pasid_state *pasid_state)
-{
-       kfree(pasid_state);
-}
-
-static void put_pasid_state(struct pasid_state *pasid_state)
-{
-       if (refcount_dec_and_test(&pasid_state->count))
-               wake_up(&pasid_state->wq);
-}
-
-static void put_pasid_state_wait(struct pasid_state *pasid_state)
-{
-       if (!refcount_dec_and_test(&pasid_state->count))
-               wait_event(pasid_state->wq, !refcount_read(&pasid_state->count));
-       free_pasid_state(pasid_state);
-}
-
-static void unbind_pasid(struct pasid_state *pasid_state)
-{
-       struct iommu_domain *domain;
-
-       domain = pasid_state->device_state->domain;
-
-       /*
-        * Mark pasid_state as invalid, no more faults will we added to the
-        * work queue after this is visible everywhere.
-        */
-       pasid_state->invalid = true;
-
-       /* Make sure this is visible */
-       smp_wmb();
-
-       /* After this the device/pasid can't access the mm anymore */
-       amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
-
-       /* Make sure no more pending faults are in the queue */
-       flush_workqueue(iommu_wq);
-}
-
-static void free_pasid_states_level1(struct pasid_state **tbl)
-{
-       int i;
-
-       for (i = 0; i < 512; ++i) {
-               if (tbl[i] == NULL)
-                       continue;
-
-               free_page((unsigned long)tbl[i]);
-       }
-}
-
-static void free_pasid_states_level2(struct pasid_state **tbl)
-{
-       struct pasid_state **ptr;
-       int i;
-
-       for (i = 0; i < 512; ++i) {
-               if (tbl[i] == NULL)
-                       continue;
-
-               ptr = (struct pasid_state **)tbl[i];
-               free_pasid_states_level1(ptr);
-       }
-}
-
-static void free_pasid_states(struct device_state *dev_state)
-{
-       struct pasid_state *pasid_state;
-       int i;
-
-       for (i = 0; i < dev_state->max_pasids; ++i) {
-               pasid_state = get_pasid_state(dev_state, i);
-               if (pasid_state == NULL)
-                       continue;
-
-               put_pasid_state(pasid_state);
-
-               /* Clear the pasid state so that the pasid can be re-used */
-               clear_pasid_state(dev_state, pasid_state->pasid);
-
-               /*
-                * This will call the mn_release function and
-                * unbind the PASID
-                */
-               mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
-
-               put_pasid_state_wait(pasid_state); /* Reference taken in
-                                                     amd_iommu_bind_pasid */
-
-               /* Drop reference taken in amd_iommu_bind_pasid */
-               put_device_state(dev_state);
-       }
-
-       if (dev_state->pasid_levels == 2)
-               free_pasid_states_level2(dev_state->states);
-       else if (dev_state->pasid_levels == 1)
-               free_pasid_states_level1(dev_state->states);
-       else
-               BUG_ON(dev_state->pasid_levels != 0);
-
-       free_page((unsigned long)dev_state->states);
-}
-
-static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
-{
-       return container_of(mn, struct pasid_state, mn);
-}
-
-static void mn_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
-                                       struct mm_struct *mm,
-                                       unsigned long start, unsigned long end)
-{
-       struct pasid_state *pasid_state;
-       struct device_state *dev_state;
-
-       pasid_state = mn_to_state(mn);
-       dev_state   = pasid_state->device_state;
-
-       if ((start ^ (end - 1)) < PAGE_SIZE)
-               amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
-                                    start);
-       else
-               amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
-}
-
-static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
-{
-       struct pasid_state *pasid_state;
-       struct device_state *dev_state;
-       bool run_inv_ctx_cb;
-
-       might_sleep();
-
-       pasid_state    = mn_to_state(mn);
-       dev_state      = pasid_state->device_state;
-       run_inv_ctx_cb = !pasid_state->invalid;
-
-       if (run_inv_ctx_cb && dev_state->inv_ctx_cb)
-               dev_state->inv_ctx_cb(dev_state->pdev, pasid_state->pasid);
-
-       unbind_pasid(pasid_state);
-}
-
-static const struct mmu_notifier_ops iommu_mn = {
-       .release                        = mn_release,
-       .arch_invalidate_secondary_tlbs = mn_arch_invalidate_secondary_tlbs,
-};
-
-static void set_pri_tag_status(struct pasid_state *pasid_state,
-                              u16 tag, int status)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&pasid_state->lock, flags);
-       pasid_state->pri[tag].status = status;
-       spin_unlock_irqrestore(&pasid_state->lock, flags);
-}
-
-static void finish_pri_tag(struct device_state *dev_state,
-                          struct pasid_state *pasid_state,
-                          u16 tag)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&pasid_state->lock, flags);
-       if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
-           pasid_state->pri[tag].finish) {
-               amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
-                                      pasid_state->pri[tag].status, tag);
-               pasid_state->pri[tag].finish = false;
-               pasid_state->pri[tag].status = PPR_SUCCESS;
-       }
-       spin_unlock_irqrestore(&pasid_state->lock, flags);
-}
-
-static void handle_fault_error(struct fault *fault)
-{
-       int status;
-
-       if (!fault->dev_state->inv_ppr_cb) {
-               set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
-               return;
-       }
-
-       status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
-                                             fault->pasid,
-                                             fault->address,
-                                             fault->flags);
-       switch (status) {
-       case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
-               set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
-               break;
-       case AMD_IOMMU_INV_PRI_RSP_INVALID:
-               set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
-               break;
-       case AMD_IOMMU_INV_PRI_RSP_FAIL:
-               set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
-               break;
-       default:
-               BUG();
-       }
-}
-
-static bool access_error(struct vm_area_struct *vma, struct fault *fault)
-{
-       unsigned long requested = 0;
-
-       if (fault->flags & PPR_FAULT_EXEC)
-               requested |= VM_EXEC;
-
-       if (fault->flags & PPR_FAULT_READ)
-               requested |= VM_READ;
-
-       if (fault->flags & PPR_FAULT_WRITE)
-               requested |= VM_WRITE;
-
-       return (requested & ~vma->vm_flags) != 0;
-}
-
-static void do_fault(struct work_struct *work)
-{
-       struct fault *fault = container_of(work, struct fault, work);
-       struct vm_area_struct *vma;
-       vm_fault_t ret = VM_FAULT_ERROR;
-       unsigned int flags = 0;
-       struct mm_struct *mm;
-       u64 address;
-
-       mm = fault->state->mm;
-       address = fault->address;
-
-       if (fault->flags & PPR_FAULT_USER)
-               flags |= FAULT_FLAG_USER;
-       if (fault->flags & PPR_FAULT_WRITE)
-               flags |= FAULT_FLAG_WRITE;
-       flags |= FAULT_FLAG_REMOTE;
-
-       mmap_read_lock(mm);
-       vma = vma_lookup(mm, address);
-       if (!vma)
-               /* failed to get a vma in the right range */
-               goto out;
-
-       /* Check if we have the right permissions on the vma */
-       if (access_error(vma, fault))
-               goto out;
-
-       ret = handle_mm_fault(vma, address, flags, NULL);
-out:
-       mmap_read_unlock(mm);
-
-       if (ret & VM_FAULT_ERROR)
-               /* failed to service fault */
-               handle_fault_error(fault);
-
-       finish_pri_tag(fault->dev_state, fault->state, fault->tag);
-
-       put_pasid_state(fault->state);
-
-       kfree(fault);
-}
-
-static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
-{
-       struct amd_iommu_fault *iommu_fault;
-       struct pasid_state *pasid_state;
-       struct device_state *dev_state;
-       struct pci_dev *pdev = NULL;
-       unsigned long flags;
-       struct fault *fault;
-       bool finish;
-       u16 tag, devid, seg_id;
-       int ret;
-
-       iommu_fault = data;
-       tag         = iommu_fault->tag & 0x1ff;
-       finish      = (iommu_fault->tag >> 9) & 1;
-
-       seg_id = PCI_SBDF_TO_SEGID(iommu_fault->sbdf);
-       devid = PCI_SBDF_TO_DEVID(iommu_fault->sbdf);
-       pdev = pci_get_domain_bus_and_slot(seg_id, PCI_BUS_NUM(devid),
-                                          devid & 0xff);
-       if (!pdev)
-               return -ENODEV;
-
-       ret = NOTIFY_DONE;
-
-       /* In kdump kernel pci dev is not initialized yet -> send INVALID */
-       if (amd_iommu_is_attach_deferred(&pdev->dev)) {
-               amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
-                                      PPR_INVALID, tag);
-               goto out;
-       }
-
-       dev_state = get_device_state(iommu_fault->sbdf);
-       if (dev_state == NULL)
-               goto out;
-
-       pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
-       if (pasid_state == NULL || pasid_state->invalid) {
-               /* We know the device but not the PASID -> send INVALID */
-               amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
-                                      PPR_INVALID, tag);
-               goto out_drop_state;
-       }
-
-       spin_lock_irqsave(&pasid_state->lock, flags);
-       atomic_inc(&pasid_state->pri[tag].inflight);
-       if (finish)
-               pasid_state->pri[tag].finish = true;
-       spin_unlock_irqrestore(&pasid_state->lock, flags);
-
-       fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
-       if (fault == NULL) {
-               /* We are OOM - send success and let the device re-fault */
-               finish_pri_tag(dev_state, pasid_state, tag);
-               goto out_drop_state;
-       }
-
-       fault->dev_state = dev_state;
-       fault->address   = iommu_fault->address;
-       fault->state     = pasid_state;
-       fault->tag       = tag;
-       fault->finish    = finish;
-       fault->pasid     = iommu_fault->pasid;
-       fault->flags     = iommu_fault->flags;
-       INIT_WORK(&fault->work, do_fault);
-
-       queue_work(iommu_wq, &fault->work);
-
-       ret = NOTIFY_OK;
-
-out_drop_state:
-
-       if (ret != NOTIFY_OK && pasid_state)
-               put_pasid_state(pasid_state);
-
-       put_device_state(dev_state);
-
-out:
-       pci_dev_put(pdev);
-       return ret;
-}
-
-static struct notifier_block ppr_nb = {
-       .notifier_call = ppr_notifier,
-};
-
-int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
-                        struct task_struct *task)
-{
-       struct pasid_state *pasid_state;
-       struct device_state *dev_state;
-       struct mm_struct *mm;
-       u32 sbdf;
-       int ret;
-
-       might_sleep();
-
-       if (!amd_iommu_v2_supported())
-               return -ENODEV;
-
-       sbdf      = get_pci_sbdf_id(pdev);
-       dev_state = get_device_state(sbdf);
-
-       if (dev_state == NULL)
-               return -EINVAL;
-
-       ret = -EINVAL;
-       if (pasid >= dev_state->max_pasids)
-               goto out;
-
-       ret = -ENOMEM;
-       pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
-       if (pasid_state == NULL)
-               goto out;
-
-
-       refcount_set(&pasid_state->count, 1);
-       init_waitqueue_head(&pasid_state->wq);
-       spin_lock_init(&pasid_state->lock);
-
-       mm                        = get_task_mm(task);
-       pasid_state->mm           = mm;
-       pasid_state->device_state = dev_state;
-       pasid_state->pasid        = pasid;
-       pasid_state->invalid      = true; /* Mark as valid only if we are
-                                            done with setting up the pasid */
-       pasid_state->mn.ops       = &iommu_mn;
-
-       if (pasid_state->mm == NULL)
-               goto out_free;
-
-       ret = mmu_notifier_register(&pasid_state->mn, mm);
-       if (ret)
-               goto out_free;
-
-       ret = set_pasid_state(dev_state, pasid_state, pasid);
-       if (ret)
-               goto out_unregister;
-
-       ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
-                                       __pa(pasid_state->mm->pgd));
-       if (ret)
-               goto out_clear_state;
-
-       /* Now we are ready to handle faults */
-       pasid_state->invalid = false;
-
-       /*
-        * Drop the reference to the mm_struct here. We rely on the
-        * mmu_notifier release call-back to inform us when the mm
-        * is going away.
-        */
-       mmput(mm);
-
-       return 0;
-
-out_clear_state:
-       clear_pasid_state(dev_state, pasid);
-
-out_unregister:
-       mmu_notifier_unregister(&pasid_state->mn, mm);
-       mmput(mm);
-
-out_free:
-       free_pasid_state(pasid_state);
-
-out:
-       put_device_state(dev_state);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_bind_pasid);
-
-void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid)
-{
-       struct pasid_state *pasid_state;
-       struct device_state *dev_state;
-       u32 sbdf;
-
-       might_sleep();
-
-       if (!amd_iommu_v2_supported())
-               return;
-
-       sbdf = get_pci_sbdf_id(pdev);
-       dev_state = get_device_state(sbdf);
-       if (dev_state == NULL)
-               return;
-
-       if (pasid >= dev_state->max_pasids)
-               goto out;
-
-       pasid_state = get_pasid_state(dev_state, pasid);
-       if (pasid_state == NULL)
-               goto out;
-       /*
-        * Drop reference taken here. We are safe because we still hold
-        * the reference taken in the amd_iommu_bind_pasid function.
-        */
-       put_pasid_state(pasid_state);
-
-       /* Clear the pasid state so that the pasid can be re-used */
-       clear_pasid_state(dev_state, pasid_state->pasid);
-
-       /*
-        * Call mmu_notifier_unregister to drop our reference
-        * to pasid_state->mm
-        */
-       mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
-
-       put_pasid_state_wait(pasid_state); /* Reference taken in
-                                             amd_iommu_bind_pasid */
-out:
-       /* Drop reference taken in this function */
-       put_device_state(dev_state);
-
-       /* Drop reference taken in amd_iommu_bind_pasid */
-       put_device_state(dev_state);
-}
-EXPORT_SYMBOL(amd_iommu_unbind_pasid);
-
-int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
-{
-       struct device_state *dev_state;
-       struct iommu_group *group;
-       unsigned long flags;
-       int ret, tmp;
-       u32 sbdf;
-
-       might_sleep();
-
-       /*
-        * When memory encryption is active the device is likely not in a
-        * direct-mapped domain. Forbid using IOMMUv2 functionality for now.
-        */
-       if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
-               return -ENODEV;
-
-       if (!amd_iommu_v2_supported())
-               return -ENODEV;
-
-       if (pasids <= 0 || pasids > (PASID_MASK + 1))
-               return -EINVAL;
-
-       sbdf = get_pci_sbdf_id(pdev);
-
-       dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
-       if (dev_state == NULL)
-               return -ENOMEM;
-
-       spin_lock_init(&dev_state->lock);
-       init_waitqueue_head(&dev_state->wq);
-       dev_state->pdev  = pdev;
-       dev_state->sbdf = sbdf;
-
-       tmp = pasids;
-       for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
-               dev_state->pasid_levels += 1;
-
-       atomic_set(&dev_state->count, 1);
-       dev_state->max_pasids = pasids;
-
-       ret = -ENOMEM;
-       dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
-       if (dev_state->states == NULL)
-               goto out_free_dev_state;
-
-       dev_state->domain = iommu_domain_alloc(&pci_bus_type);
-       if (dev_state->domain == NULL)
-               goto out_free_states;
-
-       /* See iommu_is_default_domain() */
-       dev_state->domain->type = IOMMU_DOMAIN_IDENTITY;
-       amd_iommu_domain_direct_map(dev_state->domain);
-
-       ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
-       if (ret)
-               goto out_free_domain;
-
-       group = iommu_group_get(&pdev->dev);
-       if (!group) {
-               ret = -EINVAL;
-               goto out_free_domain;
-       }
-
-       ret = iommu_attach_group(dev_state->domain, group);
-       if (ret != 0)
-               goto out_drop_group;
-
-       iommu_group_put(group);
-
-       spin_lock_irqsave(&state_lock, flags);
-
-       if (__get_device_state(sbdf) != NULL) {
-               spin_unlock_irqrestore(&state_lock, flags);
-               ret = -EBUSY;
-               goto out_free_domain;
-       }
-
-       list_add_tail(&dev_state->list, &state_list);
-
-       spin_unlock_irqrestore(&state_lock, flags);
-
-       return 0;
-
-out_drop_group:
-       iommu_group_put(group);
-
-out_free_domain:
-       iommu_domain_free(dev_state->domain);
-
-out_free_states:
-       free_page((unsigned long)dev_state->states);
-
-out_free_dev_state:
-       kfree(dev_state);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_init_device);
-
-void amd_iommu_free_device(struct pci_dev *pdev)
-{
-       struct device_state *dev_state;
-       unsigned long flags;
-       u32 sbdf;
-
-       if (!amd_iommu_v2_supported())
-               return;
-
-       sbdf = get_pci_sbdf_id(pdev);
-
-       spin_lock_irqsave(&state_lock, flags);
-
-       dev_state = __get_device_state(sbdf);
-       if (dev_state == NULL) {
-               spin_unlock_irqrestore(&state_lock, flags);
-               return;
-       }
-
-       list_del(&dev_state->list);
-
-       spin_unlock_irqrestore(&state_lock, flags);
-
-       put_device_state(dev_state);
-       free_device_state(dev_state);
-}
-EXPORT_SYMBOL(amd_iommu_free_device);
-
-int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
-                                amd_iommu_invalid_ppr_cb cb)
-{
-       struct device_state *dev_state;
-       unsigned long flags;
-       u32 sbdf;
-       int ret;
-
-       if (!amd_iommu_v2_supported())
-               return -ENODEV;
-
-       sbdf = get_pci_sbdf_id(pdev);
-
-       spin_lock_irqsave(&state_lock, flags);
-
-       ret = -EINVAL;
-       dev_state = __get_device_state(sbdf);
-       if (dev_state == NULL)
-               goto out_unlock;
-
-       dev_state->inv_ppr_cb = cb;
-
-       ret = 0;
-
-out_unlock:
-       spin_unlock_irqrestore(&state_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
-
-int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
-                                   amd_iommu_invalidate_ctx cb)
-{
-       struct device_state *dev_state;
-       unsigned long flags;
-       u32 sbdf;
-       int ret;
-
-       if (!amd_iommu_v2_supported())
-               return -ENODEV;
-
-       sbdf = get_pci_sbdf_id(pdev);
-
-       spin_lock_irqsave(&state_lock, flags);
-
-       ret = -EINVAL;
-       dev_state = __get_device_state(sbdf);
-       if (dev_state == NULL)
-               goto out_unlock;
-
-       dev_state->inv_ctx_cb = cb;
-
-       ret = 0;
-
-out_unlock:
-       spin_unlock_irqrestore(&state_lock, flags);
-
-       return ret;
-}
-EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
-
-static int __init amd_iommu_v2_init(void)
-{
-       int ret;
-
-       if (!amd_iommu_v2_supported()) {
-               pr_info("AMD IOMMUv2 functionality not available on this system - This is not a bug.\n");
-               /*
-                * Load anyway to provide the symbols to other modules
-                * which may use AMD IOMMUv2 optionally.
-                */
-               return 0;
-       }
-
-       ret = -ENOMEM;
-       iommu_wq = alloc_workqueue("amd_iommu_v2", WQ_MEM_RECLAIM, 0);
-       if (iommu_wq == NULL)
-               goto out;
-
-       amd_iommu_register_ppr_notifier(&ppr_nb);
-
-       pr_info("AMD IOMMUv2 loaded and initialized\n");
-
-       return 0;
-
-out:
-       return ret;
-}
-
-static void __exit amd_iommu_v2_exit(void)
-{
-       struct device_state *dev_state, *next;
-       unsigned long flags;
-       LIST_HEAD(freelist);
-
-       if (!amd_iommu_v2_supported())
-               return;
-
-       amd_iommu_unregister_ppr_notifier(&ppr_nb);
-
-       flush_workqueue(iommu_wq);
-
-       /*
-        * The loop below might call flush_workqueue(), so call
-        * destroy_workqueue() after it
-        */
-       spin_lock_irqsave(&state_lock, flags);
-
-       list_for_each_entry_safe(dev_state, next, &state_list, list) {
-               WARN_ON_ONCE(1);
-
-               put_device_state(dev_state);
-               list_del(&dev_state->list);
-               list_add_tail(&dev_state->list, &freelist);
-       }
-
-       spin_unlock_irqrestore(&state_lock, flags);
-
-       /*
-        * Since free_device_state waits on the count to be zero,
-        * we need to free dev_state outside the spinlock.
-        */
-       list_for_each_entry_safe(dev_state, next, &freelist, list) {
-               list_del(&dev_state->list);
-               free_device_state(dev_state);
-       }
-
-       destroy_workqueue(iommu_wq);
-}
-
-module_init(amd_iommu_v2_init);
-module_exit(amd_iommu_v2_exit);
index 0b89275084274659fb91f8dbd2567693bdf963dd..ee05f4824bfad1d6515fd506e9c1c2fd6760e18f 100644 (file)
@@ -196,7 +196,6 @@ struct apple_dart_hw {
  * @lock: lock for hardware operations involving this dart
  * @pgsize: pagesize supported by this DART
  * @supports_bypass: indicates if this DART supports bypass mode
- * @force_bypass: force bypass mode due to pagesize mismatch?
  * @sid2group: maps stream ids to iommu_groups
  * @iommu: iommu core device
  */
@@ -217,7 +216,6 @@ struct apple_dart {
        u32 pgsize;
        u32 num_streams;
        u32 supports_bypass : 1;
-       u32 force_bypass : 1;
 
        struct iommu_group *sid2group[DART_MAX_STREAMS];
        struct iommu_device iommu;
@@ -506,10 +504,11 @@ static void apple_dart_iotlb_sync(struct iommu_domain *domain,
        apple_dart_domain_flush_tlb(to_dart_domain(domain));
 }
 
-static void apple_dart_iotlb_sync_map(struct iommu_domain *domain,
-                                     unsigned long iova, size_t size)
+static int apple_dart_iotlb_sync_map(struct iommu_domain *domain,
+                                    unsigned long iova, size_t size)
 {
        apple_dart_domain_flush_tlb(to_dart_domain(domain));
+       return 0;
 }
 
 static phys_addr_t apple_dart_iova_to_phys(struct iommu_domain *domain,
@@ -568,15 +567,17 @@ apple_dart_setup_translation(struct apple_dart_domain *domain,
        stream_map->dart->hw->invalidate_tlb(stream_map);
 }
 
-static int apple_dart_finalize_domain(struct iommu_domain *domain,
+static int apple_dart_finalize_domain(struct apple_dart_domain *dart_domain,
                                      struct apple_dart_master_cfg *cfg)
 {
-       struct apple_dart_domain *dart_domain = to_dart_domain(domain);
        struct apple_dart *dart = cfg->stream_maps[0].dart;
        struct io_pgtable_cfg pgtbl_cfg;
        int ret = 0;
        int i, j;
 
+       if (dart->pgsize > PAGE_SIZE)
+               return -EINVAL;
+
        mutex_lock(&dart_domain->init_lock);
 
        if (dart_domain->finalized)
@@ -597,17 +598,18 @@ static int apple_dart_finalize_domain(struct iommu_domain *domain,
                .iommu_dev = dart->dev,
        };
 
-       dart_domain->pgtbl_ops =
-               alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg, domain);
+       dart_domain->pgtbl_ops = alloc_io_pgtable_ops(dart->hw->fmt, &pgtbl_cfg,
+                                                     &dart_domain->domain);
        if (!dart_domain->pgtbl_ops) {
                ret = -ENOMEM;
                goto done;
        }
 
-       domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
-       domain->geometry.aperture_start = 0;
-       domain->geometry.aperture_end = (dma_addr_t)DMA_BIT_MASK(dart->ias);
-       domain->geometry.force_aperture = true;
+       dart_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
+       dart_domain->domain.geometry.aperture_start = 0;
+       dart_domain->domain.geometry.aperture_end =
+               (dma_addr_t)DMA_BIT_MASK(dart->ias);
+       dart_domain->domain.geometry.force_aperture = true;
 
        dart_domain->finalized = true;
 
@@ -651,47 +653,72 @@ static int apple_dart_domain_add_streams(struct apple_dart_domain *domain,
                                      true);
 }
 
-static int apple_dart_attach_dev(struct iommu_domain *domain,
-                                struct device *dev)
+static int apple_dart_attach_dev_paging(struct iommu_domain *domain,
+                                       struct device *dev)
 {
        int ret, i;
        struct apple_dart_stream_map *stream_map;
        struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
        struct apple_dart_domain *dart_domain = to_dart_domain(domain);
 
-       if (cfg->stream_maps[0].dart->force_bypass &&
-           domain->type != IOMMU_DOMAIN_IDENTITY)
-               return -EINVAL;
-       if (!cfg->stream_maps[0].dart->supports_bypass &&
-           domain->type == IOMMU_DOMAIN_IDENTITY)
-               return -EINVAL;
+       ret = apple_dart_finalize_domain(dart_domain, cfg);
+       if (ret)
+               return ret;
 
-       ret = apple_dart_finalize_domain(domain, cfg);
+       ret = apple_dart_domain_add_streams(dart_domain, cfg);
        if (ret)
                return ret;
 
-       switch (domain->type) {
-       default:
-               ret = apple_dart_domain_add_streams(dart_domain, cfg);
-               if (ret)
-                       return ret;
+       for_each_stream_map(i, cfg, stream_map)
+               apple_dart_setup_translation(dart_domain, stream_map);
+       return 0;
+}
 
-               for_each_stream_map(i, cfg, stream_map)
-                       apple_dart_setup_translation(dart_domain, stream_map);
-               break;
-       case IOMMU_DOMAIN_BLOCKED:
-               for_each_stream_map(i, cfg, stream_map)
-                       apple_dart_hw_disable_dma(stream_map);
-               break;
-       case IOMMU_DOMAIN_IDENTITY:
-               for_each_stream_map(i, cfg, stream_map)
-                       apple_dart_hw_enable_bypass(stream_map);
-               break;
-       }
+static int apple_dart_attach_dev_identity(struct iommu_domain *domain,
+                                         struct device *dev)
+{
+       struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+       struct apple_dart_stream_map *stream_map;
+       int i;
 
-       return ret;
+       if (!cfg->stream_maps[0].dart->supports_bypass)
+               return -EINVAL;
+
+       for_each_stream_map(i, cfg, stream_map)
+               apple_dart_hw_enable_bypass(stream_map);
+       return 0;
 }
 
+static const struct iommu_domain_ops apple_dart_identity_ops = {
+       .attach_dev = apple_dart_attach_dev_identity,
+};
+
+static struct iommu_domain apple_dart_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &apple_dart_identity_ops,
+};
+
+static int apple_dart_attach_dev_blocked(struct iommu_domain *domain,
+                                        struct device *dev)
+{
+       struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+       struct apple_dart_stream_map *stream_map;
+       int i;
+
+       for_each_stream_map(i, cfg, stream_map)
+               apple_dart_hw_disable_dma(stream_map);
+       return 0;
+}
+
+static const struct iommu_domain_ops apple_dart_blocked_ops = {
+       .attach_dev = apple_dart_attach_dev_blocked,
+};
+
+static struct iommu_domain apple_dart_blocked_domain = {
+       .type = IOMMU_DOMAIN_BLOCKED,
+       .ops = &apple_dart_blocked_ops,
+};
+
 static struct iommu_device *apple_dart_probe_device(struct device *dev)
 {
        struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
@@ -717,24 +744,26 @@ static void apple_dart_release_device(struct device *dev)
        kfree(cfg);
 }
 
-static struct iommu_domain *apple_dart_domain_alloc(unsigned int type)
+static struct iommu_domain *apple_dart_domain_alloc_paging(struct device *dev)
 {
        struct apple_dart_domain *dart_domain;
 
-       if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED &&
-           type != IOMMU_DOMAIN_IDENTITY && type != IOMMU_DOMAIN_BLOCKED)
-               return NULL;
-
        dart_domain = kzalloc(sizeof(*dart_domain), GFP_KERNEL);
        if (!dart_domain)
                return NULL;
 
        mutex_init(&dart_domain->init_lock);
 
-       /* no need to allocate pgtbl_ops or do any other finalization steps */
-       if (type == IOMMU_DOMAIN_IDENTITY || type == IOMMU_DOMAIN_BLOCKED)
-               dart_domain->finalized = true;
+       if (dev) {
+               struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
+               int ret;
 
+               ret = apple_dart_finalize_domain(dart_domain, cfg);
+               if (ret) {
+                       kfree(dart_domain);
+                       return ERR_PTR(ret);
+               }
+       }
        return &dart_domain->domain;
 }
 
@@ -770,8 +799,6 @@ static int apple_dart_of_xlate(struct device *dev, struct of_phandle_args *args)
        if (cfg_dart) {
                if (cfg_dart->supports_bypass != dart->supports_bypass)
                        return -EINVAL;
-               if (cfg_dart->force_bypass != dart->force_bypass)
-                       return -EINVAL;
                if (cfg_dart->pgsize != dart->pgsize)
                        return -EINVAL;
        }
@@ -913,7 +940,7 @@ static int apple_dart_def_domain_type(struct device *dev)
 {
        struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
 
-       if (cfg->stream_maps[0].dart->force_bypass)
+       if (cfg->stream_maps[0].dart->pgsize > PAGE_SIZE)
                return IOMMU_DOMAIN_IDENTITY;
        if (!cfg->stream_maps[0].dart->supports_bypass)
                return IOMMU_DOMAIN_DMA;
@@ -947,7 +974,9 @@ static void apple_dart_get_resv_regions(struct device *dev,
 }
 
 static const struct iommu_ops apple_dart_iommu_ops = {
-       .domain_alloc = apple_dart_domain_alloc,
+       .identity_domain = &apple_dart_identity_domain,
+       .blocked_domain = &apple_dart_blocked_domain,
+       .domain_alloc_paging = apple_dart_domain_alloc_paging,
        .probe_device = apple_dart_probe_device,
        .release_device = apple_dart_release_device,
        .device_group = apple_dart_device_group,
@@ -957,7 +986,7 @@ static const struct iommu_ops apple_dart_iommu_ops = {
        .pgsize_bitmap = -1UL, /* Restricted during dart probe */
        .owner = THIS_MODULE,
        .default_domain_ops = &(const struct iommu_domain_ops) {
-               .attach_dev     = apple_dart_attach_dev,
+               .attach_dev     = apple_dart_attach_dev_paging,
                .map_pages      = apple_dart_map_pages,
                .unmap_pages    = apple_dart_unmap_pages,
                .flush_iotlb_all = apple_dart_flush_iotlb_all,
@@ -1111,8 +1140,6 @@ static int apple_dart_probe(struct platform_device *pdev)
                goto err_clk_disable;
        }
 
-       dart->force_bypass = dart->pgsize > PAGE_SIZE;
-
        ret = apple_dart_hw_reset(dart);
        if (ret)
                goto err_clk_disable;
@@ -1136,7 +1163,8 @@ static int apple_dart_probe(struct platform_device *pdev)
        dev_info(
                &pdev->dev,
                "DART [pagesize %x, %d streams, bypass support: %d, bypass forced: %d] initialized\n",
-               dart->pgsize, dart->num_streams, dart->supports_bypass, dart->force_bypass);
+               dart->pgsize, dart->num_streams, dart->supports_bypass,
+               dart->pgsize > PAGE_SIZE);
        return 0;
 
 err_sysfs_remove:
index 8a16cd3ef487cad4d63e8ebfae6f775b7bef3825..353248ab18e76d3ab1f07c894cfb903f7e424b83 100644 (file)
@@ -25,11 +25,9 @@ struct arm_smmu_mmu_notifier {
 #define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)
 
 struct arm_smmu_bond {
-       struct iommu_sva                sva;
        struct mm_struct                *mm;
        struct arm_smmu_mmu_notifier    *smmu_mn;
        struct list_head                list;
-       refcount_t                      refs;
 };
 
 #define sva_to_bond(handle) \
@@ -37,6 +35,25 @@ struct arm_smmu_bond {
 
 static DEFINE_MUTEX(sva_lock);
 
+/*
+ * Write the CD to the CD tables for all masters that this domain is attached
+ * to. Note that this is only used to update existing CD entries in the target
+ * CD table, for which it's assumed that arm_smmu_write_ctx_desc can't fail.
+ */
+static void arm_smmu_update_ctx_desc_devices(struct arm_smmu_domain *smmu_domain,
+                                          int ssid,
+                                          struct arm_smmu_ctx_desc *cd)
+{
+       struct arm_smmu_master *master;
+       unsigned long flags;
+
+       spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+       list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+               arm_smmu_write_ctx_desc(master, ssid, cd);
+       }
+       spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
+}
+
 /*
  * Check if the CPU ASID is available on the SMMU side. If a private context
  * descriptor is using it, try to replace it.
@@ -62,7 +79,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
                return cd;
        }
 
-       smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
+       smmu_domain = container_of(cd, struct arm_smmu_domain, cd);
        smmu = smmu_domain->smmu;
 
        ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
@@ -80,7 +97,7 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
         * be some overlap between use of both ASIDs, until we invalidate the
         * TLB.
         */
-       arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, cd);
+       arm_smmu_update_ctx_desc_devices(smmu_domain, IOMMU_NO_PASID, cd);
 
        /* Invalidate TLB entries previously associated with that context */
        arm_smmu_tlb_inv_asid(smmu, asid);
@@ -247,7 +264,7 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
         * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
         * but disable translation.
         */
-       arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);
+       arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, &quiet_cd);
 
        arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
        arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
@@ -273,8 +290,10 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
                          struct mm_struct *mm)
 {
        int ret;
+       unsigned long flags;
        struct arm_smmu_ctx_desc *cd;
        struct arm_smmu_mmu_notifier *smmu_mn;
+       struct arm_smmu_master *master;
 
        list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
                if (smmu_mn->mn.mm == mm) {
@@ -304,7 +323,16 @@ arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
                goto err_free_cd;
        }
 
-       ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);
+       spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+       list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+               ret = arm_smmu_write_ctx_desc(master, mm->pasid, cd);
+               if (ret) {
+                       list_for_each_entry_from_reverse(master, &smmu_domain->devices, domain_head)
+                               arm_smmu_write_ctx_desc(master, mm->pasid, NULL);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
        if (ret)
                goto err_put_notifier;
 
@@ -329,7 +357,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
                return;
 
        list_del(&smmu_mn->list);
-       arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);
+
+       arm_smmu_update_ctx_desc_devices(smmu_domain, mm->pasid, NULL);
 
        /*
         * If we went through clear(), we've already invalidated, and no
@@ -345,8 +374,7 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
        arm_smmu_free_shared_cd(cd);
 }
 
-static struct iommu_sva *
-__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
+static int __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
 {
        int ret;
        struct arm_smmu_bond *bond;
@@ -355,23 +383,13 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
        if (!master || !master->sva_enabled)
-               return ERR_PTR(-ENODEV);
-
-       /* If bind() was already called for this {dev, mm} pair, reuse it. */
-       list_for_each_entry(bond, &master->bonds, list) {
-               if (bond->mm == mm) {
-                       refcount_inc(&bond->refs);
-                       return &bond->sva;
-               }
-       }
+               return -ENODEV;
 
        bond = kzalloc(sizeof(*bond), GFP_KERNEL);
        if (!bond)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        bond->mm = mm;
-       bond->sva.dev = dev;
-       refcount_set(&bond->refs, 1);
 
        bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
        if (IS_ERR(bond->smmu_mn)) {
@@ -380,11 +398,11 @@ __arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm)
        }
 
        list_add(&bond->list, &master->bonds);
-       return &bond->sva;
+       return 0;
 
 err_free_bond:
        kfree(bond);
-       return ERR_PTR(ret);
+       return ret;
 }
 
 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
@@ -550,7 +568,7 @@ void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
                }
        }
 
-       if (!WARN_ON(!bond) && refcount_dec_and_test(&bond->refs)) {
+       if (!WARN_ON(!bond)) {
                list_del(&bond->list);
                arm_smmu_mmu_notifier_put(bond->smmu_mn);
                kfree(bond);
@@ -562,13 +580,10 @@ static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
                                      struct device *dev, ioasid_t id)
 {
        int ret = 0;
-       struct iommu_sva *handle;
        struct mm_struct *mm = domain->mm;
 
        mutex_lock(&sva_lock);
-       handle = __arm_smmu_sva_bind(dev, mm);
-       if (IS_ERR(handle))
-               ret = PTR_ERR(handle);
+       ret = __arm_smmu_sva_bind(dev, mm);
        mutex_unlock(&sva_lock);
 
        return ret;
index bd0a596f9863a32eb11316822d424ec6349de0ef..7445454c2af244f03b9274db12e3e4dd325e31ab 100644 (file)
@@ -971,14 +971,12 @@ void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid)
        arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 }
 
-static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
+static void arm_smmu_sync_cd(struct arm_smmu_master *master,
                             int ssid, bool leaf)
 {
        size_t i;
-       unsigned long flags;
-       struct arm_smmu_master *master;
        struct arm_smmu_cmdq_batch cmds;
-       struct arm_smmu_device *smmu = smmu_domain->smmu;
+       struct arm_smmu_device *smmu = master->smmu;
        struct arm_smmu_cmdq_ent cmd = {
                .opcode = CMDQ_OP_CFGI_CD,
                .cfgi   = {
@@ -988,15 +986,10 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
        };
 
        cmds.num = 0;
-
-       spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-       list_for_each_entry(master, &smmu_domain->devices, domain_head) {
-               for (i = 0; i < master->num_streams; i++) {
-                       cmd.cfgi.sid = master->streams[i].id;
-                       arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
-               }
+       for (i = 0; i < master->num_streams; i++) {
+               cmd.cfgi.sid = master->streams[i].id;
+               arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
        }
-       spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
        arm_smmu_cmdq_batch_submit(smmu, &cmds);
 }
@@ -1026,34 +1019,33 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
        WRITE_ONCE(*dst, cpu_to_le64(val));
 }
 
-static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_domain *smmu_domain,
-                                  u32 ssid)
+static __le64 *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, u32 ssid)
 {
        __le64 *l1ptr;
        unsigned int idx;
        struct arm_smmu_l1_ctx_desc *l1_desc;
-       struct arm_smmu_device *smmu = smmu_domain->smmu;
-       struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+       struct arm_smmu_device *smmu = master->smmu;
+       struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
 
-       if (smmu_domain->s1_cfg.s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
-               return cdcfg->cdtab + ssid * CTXDESC_CD_DWORDS;
+       if (cd_table->s1fmt == STRTAB_STE_0_S1FMT_LINEAR)
+               return cd_table->cdtab + ssid * CTXDESC_CD_DWORDS;
 
        idx = ssid >> CTXDESC_SPLIT;
-       l1_desc = &cdcfg->l1_desc[idx];
+       l1_desc = &cd_table->l1_desc[idx];
        if (!l1_desc->l2ptr) {
                if (arm_smmu_alloc_cd_leaf_table(smmu, l1_desc))
                        return NULL;
 
-               l1ptr = cdcfg->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
+               l1ptr = cd_table->cdtab + idx * CTXDESC_L1_DESC_DWORDS;
                arm_smmu_write_cd_l1_desc(l1ptr, l1_desc);
                /* An invalid L1CD can be cached */
-               arm_smmu_sync_cd(smmu_domain, ssid, false);
+               arm_smmu_sync_cd(master, ssid, false);
        }
        idx = ssid & (CTXDESC_L2_ENTRIES - 1);
        return l1_desc->l2ptr + idx * CTXDESC_CD_DWORDS;
 }
 
-int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+int arm_smmu_write_ctx_desc(struct arm_smmu_master *master, int ssid,
                            struct arm_smmu_ctx_desc *cd)
 {
        /*
@@ -1070,11 +1062,12 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
        u64 val;
        bool cd_live;
        __le64 *cdptr;
+       struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
 
-       if (WARN_ON(ssid >= (1 << smmu_domain->s1_cfg.s1cdmax)))
+       if (WARN_ON(ssid >= (1 << cd_table->s1cdmax)))
                return -E2BIG;
 
-       cdptr = arm_smmu_get_cd_ptr(smmu_domain, ssid);
+       cdptr = arm_smmu_get_cd_ptr(master, ssid);
        if (!cdptr)
                return -ENOMEM;
 
@@ -1098,11 +1091,11 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
                cdptr[3] = cpu_to_le64(cd->mair);
 
                /*
-                * STE is live, and the SMMU might read dwords of this CD in any
+                * STE may be live, and the SMMU might read dwords of this CD in any
                 * order. Ensure that it observes valid values before reading
                 * V=1.
                 */
-               arm_smmu_sync_cd(smmu_domain, ssid, true);
+               arm_smmu_sync_cd(master, ssid, true);
 
                val = cd->tcr |
 #ifdef __BIG_ENDIAN
@@ -1114,7 +1107,7 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
                        FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid) |
                        CTXDESC_CD_0_V;
 
-               if (smmu_domain->stall_enabled)
+               if (cd_table->stall_enabled)
                        val |= CTXDESC_CD_0_S;
        }
 
@@ -1128,44 +1121,45 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
         *   without first making the structure invalid.
         */
        WRITE_ONCE(cdptr[0], cpu_to_le64(val));
-       arm_smmu_sync_cd(smmu_domain, ssid, true);
+       arm_smmu_sync_cd(master, ssid, true);
        return 0;
 }
 
-static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
+static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master)
 {
        int ret;
        size_t l1size;
        size_t max_contexts;
-       struct arm_smmu_device *smmu = smmu_domain->smmu;
-       struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
-       struct arm_smmu_ctx_desc_cfg *cdcfg = &cfg->cdcfg;
+       struct arm_smmu_device *smmu = master->smmu;
+       struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
 
-       max_contexts = 1 << cfg->s1cdmax;
+       cd_table->stall_enabled = master->stall_enabled;
+       cd_table->s1cdmax = master->ssid_bits;
+       max_contexts = 1 << cd_table->s1cdmax;
 
        if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB) ||
            max_contexts <= CTXDESC_L2_ENTRIES) {
-               cfg->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
-               cdcfg->num_l1_ents = max_contexts;
+               cd_table->s1fmt = STRTAB_STE_0_S1FMT_LINEAR;
+               cd_table->num_l1_ents = max_contexts;
 
                l1size = max_contexts * (CTXDESC_CD_DWORDS << 3);
        } else {
-               cfg->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
-               cdcfg->num_l1_ents = DIV_ROUND_UP(max_contexts,
+               cd_table->s1fmt = STRTAB_STE_0_S1FMT_64K_L2;
+               cd_table->num_l1_ents = DIV_ROUND_UP(max_contexts,
                                                  CTXDESC_L2_ENTRIES);
 
-               cdcfg->l1_desc = devm_kcalloc(smmu->dev, cdcfg->num_l1_ents,
-                                             sizeof(*cdcfg->l1_desc),
+               cd_table->l1_desc = devm_kcalloc(smmu->dev, cd_table->num_l1_ents,
+                                             sizeof(*cd_table->l1_desc),
                                              GFP_KERNEL);
-               if (!cdcfg->l1_desc)
+               if (!cd_table->l1_desc)
                        return -ENOMEM;
 
-               l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+               l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
        }
 
-       cdcfg->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cdcfg->cdtab_dma,
+       cd_table->cdtab = dmam_alloc_coherent(smmu->dev, l1size, &cd_table->cdtab_dma,
                                           GFP_KERNEL);
-       if (!cdcfg->cdtab) {
+       if (!cd_table->cdtab) {
                dev_warn(smmu->dev, "failed to allocate context descriptor\n");
                ret = -ENOMEM;
                goto err_free_l1;
@@ -1174,42 +1168,42 @@ static int arm_smmu_alloc_cd_tables(struct arm_smmu_domain *smmu_domain)
        return 0;
 
 err_free_l1:
-       if (cdcfg->l1_desc) {
-               devm_kfree(smmu->dev, cdcfg->l1_desc);
-               cdcfg->l1_desc = NULL;
+       if (cd_table->l1_desc) {
+               devm_kfree(smmu->dev, cd_table->l1_desc);
+               cd_table->l1_desc = NULL;
        }
        return ret;
 }
 
-static void arm_smmu_free_cd_tables(struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_free_cd_tables(struct arm_smmu_master *master)
 {
        int i;
        size_t size, l1size;
-       struct arm_smmu_device *smmu = smmu_domain->smmu;
-       struct arm_smmu_ctx_desc_cfg *cdcfg = &smmu_domain->s1_cfg.cdcfg;
+       struct arm_smmu_device *smmu = master->smmu;
+       struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
 
-       if (cdcfg->l1_desc) {
+       if (cd_table->l1_desc) {
                size = CTXDESC_L2_ENTRIES * (CTXDESC_CD_DWORDS << 3);
 
-               for (i = 0; i < cdcfg->num_l1_ents; i++) {
-                       if (!cdcfg->l1_desc[i].l2ptr)
+               for (i = 0; i < cd_table->num_l1_ents; i++) {
+                       if (!cd_table->l1_desc[i].l2ptr)
                                continue;
 
                        dmam_free_coherent(smmu->dev, size,
-                                          cdcfg->l1_desc[i].l2ptr,
-                                          cdcfg->l1_desc[i].l2ptr_dma);
+                                          cd_table->l1_desc[i].l2ptr,
+                                          cd_table->l1_desc[i].l2ptr_dma);
                }
-               devm_kfree(smmu->dev, cdcfg->l1_desc);
-               cdcfg->l1_desc = NULL;
+               devm_kfree(smmu->dev, cd_table->l1_desc);
+               cd_table->l1_desc = NULL;
 
-               l1size = cdcfg->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
+               l1size = cd_table->num_l1_ents * (CTXDESC_L1_DESC_DWORDS << 3);
        } else {
-               l1size = cdcfg->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
+               l1size = cd_table->num_l1_ents * (CTXDESC_CD_DWORDS << 3);
        }
 
-       dmam_free_coherent(smmu->dev, l1size, cdcfg->cdtab, cdcfg->cdtab_dma);
-       cdcfg->cdtab_dma = 0;
-       cdcfg->cdtab = NULL;
+       dmam_free_coherent(smmu->dev, l1size, cd_table->cdtab, cd_table->cdtab_dma);
+       cd_table->cdtab_dma = 0;
+       cd_table->cdtab = NULL;
 }
 
 bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
@@ -1276,7 +1270,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
        u64 val = le64_to_cpu(dst[0]);
        bool ste_live = false;
        struct arm_smmu_device *smmu = NULL;
-       struct arm_smmu_s1_cfg *s1_cfg = NULL;
+       struct arm_smmu_ctx_desc_cfg *cd_table = NULL;
        struct arm_smmu_s2_cfg *s2_cfg = NULL;
        struct arm_smmu_domain *smmu_domain = NULL;
        struct arm_smmu_cmdq_ent prefetch_cmd = {
@@ -1294,7 +1288,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
        if (smmu_domain) {
                switch (smmu_domain->stage) {
                case ARM_SMMU_DOMAIN_S1:
-                       s1_cfg = &smmu_domain->s1_cfg;
+                       cd_table = &master->cd_table;
                        break;
                case ARM_SMMU_DOMAIN_S2:
                case ARM_SMMU_DOMAIN_NESTED:
@@ -1325,7 +1319,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
        val = STRTAB_STE_0_V;
 
        /* Bypass/fault */
-       if (!smmu_domain || !(s1_cfg || s2_cfg)) {
+       if (!smmu_domain || !(cd_table || s2_cfg)) {
                if (!smmu_domain && disable_bypass)
                        val |= FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT);
                else
@@ -1344,7 +1338,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                return;
        }
 
-       if (s1_cfg) {
+       if (cd_table) {
                u64 strw = smmu->features & ARM_SMMU_FEAT_E2H ?
                        STRTAB_STE_1_STRW_EL2 : STRTAB_STE_1_STRW_NSEL1;
 
@@ -1360,10 +1354,10 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_master *master, u32 sid,
                    !master->stall_enabled)
                        dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
 
-               val |= (s1_cfg->cdcfg.cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
+               val |= (cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
                        FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
-                       FIELD_PREP(STRTAB_STE_0_S1CDMAX, s1_cfg->s1cdmax) |
-                       FIELD_PREP(STRTAB_STE_0_S1FMT, s1_cfg->s1fmt);
+                       FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax) |
+                       FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt);
        }
 
        if (s2_cfg) {
@@ -1869,7 +1863,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
         * careful, 007.
         */
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-               arm_smmu_tlb_inv_asid(smmu, smmu_domain->s1_cfg.cd.asid);
+               arm_smmu_tlb_inv_asid(smmu, smmu_domain->cd.asid);
        } else {
                cmd.opcode      = CMDQ_OP_TLBI_S12_VMALL;
                cmd.tlbi.vmid   = smmu_domain->s2_cfg.vmid;
@@ -1962,7 +1956,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
                cmd.opcode      = smmu_domain->smmu->features & ARM_SMMU_FEAT_E2H ?
                                  CMDQ_OP_TLBI_EL2_VA : CMDQ_OP_TLBI_NH_VA;
-               cmd.tlbi.asid   = smmu_domain->s1_cfg.cd.asid;
+               cmd.tlbi.asid   = smmu_domain->cd.asid;
        } else {
                cmd.opcode      = CMDQ_OP_TLBI_S2_IPA;
                cmd.tlbi.vmid   = smmu_domain->s2_cfg.vmid;
@@ -2067,15 +2061,11 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 
        free_io_pgtable_ops(smmu_domain->pgtbl_ops);
 
-       /* Free the CD and ASID, if we allocated them */
+       /* Free the ASID or VMID */
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-               struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
-
                /* Prevent SVA from touching the CD while we're freeing it */
                mutex_lock(&arm_smmu_asid_lock);
-               if (cfg->cdcfg.cdtab)
-                       arm_smmu_free_cd_tables(smmu_domain);
-               arm_smmu_free_asid(&cfg->cd);
+               arm_smmu_free_asid(&smmu_domain->cd);
                mutex_unlock(&arm_smmu_asid_lock);
        } else {
                struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
@@ -2087,66 +2077,43 @@ static void arm_smmu_domain_free(struct iommu_domain *domain)
 }
 
 static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
-                                      struct arm_smmu_master *master,
                                       struct io_pgtable_cfg *pgtbl_cfg)
 {
        int ret;
        u32 asid;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
-       struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
+       struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
        typeof(&pgtbl_cfg->arm_lpae_s1_cfg.tcr) tcr = &pgtbl_cfg->arm_lpae_s1_cfg.tcr;
 
-       refcount_set(&cfg->cd.refs, 1);
+       refcount_set(&cd->refs, 1);
 
        /* Prevent SVA from modifying the ASID until it is written to the CD */
        mutex_lock(&arm_smmu_asid_lock);
-       ret = xa_alloc(&arm_smmu_asid_xa, &asid, &cfg->cd,
+       ret = xa_alloc(&arm_smmu_asid_xa, &asid, cd,
                       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
        if (ret)
                goto out_unlock;
 
-       cfg->s1cdmax = master->ssid_bits;
-
-       smmu_domain->stall_enabled = master->stall_enabled;
-
-       ret = arm_smmu_alloc_cd_tables(smmu_domain);
-       if (ret)
-               goto out_free_asid;
-
-       cfg->cd.asid    = (u16)asid;
-       cfg->cd.ttbr    = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
-       cfg->cd.tcr     = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
+       cd->asid        = (u16)asid;
+       cd->ttbr        = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;
+       cd->tcr         = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, tcr->tsz) |
                          FIELD_PREP(CTXDESC_CD_0_TCR_TG0, tcr->tg) |
                          FIELD_PREP(CTXDESC_CD_0_TCR_IRGN0, tcr->irgn) |
                          FIELD_PREP(CTXDESC_CD_0_TCR_ORGN0, tcr->orgn) |
                          FIELD_PREP(CTXDESC_CD_0_TCR_SH0, tcr->sh) |
                          FIELD_PREP(CTXDESC_CD_0_TCR_IPS, tcr->ips) |
                          CTXDESC_CD_0_TCR_EPD1 | CTXDESC_CD_0_AA64;
-       cfg->cd.mair    = pgtbl_cfg->arm_lpae_s1_cfg.mair;
-
-       /*
-        * Note that this will end up calling arm_smmu_sync_cd() before
-        * the master has been added to the devices list for this domain.
-        * This isn't an issue because the STE hasn't been installed yet.
-        */
-       ret = arm_smmu_write_ctx_desc(smmu_domain, IOMMU_NO_PASID, &cfg->cd);
-       if (ret)
-               goto out_free_cd_tables;
+       cd->mair        = pgtbl_cfg->arm_lpae_s1_cfg.mair;
 
        mutex_unlock(&arm_smmu_asid_lock);
        return 0;
 
-out_free_cd_tables:
-       arm_smmu_free_cd_tables(smmu_domain);
-out_free_asid:
-       arm_smmu_free_asid(&cfg->cd);
 out_unlock:
        mutex_unlock(&arm_smmu_asid_lock);
        return ret;
 }
 
 static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
-                                      struct arm_smmu_master *master,
                                       struct io_pgtable_cfg *pgtbl_cfg)
 {
        int vmid;
@@ -2173,8 +2140,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
        return 0;
 }
 
-static int arm_smmu_domain_finalise(struct iommu_domain *domain,
-                                   struct arm_smmu_master *master)
+static int arm_smmu_domain_finalise(struct iommu_domain *domain)
 {
        int ret;
        unsigned long ias, oas;
@@ -2182,7 +2148,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
        struct io_pgtable_cfg pgtbl_cfg;
        struct io_pgtable_ops *pgtbl_ops;
        int (*finalise_stage_fn)(struct arm_smmu_domain *,
-                                struct arm_smmu_master *,
                                 struct io_pgtable_cfg *);
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -2234,7 +2199,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain,
        domain->geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
        domain->geometry.force_aperture = true;
 
-       ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg);
+       ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
        if (ret < 0) {
                free_io_pgtable_ops(pgtbl_ops);
                return ret;
@@ -2403,6 +2368,14 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
        master->domain = NULL;
        master->ats_enabled = false;
        arm_smmu_install_ste_for_dev(master);
+       /*
+        * Clearing the CD entry isn't strictly required to detach the domain
+        * since the table is uninstalled anyway, but it helps avoid confusion
+        * in the call to arm_smmu_write_ctx_desc on the next attach (which
+        * expects the entry to be empty).
+        */
+       if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 && master->cd_table.cdtab)
+               arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, NULL);
 }
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
@@ -2436,23 +2409,15 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 
        if (!smmu_domain->smmu) {
                smmu_domain->smmu = smmu;
-               ret = arm_smmu_domain_finalise(domain, master);
-               if (ret) {
+               ret = arm_smmu_domain_finalise(domain);
+               if (ret)
                        smmu_domain->smmu = NULL;
-                       goto out_unlock;
-               }
-       } else if (smmu_domain->smmu != smmu) {
-               ret = -EINVAL;
-               goto out_unlock;
-       } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
-                  master->ssid_bits != smmu_domain->s1_cfg.s1cdmax) {
+       } else if (smmu_domain->smmu != smmu)
                ret = -EINVAL;
-               goto out_unlock;
-       } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1 &&
-                  smmu_domain->stall_enabled != master->stall_enabled) {
-               ret = -EINVAL;
-               goto out_unlock;
-       }
+
+       mutex_unlock(&smmu_domain->init_mutex);
+       if (ret)
+               return ret;
 
        master->domain = smmu_domain;
 
@@ -2466,16 +2431,42 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
        if (smmu_domain->stage != ARM_SMMU_DOMAIN_BYPASS)
                master->ats_enabled = arm_smmu_ats_supported(master);
 
-       arm_smmu_install_ste_for_dev(master);
-
        spin_lock_irqsave(&smmu_domain->devices_lock, flags);
        list_add(&master->domain_head, &smmu_domain->devices);
        spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
+       if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+               if (!master->cd_table.cdtab) {
+                       ret = arm_smmu_alloc_cd_tables(master);
+                       if (ret) {
+                               master->domain = NULL;
+                               goto out_list_del;
+                       }
+               }
+
+               /*
+                * Prevent SVA from concurrently modifying the CD or writing to
+                * the CD entry
+                */
+               mutex_lock(&arm_smmu_asid_lock);
+               ret = arm_smmu_write_ctx_desc(master, IOMMU_NO_PASID, &smmu_domain->cd);
+               mutex_unlock(&arm_smmu_asid_lock);
+               if (ret) {
+                       master->domain = NULL;
+                       goto out_list_del;
+               }
+       }
+
+       arm_smmu_install_ste_for_dev(master);
+
        arm_smmu_enable_ats(master);
+       return 0;
+
+out_list_del:
+       spin_lock_irqsave(&smmu_domain->devices_lock, flags);
+       list_del(&master->domain_head);
+       spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
-out_unlock:
-       mutex_unlock(&smmu_domain->init_mutex);
        return ret;
 }
 
@@ -2720,6 +2711,8 @@ static void arm_smmu_release_device(struct device *dev)
        arm_smmu_detach_dev(master);
        arm_smmu_disable_pasid(master);
        arm_smmu_remove_master(master);
+       if (master->cd_table.cdtab)
+               arm_smmu_free_cd_tables(master);
        kfree(master);
 }
 
index 9915850dd4dbf771981cb378155ef8fee47ab4d6..961205ba86d25d0cbaa2c116ea9091b59ccd0ddf 100644 (file)
@@ -595,13 +595,11 @@ struct arm_smmu_ctx_desc_cfg {
        dma_addr_t                      cdtab_dma;
        struct arm_smmu_l1_ctx_desc     *l1_desc;
        unsigned int                    num_l1_ents;
-};
-
-struct arm_smmu_s1_cfg {
-       struct arm_smmu_ctx_desc_cfg    cdcfg;
-       struct arm_smmu_ctx_desc        cd;
        u8                              s1fmt;
+       /* log2 of the maximum number of CDs supported by this table */
        u8                              s1cdmax;
+       /* Whether CD entries in this table have the stall bit set. */
+       u8                              stall_enabled:1;
 };
 
 struct arm_smmu_s2_cfg {
@@ -697,6 +695,8 @@ struct arm_smmu_master {
        struct arm_smmu_domain          *domain;
        struct list_head                domain_head;
        struct arm_smmu_stream          *streams;
+       /* Locked by the iommu core using the group mutex */
+       struct arm_smmu_ctx_desc_cfg    cd_table;
        unsigned int                    num_streams;
        bool                            ats_enabled;
        bool                            stall_enabled;
@@ -719,13 +719,12 @@ struct arm_smmu_domain {
        struct mutex                    init_mutex; /* Protects smmu pointer */
 
        struct io_pgtable_ops           *pgtbl_ops;
-       bool                            stall_enabled;
        atomic_t                        nr_ats_masters;
 
        enum arm_smmu_domain_stage      stage;
        union {
-               struct arm_smmu_s1_cfg  s1_cfg;
-               struct arm_smmu_s2_cfg  s2_cfg;
+               struct arm_smmu_ctx_desc        cd;
+               struct arm_smmu_s2_cfg          s2_cfg;
        };
 
        struct iommu_domain             domain;
@@ -745,7 +744,7 @@ extern struct xarray arm_smmu_asid_xa;
 extern struct mutex arm_smmu_asid_lock;
 extern struct arm_smmu_ctx_desc quiet_cd;
 
-int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid,
+int arm_smmu_write_ctx_desc(struct arm_smmu_master *smmu_master, int ssid,
                            struct arm_smmu_ctx_desc *cd);
 void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
 void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
index 7f52ac67495fd10467095b5e395782238d5368b9..549ae4dba3a681b08832d00bf5057f7d803fc06c 100644 (file)
@@ -251,6 +251,7 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
        { .compatible = "qcom,sc7280-mss-pil" },
        { .compatible = "qcom,sc8180x-mdss" },
        { .compatible = "qcom,sc8280xp-mdss" },
+       { .compatible = "qcom,sdm670-mdss" },
        { .compatible = "qcom,sdm845-mdss" },
        { .compatible = "qcom,sdm845-mss-pil" },
        { .compatible = "qcom,sm6350-mdss" },
@@ -532,6 +533,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
        { .compatible = "qcom,sm6350-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sm6375-smmu-v2", .data = &qcom_smmu_v2_data },
        { .compatible = "qcom,sm6375-smmu-500", .data = &qcom_smmu_500_impl0_data },
+       { .compatible = "qcom,sm7150-smmu-v2", .data = &qcom_smmu_v2_data },
        { .compatible = "qcom,sm8150-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sm8250-smmu-500", .data = &qcom_smmu_500_impl0_data },
        { .compatible = "qcom,sm8350-smmu-500", .data = &qcom_smmu_500_impl0_data },
index 775a3cbaff4ed0522fadb2db80fd2e00f6dc1acc..97b2122032b2371915047aa03d3118005fc49496 100644 (file)
@@ -332,12 +332,10 @@ out_unlock:
        return ret;
 }
 
-static struct iommu_domain *qcom_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *qcom_iommu_domain_alloc_paging(struct device *dev)
 {
        struct qcom_iommu_domain *qcom_domain;
 
-       if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
-               return NULL;
        /*
         * Allocate the domain and initialise some of its data structures.
         * We can't really do anything meaningful until we've added a
@@ -400,6 +398,44 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
        return 0;
 }
 
+static int qcom_iommu_identity_attach(struct iommu_domain *identity_domain,
+                                     struct device *dev)
+{
+       struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+       struct qcom_iommu_domain *qcom_domain;
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+       struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+       unsigned int i;
+
+       if (domain == identity_domain || !domain)
+               return 0;
+
+       qcom_domain = to_qcom_iommu_domain(domain);
+       if (WARN_ON(!qcom_domain->iommu))
+               return -EINVAL;
+
+       pm_runtime_get_sync(qcom_iommu->dev);
+       for (i = 0; i < fwspec->num_ids; i++) {
+               struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
+
+               /* Disable the context bank: */
+               iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
+
+               ctx->domain = NULL;
+       }
+       pm_runtime_put_sync(qcom_iommu->dev);
+       return 0;
+}
+
+static struct iommu_domain_ops qcom_iommu_identity_ops = {
+       .attach_dev = qcom_iommu_identity_attach,
+};
+
+static struct iommu_domain qcom_iommu_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &qcom_iommu_identity_ops,
+};
+
 static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
                          phys_addr_t paddr, size_t pgsize, size_t pgcount,
                          int prot, gfp_t gfp, size_t *mapped)
@@ -565,8 +601,9 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 }
 
 static const struct iommu_ops qcom_iommu_ops = {
+       .identity_domain = &qcom_iommu_identity_domain,
        .capable        = qcom_iommu_capable,
-       .domain_alloc   = qcom_iommu_domain_alloc,
+       .domain_alloc_paging = qcom_iommu_domain_alloc_paging,
        .probe_device   = qcom_iommu_probe_device,
        .device_group   = generic_device_group,
        .of_xlate       = qcom_iommu_of_xlate,
index 4b1a88f514c9c06151e54036b295df6d48e2d8ba..85163a83df2f68f94a23c48731c45e755bf625bd 100644 (file)
@@ -43,14 +43,28 @@ enum iommu_dma_cookie_type {
        IOMMU_DMA_MSI_COOKIE,
 };
 
+enum iommu_dma_queue_type {
+       IOMMU_DMA_OPTS_PER_CPU_QUEUE,
+       IOMMU_DMA_OPTS_SINGLE_QUEUE,
+};
+
+struct iommu_dma_options {
+       enum iommu_dma_queue_type qt;
+       size_t          fq_size;
+       unsigned int    fq_timeout;
+};
+
 struct iommu_dma_cookie {
        enum iommu_dma_cookie_type      type;
        union {
                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
                struct {
                        struct iova_domain      iovad;
-
-                       struct iova_fq __percpu *fq;    /* Flush queue */
+                       /* Flush queue */
+                       union {
+                               struct iova_fq  *single_fq;
+                               struct iova_fq  __percpu *percpu_fq;
+                       };
                        /* Number of TLB flushes that have been started */
                        atomic64_t              fq_flush_start_cnt;
                        /* Number of TLB flushes that have been finished */
@@ -67,6 +81,8 @@ struct iommu_dma_cookie {
 
        /* Domain for flush queue callback; NULL if flush queue not in use */
        struct iommu_domain             *fq_domain;
+       /* Options for dma-iommu use */
+       struct iommu_dma_options        options;
        struct mutex                    mutex;
 };
 
@@ -84,10 +100,12 @@ static int __init iommu_dma_forcedac_setup(char *str)
 early_param("iommu.forcedac", iommu_dma_forcedac_setup);
 
 /* Number of entries per flush queue */
-#define IOVA_FQ_SIZE   256
+#define IOVA_DEFAULT_FQ_SIZE   256
+#define IOVA_SINGLE_FQ_SIZE    32768
 
 /* Timeout (in ms) after which entries are flushed from the queue */
-#define IOVA_FQ_TIMEOUT        10
+#define IOVA_DEFAULT_FQ_TIMEOUT        10
+#define IOVA_SINGLE_FQ_TIMEOUT 1000
 
 /* Flush queue entry for deferred flushing */
 struct iova_fq_entry {
@@ -99,18 +117,19 @@ struct iova_fq_entry {
 
 /* Per-CPU flush queue structure */
 struct iova_fq {
-       struct iova_fq_entry entries[IOVA_FQ_SIZE];
-       unsigned int head, tail;
        spinlock_t lock;
+       unsigned int head, tail;
+       unsigned int mod_mask;
+       struct iova_fq_entry entries[];
 };
 
 #define fq_ring_for_each(i, fq) \
-       for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
+       for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) & (fq)->mod_mask)
 
 static inline bool fq_full(struct iova_fq *fq)
 {
        assert_spin_locked(&fq->lock);
-       return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
+       return (((fq->tail + 1) & fq->mod_mask) == fq->head);
 }
 
 static inline unsigned int fq_ring_add(struct iova_fq *fq)
@@ -119,12 +138,12 @@ static inline unsigned int fq_ring_add(struct iova_fq *fq)
 
        assert_spin_locked(&fq->lock);
 
-       fq->tail = (idx + 1) % IOVA_FQ_SIZE;
+       fq->tail = (idx + 1) & fq->mod_mask;
 
        return idx;
 }
 
-static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
+static void fq_ring_free_locked(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
 {
        u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
        unsigned int idx;
@@ -141,10 +160,19 @@ static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
                               fq->entries[idx].iova_pfn,
                               fq->entries[idx].pages);
 
-               fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
+               fq->head = (fq->head + 1) & fq->mod_mask;
        }
 }
 
+static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&fq->lock, flags);
+       fq_ring_free_locked(cookie, fq);
+       spin_unlock_irqrestore(&fq->lock, flags);
+}
+
 static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
 {
        atomic64_inc(&cookie->fq_flush_start_cnt);
@@ -160,14 +188,11 @@ static void fq_flush_timeout(struct timer_list *t)
        atomic_set(&cookie->fq_timer_on, 0);
        fq_flush_iotlb(cookie);
 
-       for_each_possible_cpu(cpu) {
-               unsigned long flags;
-               struct iova_fq *fq;
-
-               fq = per_cpu_ptr(cookie->fq, cpu);
-               spin_lock_irqsave(&fq->lock, flags);
-               fq_ring_free(cookie, fq);
-               spin_unlock_irqrestore(&fq->lock, flags);
+       if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) {
+               fq_ring_free(cookie, cookie->single_fq);
+       } else {
+               for_each_possible_cpu(cpu)
+                       fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu));
        }
 }
 
@@ -188,7 +213,11 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
         */
        smp_mb();
 
-       fq = raw_cpu_ptr(cookie->fq);
+       if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
+               fq = cookie->single_fq;
+       else
+               fq = raw_cpu_ptr(cookie->percpu_fq);
+
        spin_lock_irqsave(&fq->lock, flags);
 
        /*
@@ -196,11 +225,11 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
         * flushed out on another CPU. This makes the fq_full() check below less
         * likely to be true.
         */
-       fq_ring_free(cookie, fq);
+       fq_ring_free_locked(cookie, fq);
 
        if (fq_full(fq)) {
                fq_flush_iotlb(cookie);
-               fq_ring_free(cookie, fq);
+               fq_ring_free_locked(cookie, fq);
        }
 
        idx = fq_ring_add(fq);
@@ -216,34 +245,95 @@ static void queue_iova(struct iommu_dma_cookie *cookie,
        if (!atomic_read(&cookie->fq_timer_on) &&
            !atomic_xchg(&cookie->fq_timer_on, 1))
                mod_timer(&cookie->fq_timer,
-                         jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+                         jiffies + msecs_to_jiffies(cookie->options.fq_timeout));
 }
 
-static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
+static void iommu_dma_free_fq_single(struct iova_fq *fq)
 {
-       int cpu, idx;
+       int idx;
 
-       if (!cookie->fq)
-               return;
+       fq_ring_for_each(idx, fq)
+               put_pages_list(&fq->entries[idx].freelist);
+       vfree(fq);
+}
+
+static void iommu_dma_free_fq_percpu(struct iova_fq __percpu *percpu_fq)
+{
+       int cpu, idx;
 
-       del_timer_sync(&cookie->fq_timer);
        /* The IOVAs will be torn down separately, so just free our queued pages */
        for_each_possible_cpu(cpu) {
-               struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);
+               struct iova_fq *fq = per_cpu_ptr(percpu_fq, cpu);
 
                fq_ring_for_each(idx, fq)
                        put_pages_list(&fq->entries[idx].freelist);
        }
 
-       free_percpu(cookie->fq);
+       free_percpu(percpu_fq);
+}
+
+static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
+{
+       if (!cookie->fq_domain)
+               return;
+
+       del_timer_sync(&cookie->fq_timer);
+       if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
+               iommu_dma_free_fq_single(cookie->single_fq);
+       else
+               iommu_dma_free_fq_percpu(cookie->percpu_fq);
+}
+
+static void iommu_dma_init_one_fq(struct iova_fq *fq, size_t fq_size)
+{
+       int i;
+
+       fq->head = 0;
+       fq->tail = 0;
+       fq->mod_mask = fq_size - 1;
+
+       spin_lock_init(&fq->lock);
+
+       for (i = 0; i < fq_size; i++)
+               INIT_LIST_HEAD(&fq->entries[i].freelist);
+}
+
+static int iommu_dma_init_fq_single(struct iommu_dma_cookie *cookie)
+{
+       size_t fq_size = cookie->options.fq_size;
+       struct iova_fq *queue;
+
+       queue = vmalloc(struct_size(queue, entries, fq_size));
+       if (!queue)
+               return -ENOMEM;
+       iommu_dma_init_one_fq(queue, fq_size);
+       cookie->single_fq = queue;
+
+       return 0;
+}
+
+static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
+{
+       size_t fq_size = cookie->options.fq_size;
+       struct iova_fq __percpu *queue;
+       int cpu;
+
+       queue = __alloc_percpu(struct_size(queue, entries, fq_size),
+                              __alignof__(*queue));
+       if (!queue)
+               return -ENOMEM;
+
+       for_each_possible_cpu(cpu)
+               iommu_dma_init_one_fq(per_cpu_ptr(queue, cpu), fq_size);
+       cookie->percpu_fq = queue;
+       return 0;
 }
 
 /* sysfs updates are serialised by the mutex of the group owning @domain */
 int iommu_dma_init_fq(struct iommu_domain *domain)
 {
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
-       struct iova_fq __percpu *queue;
-       int i, cpu;
+       int rc;
 
        if (cookie->fq_domain)
                return 0;
@@ -251,26 +341,16 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
        atomic64_set(&cookie->fq_flush_start_cnt,  0);
        atomic64_set(&cookie->fq_flush_finish_cnt, 0);
 
-       queue = alloc_percpu(struct iova_fq);
-       if (!queue) {
+       if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
+               rc = iommu_dma_init_fq_single(cookie);
+       else
+               rc = iommu_dma_init_fq_percpu(cookie);
+
+       if (rc) {
                pr_warn("iova flush queue initialization failed\n");
                return -ENOMEM;
        }
 
-       for_each_possible_cpu(cpu) {
-               struct iova_fq *fq = per_cpu_ptr(queue, cpu);
-
-               fq->head = 0;
-               fq->tail = 0;
-
-               spin_lock_init(&fq->lock);
-
-               for (i = 0; i < IOVA_FQ_SIZE; i++)
-                       INIT_LIST_HEAD(&fq->entries[i].freelist);
-       }
-
-       cookie->fq = queue;
-
        timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
        atomic_set(&cookie->fq_timer_on, 0);
        /*
@@ -554,6 +634,28 @@ static bool dev_use_sg_swiotlb(struct device *dev, struct scatterlist *sg,
        return false;
 }
 
+/**
+ * iommu_dma_init_options - Initialize dma-iommu options
+ * @options: The options to be initialized
+ * @dev: Device the options are set for
+ *
+ * This allows tuning dma-iommu specific to device properties
+ */
+static void iommu_dma_init_options(struct iommu_dma_options *options,
+                                  struct device *dev)
+{
+       /* Shadowing IOTLB flushes do better with a single large queue */
+       if (dev->iommu->shadow_on_flush) {
+               options->qt = IOMMU_DMA_OPTS_SINGLE_QUEUE;
+               options->fq_timeout = IOVA_SINGLE_FQ_TIMEOUT;
+               options->fq_size = IOVA_SINGLE_FQ_SIZE;
+       } else {
+               options->qt = IOMMU_DMA_OPTS_PER_CPU_QUEUE;
+               options->fq_size = IOVA_DEFAULT_FQ_SIZE;
+               options->fq_timeout = IOVA_DEFAULT_FQ_TIMEOUT;
+       }
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -614,6 +716,8 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
        if (ret)
                goto done_unlock;
 
+       iommu_dma_init_options(&cookie->options, dev);
+
        /* If the FQ fails we can simply fall back to strict mode */
        if (domain->type == IOMMU_DOMAIN_DMA_FQ &&
            (!device_iommu_capable(dev, IOMMU_CAP_DEFERRED_FLUSH) || iommu_dma_init_fq(domain)))
index c275fe71c4db32623f7794728a82696fff8c9c86..2c6e9094f1e979dade6045192432ca5cc129f3da 100644 (file)
@@ -24,6 +24,7 @@
 
 typedef u32 sysmmu_iova_t;
 typedef u32 sysmmu_pte_t;
+static struct iommu_domain exynos_identity_domain;
 
 /* We do not consider super section mapping (16MB) */
 #define SECT_ORDER 20
@@ -829,7 +830,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
                struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
 
                mutex_lock(&owner->rpm_lock);
-               if (data->domain) {
+               if (&data->domain->domain != &exynos_identity_domain) {
                        dev_dbg(data->sysmmu, "saving state\n");
                        __sysmmu_disable(data);
                }
@@ -847,7 +848,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
                struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);
 
                mutex_lock(&owner->rpm_lock);
-               if (data->domain) {
+               if (&data->domain->domain != &exynos_identity_domain) {
                        dev_dbg(data->sysmmu, "restoring state\n");
                        __sysmmu_enable(data);
                }
@@ -886,7 +887,7 @@ static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
                                   DMA_TO_DEVICE);
 }
 
-static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *exynos_iommu_domain_alloc_paging(struct device *dev)
 {
        struct exynos_iommu_domain *domain;
        dma_addr_t handle;
@@ -895,9 +896,6 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
        /* Check if correct PTE offsets are initialized */
        BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);
 
-       if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
-
        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;
@@ -980,17 +978,20 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
        kfree(domain);
 }
 
-static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
-                                   struct device *dev)
+static int exynos_iommu_identity_attach(struct iommu_domain *identity_domain,
+                                       struct device *dev)
 {
-       struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
-       phys_addr_t pagetable = virt_to_phys(domain->pgtable);
+       struct exynos_iommu_domain *domain;
+       phys_addr_t pagetable;
        struct sysmmu_drvdata *data, *next;
        unsigned long flags;
 
-       if (!has_sysmmu(dev) || owner->domain != iommu_domain)
-               return;
+       if (owner->domain == identity_domain)
+               return 0;
+
+       domain = to_exynos_domain(owner->domain);
+       pagetable = virt_to_phys(domain->pgtable);
 
        mutex_lock(&owner->rpm_lock);
 
@@ -1009,15 +1010,25 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
                list_del_init(&data->domain_node);
                spin_unlock(&data->lock);
        }
-       owner->domain = NULL;
+       owner->domain = identity_domain;
        spin_unlock_irqrestore(&domain->lock, flags);
 
        mutex_unlock(&owner->rpm_lock);
 
-       dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
-               &pagetable);
+       dev_dbg(dev, "%s: Restored IOMMU to IDENTITY from pgtable %pa\n",
+               __func__, &pagetable);
+       return 0;
 }
 
+static struct iommu_domain_ops exynos_identity_ops = {
+       .attach_dev = exynos_iommu_identity_attach,
+};
+
+static struct iommu_domain exynos_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &exynos_identity_ops,
+};
+
 static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
                                   struct device *dev)
 {
@@ -1026,12 +1037,11 @@ static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
        struct sysmmu_drvdata *data;
        phys_addr_t pagetable = virt_to_phys(domain->pgtable);
        unsigned long flags;
+       int err;
 
-       if (!has_sysmmu(dev))
-               return -ENODEV;
-
-       if (owner->domain)
-               exynos_iommu_detach_device(owner->domain, dev);
+       err = exynos_iommu_identity_attach(&exynos_identity_domain, dev);
+       if (err)
+               return err;
 
        mutex_lock(&owner->rpm_lock);
 
@@ -1219,7 +1229,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
  */
 static int exynos_iommu_map(struct iommu_domain *iommu_domain,
                            unsigned long l_iova, phys_addr_t paddr, size_t size,
-                           int prot, gfp_t gfp)
+                           size_t count, int prot, gfp_t gfp, size_t *mapped)
 {
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
        sysmmu_pte_t *entry;
@@ -1253,6 +1263,8 @@ static int exynos_iommu_map(struct iommu_domain *iommu_domain,
        if (ret)
                pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
                        __func__, ret, size, iova);
+       else
+               *mapped = size;
 
        spin_unlock_irqrestore(&domain->pgtablelock, flags);
 
@@ -1274,7 +1286,7 @@ static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain
 }
 
 static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
-                                unsigned long l_iova, size_t size,
+                                unsigned long l_iova, size_t size, size_t count,
                                 struct iommu_iotlb_gather *gather)
 {
        struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
@@ -1407,26 +1419,12 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
        return &data->iommu;
 }
 
-static void exynos_iommu_set_platform_dma(struct device *dev)
-{
-       struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
-
-       if (owner->domain) {
-               struct iommu_group *group = iommu_group_get(dev);
-
-               if (group) {
-                       exynos_iommu_detach_device(owner->domain, dev);
-                       iommu_group_put(group);
-               }
-       }
-}
-
 static void exynos_iommu_release_device(struct device *dev)
 {
        struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
        struct sysmmu_drvdata *data;
 
-       exynos_iommu_set_platform_dma(dev);
+       WARN_ON(exynos_iommu_identity_attach(&exynos_identity_domain, dev));
 
        list_for_each_entry(data, &owner->controllers, owner_node)
                device_link_del(data->link);
@@ -1457,6 +1455,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
 
                INIT_LIST_HEAD(&owner->controllers);
                mutex_init(&owner->rpm_lock);
+               owner->domain = &exynos_identity_domain;
                dev_iommu_priv_set(dev, owner);
        }
 
@@ -1471,19 +1470,17 @@ static int exynos_iommu_of_xlate(struct device *dev,
 }
 
 static const struct iommu_ops exynos_iommu_ops = {
-       .domain_alloc = exynos_iommu_domain_alloc,
+       .identity_domain = &exynos_identity_domain,
+       .domain_alloc_paging = exynos_iommu_domain_alloc_paging,
        .device_group = generic_device_group,
-#ifdef CONFIG_ARM
-       .set_platform_dma_ops = exynos_iommu_set_platform_dma,
-#endif
        .probe_device = exynos_iommu_probe_device,
        .release_device = exynos_iommu_release_device,
        .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
        .of_xlate = exynos_iommu_of_xlate,
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev     = exynos_iommu_attach_device,
-               .map            = exynos_iommu_map,
-               .unmap          = exynos_iommu_unmap,
+               .map_pages      = exynos_iommu_map,
+               .unmap_pages    = exynos_iommu_unmap,
                .iova_to_phys   = exynos_iommu_iova_to_phys,
                .free           = exynos_iommu_domain_free,
        }
index 4ac0e247ec2b51777b2b30d73e8d17fb2c401054..e9d2bff4659b7c09f4e14ea14032c861c088f7e1 100644 (file)
@@ -196,6 +196,13 @@ static struct iommu_domain *fsl_pamu_domain_alloc(unsigned type)
 {
        struct fsl_dma_domain *dma_domain;
 
+       /*
+        * FIXME: This isn't creating an unmanaged domain since the
+        * default_domain_ops do not have any map/unmap function it doesn't meet
+        * the requirements for __IOMMU_DOMAIN_PAGING. The only purpose seems to
+        * allow drivers/soc/fsl/qbman/qman_portal.c to do
+        * fsl_pamu_configure_l1_stash()
+        */
        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;
 
@@ -283,15 +290,33 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
        return ret;
 }
 
-static void fsl_pamu_set_platform_dma(struct device *dev)
+/*
+ * FIXME: fsl/pamu is completely broken in terms of how it works with the iommu
+ * API. Immediately after probe the HW is left in an IDENTITY translation and
+ * the driver provides a non-working UNMANAGED domain that it can switch over
+ * to. However it cannot switch back to an IDENTITY translation, instead it
+ * switches to what looks like BLOCKING.
+ */
+static int fsl_pamu_platform_attach(struct iommu_domain *platform_domain,
+                                   struct device *dev)
 {
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-       struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
+       struct fsl_dma_domain *dma_domain;
        const u32 *prop;
        int len;
        struct pci_dev *pdev = NULL;
        struct pci_controller *pci_ctl;
 
+       /*
+        * Hack to keep things working as they always have, only leaving an
+        * UNMANAGED domain makes it BLOCKING.
+        */
+       if (domain == platform_domain || !domain ||
+           domain->type != IOMMU_DOMAIN_UNMANAGED)
+               return 0;
+
+       dma_domain = to_fsl_dma_domain(domain);
+
        /*
         * Use LIODN of the PCI controller while detaching a
         * PCI device.
@@ -312,8 +337,18 @@ static void fsl_pamu_set_platform_dma(struct device *dev)
                detach_device(dev, dma_domain);
        else
                pr_debug("missing fsl,liodn property at %pOF\n", dev->of_node);
+       return 0;
 }
 
+static struct iommu_domain_ops fsl_pamu_platform_ops = {
+       .attach_dev = fsl_pamu_platform_attach,
+};
+
+static struct iommu_domain fsl_pamu_platform_domain = {
+       .type = IOMMU_DOMAIN_PLATFORM,
+       .ops = &fsl_pamu_platform_ops,
+};
+
 /* Set the domain stash attribute */
 int fsl_pamu_configure_l1_stash(struct iommu_domain *domain, u32 cpu)
 {
@@ -395,11 +430,11 @@ static struct iommu_device *fsl_pamu_probe_device(struct device *dev)
 }
 
 static const struct iommu_ops fsl_pamu_ops = {
+       .default_domain = &fsl_pamu_platform_domain,
        .capable        = fsl_pamu_capable,
        .domain_alloc   = fsl_pamu_domain_alloc,
        .probe_device   = fsl_pamu_probe_device,
        .device_group   = fsl_pamu_device_group,
-       .set_platform_dma_ops = fsl_pamu_set_platform_dma,
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev     = fsl_pamu_attach_device,
                .iova_to_phys   = fsl_pamu_iova_to_phys,
index 1f925285104eee0c318e7753e696d7f0188e88b1..dee61e513be6d44f72d5b2c7071bf4a30e696e5f 100644 (file)
@@ -111,6 +111,8 @@ static const struct iommu_regset iommu_regs_64[] = {
        IOMMU_REGSET_ENTRY(VCRSP),
 };
 
+static struct dentry *intel_iommu_debug;
+
 static int iommu_regset_show(struct seq_file *m, void *unused)
 {
        struct dmar_drhd_unit *drhd;
@@ -311,9 +313,14 @@ static inline unsigned long level_to_directory_size(int level)
 static inline void
 dump_page_info(struct seq_file *m, unsigned long iova, u64 *path)
 {
-       seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\t0x%016llx\n",
-                  iova >> VTD_PAGE_SHIFT, path[5], path[4],
-                  path[3], path[2], path[1]);
+       seq_printf(m, "0x%013lx |\t0x%016llx\t0x%016llx\t0x%016llx",
+                  iova >> VTD_PAGE_SHIFT, path[5], path[4], path[3]);
+       if (path[2]) {
+               seq_printf(m, "\t0x%016llx", path[2]);
+               if (path[1])
+                       seq_printf(m, "\t0x%016llx", path[1]);
+       }
+       seq_putc(m, '\n');
 }
 
 static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
@@ -340,58 +347,140 @@ static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
        }
 }
 
-static int __show_device_domain_translation(struct device *dev, void *data)
+static int domain_translation_struct_show(struct seq_file *m,
+                                         struct device_domain_info *info,
+                                         ioasid_t pasid)
 {
-       struct dmar_domain *domain;
-       struct seq_file *m = data;
-       u64 path[6] = { 0 };
-
-       domain = to_dmar_domain(iommu_get_domain_for_dev(dev));
-       if (!domain)
-               return 0;
+       bool scalable, found = false;
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+       u16 devfn, bus, seg;
 
-       seq_printf(m, "Device %s @0x%llx\n", dev_name(dev),
-                  (u64)virt_to_phys(domain->pgd));
-       seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");
+       bus = info->bus;
+       devfn = info->devfn;
+       seg = info->segment;
 
-       pgtable_walk_level(m, domain->pgd, domain->agaw + 2, 0, path);
-       seq_putc(m, '\n');
+       rcu_read_lock();
+       for_each_active_iommu(iommu, drhd) {
+               struct context_entry *context;
+               u64 pgd, path[6] = { 0 };
+               u32 sts, agaw;
 
-       /* Don't iterate */
-       return 1;
-}
+               if (seg != iommu->segment)
+                       continue;
 
-static int show_device_domain_translation(struct device *dev, void *data)
-{
-       struct iommu_group *group;
+               sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
+               if (!(sts & DMA_GSTS_TES)) {
+                       seq_printf(m, "DMA Remapping is not enabled on %s\n",
+                                  iommu->name);
+                       continue;
+               }
+               if (dmar_readq(iommu->reg + DMAR_RTADDR_REG) & DMA_RTADDR_SMT)
+                       scalable = true;
+               else
+                       scalable = false;
 
-       group = iommu_group_get(dev);
-       if (group) {
                /*
-                * The group->mutex is held across the callback, which will
-                * block calls to iommu_attach/detach_group/device. Hence,
+                * The iommu->lock is held across the callback, which will
+                * block calls to domain_attach/domain_detach. Hence,
                 * the domain of the device will not change during traversal.
                 *
-                * All devices in an iommu group share a single domain, hence
-                * we only dump the domain of the first device. Even though,
-                * this code still possibly races with the iommu_unmap()
+                * Traversing page table possibly races with the iommu_unmap()
                 * interface. This could be solved by RCU-freeing the page
                 * table pages in the iommu_unmap() path.
                 */
-               iommu_group_for_each_dev(group, data,
-                                        __show_device_domain_translation);
-               iommu_group_put(group);
+               spin_lock(&iommu->lock);
+
+               context = iommu_context_addr(iommu, bus, devfn, 0);
+               if (!context || !context_present(context))
+                       goto iommu_unlock;
+
+               if (scalable) { /* scalable mode */
+                       struct pasid_entry *pasid_tbl, *pasid_tbl_entry;
+                       struct pasid_dir_entry *dir_tbl, *dir_entry;
+                       u16 dir_idx, tbl_idx, pgtt;
+                       u64 pasid_dir_ptr;
+
+                       pasid_dir_ptr = context->lo & VTD_PAGE_MASK;
+
+                       /* Dump specified device domain mappings with PASID. */
+                       dir_idx = pasid >> PASID_PDE_SHIFT;
+                       tbl_idx = pasid & PASID_PTE_MASK;
+
+                       dir_tbl = phys_to_virt(pasid_dir_ptr);
+                       dir_entry = &dir_tbl[dir_idx];
+
+                       pasid_tbl = get_pasid_table_from_pde(dir_entry);
+                       if (!pasid_tbl)
+                               goto iommu_unlock;
+
+                       pasid_tbl_entry = &pasid_tbl[tbl_idx];
+                       if (!pasid_pte_is_present(pasid_tbl_entry))
+                               goto iommu_unlock;
+
+                       /*
+                        * According to the PASID Granular Translation Type (PGTT),
+                        * get the page table pointer.
+                        */
+                       pgtt = (u16)(pasid_tbl_entry->val[0] & GENMASK_ULL(8, 6)) >> 6;
+                       agaw = (u8)(pasid_tbl_entry->val[0] & GENMASK_ULL(4, 2)) >> 2;
+
+                       switch (pgtt) {
+                       case PASID_ENTRY_PGTT_FL_ONLY:
+                               pgd = pasid_tbl_entry->val[2];
+                               break;
+                       case PASID_ENTRY_PGTT_SL_ONLY:
+                       case PASID_ENTRY_PGTT_NESTED:
+                               pgd = pasid_tbl_entry->val[0];
+                               break;
+                       default:
+                               goto iommu_unlock;
+                       }
+                       pgd &= VTD_PAGE_MASK;
+               } else { /* legacy mode */
+                       pgd = context->lo & VTD_PAGE_MASK;
+                       agaw = context->hi & 7;
+               }
+
+               seq_printf(m, "Device %04x:%02x:%02x.%x ",
+                          iommu->segment, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+
+               if (scalable)
+                       seq_printf(m, "with pasid %x @0x%llx\n", pasid, pgd);
+               else
+                       seq_printf(m, "@0x%llx\n", pgd);
+
+               seq_printf(m, "%-17s\t%-18s\t%-18s\t%-18s\t%-18s\t%-s\n",
+                          "IOVA_PFN", "PML5E", "PML4E", "PDPE", "PDE", "PTE");
+               pgtable_walk_level(m, phys_to_virt(pgd), agaw + 2, 0, path);
+
+               found = true;
+iommu_unlock:
+               spin_unlock(&iommu->lock);
+               if (found)
+                       break;
        }
+       rcu_read_unlock();
 
        return 0;
 }
 
-static int domain_translation_struct_show(struct seq_file *m, void *unused)
+static int dev_domain_translation_struct_show(struct seq_file *m, void *unused)
+{
+       struct device_domain_info *info = (struct device_domain_info *)m->private;
+
+       return domain_translation_struct_show(m, info, IOMMU_NO_PASID);
+}
+DEFINE_SHOW_ATTRIBUTE(dev_domain_translation_struct);
+
+static int pasid_domain_translation_struct_show(struct seq_file *m, void *unused)
 {
-       return bus_for_each_dev(&pci_bus_type, NULL, m,
-                               show_device_domain_translation);
+       struct dev_pasid_info *dev_pasid = (struct dev_pasid_info *)m->private;
+       struct device_domain_info *info = dev_iommu_priv_get(dev_pasid->dev);
+
+       return domain_translation_struct_show(m, info, dev_pasid->pasid);
 }
-DEFINE_SHOW_ATTRIBUTE(domain_translation_struct);
+DEFINE_SHOW_ATTRIBUTE(pasid_domain_translation_struct);
 
 static void invalidation_queue_entry_show(struct seq_file *m,
                                          struct intel_iommu *iommu)
@@ -666,16 +755,12 @@ static const struct file_operations dmar_perf_latency_fops = {
 
 void __init intel_iommu_debugfs_init(void)
 {
-       struct dentry *intel_iommu_debug = debugfs_create_dir("intel",
-                                               iommu_debugfs_dir);
+       intel_iommu_debug = debugfs_create_dir("intel", iommu_debugfs_dir);
 
        debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
                            &iommu_regset_fops);
        debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
                            NULL, &dmar_translation_struct_fops);
-       debugfs_create_file("domain_translation_struct", 0444,
-                           intel_iommu_debug, NULL,
-                           &domain_translation_struct_fops);
        debugfs_create_file("invalidation_queue", 0444, intel_iommu_debug,
                            NULL, &invalidation_queue_fops);
 #ifdef CONFIG_IRQ_REMAP
@@ -685,3 +770,51 @@ void __init intel_iommu_debugfs_init(void)
        debugfs_create_file("dmar_perf_latency", 0644, intel_iommu_debug,
                            NULL, &dmar_perf_latency_fops);
 }
+
+/*
+ * Create a debugfs directory for each device, and then create a
+ * debugfs file in this directory for users to dump the page table
+ * of the default domain. e.g.
+ * /sys/kernel/debug/iommu/intel/0000:00:01.0/domain_translation_struct
+ */
+void intel_iommu_debugfs_create_dev(struct device_domain_info *info)
+{
+       info->debugfs_dentry = debugfs_create_dir(dev_name(info->dev), intel_iommu_debug);
+
+       debugfs_create_file("domain_translation_struct", 0444, info->debugfs_dentry,
+                           info, &dev_domain_translation_struct_fops);
+}
+
+/* Remove the device debugfs directory. */
+void intel_iommu_debugfs_remove_dev(struct device_domain_info *info)
+{
+       debugfs_remove_recursive(info->debugfs_dentry);
+}
+
+/*
+ * Create a debugfs directory per pair of {device, pasid}, then create the
+ * corresponding debugfs file in this directory for users to dump its page
+ * table. e.g.
+ * /sys/kernel/debug/iommu/intel/0000:00:01.0/1/domain_translation_struct
+ *
+ * The debugfs only dumps the page tables whose mappings are created and
+ * destroyed by the iommu_map/unmap() interfaces. Check the mapping type
+ * of the domain before creating debugfs directory.
+ */
+void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid)
+{
+       struct device_domain_info *info = dev_iommu_priv_get(dev_pasid->dev);
+       char dir_name[10];
+
+       sprintf(dir_name, "%x", dev_pasid->pasid);
+       dev_pasid->debugfs_dentry = debugfs_create_dir(dir_name, info->debugfs_dentry);
+
+       debugfs_create_file("domain_translation_struct", 0444, dev_pasid->debugfs_dentry,
+                           dev_pasid, &pasid_domain_translation_struct_fops);
+}
+
+/* Remove the device pasid debugfs directory. */
+void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid)
+{
+       debugfs_remove_recursive(dev_pasid->debugfs_dentry);
+}
index d1037280abf7a2bc4fd51d5e6de5ce0932c66424..3531b956556c7df268ee32a5053a755d5ce0630b 100644 (file)
@@ -4016,9 +4016,9 @@ static int blocking_domain_attach_dev(struct iommu_domain *domain,
 }
 
 static struct iommu_domain blocking_domain = {
+       .type = IOMMU_DOMAIN_BLOCKED,
        .ops = &(const struct iommu_domain_ops) {
                .attach_dev     = blocking_domain_attach_dev,
-               .free           = intel_iommu_domain_free
        }
 };
 
@@ -4028,8 +4028,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
        struct iommu_domain *domain;
 
        switch (type) {
-       case IOMMU_DOMAIN_BLOCKED:
-               return &blocking_domain;
        case IOMMU_DOMAIN_DMA:
        case IOMMU_DOMAIN_UNMANAGED:
                dmar_domain = alloc_domain(type);
@@ -4111,7 +4109,7 @@ intel_iommu_domain_alloc_user(struct device *dev, u32 flags,
 
 static void intel_iommu_domain_free(struct iommu_domain *domain)
 {
-       if (domain != &si_domain->domain && domain != &blocking_domain)
+       if (domain != &si_domain->domain)
                domain_exit(to_dmar_domain(domain));
 }
 
@@ -4465,6 +4463,8 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
                }
        }
 
+       intel_iommu_debugfs_create_dev(info);
+
        return &iommu->iommu;
 }
 
@@ -4474,6 +4474,7 @@ static void intel_iommu_release_device(struct device *dev)
 
        dmar_remove_one_dev_info(dev);
        intel_pasid_free_table(dev);
+       intel_iommu_debugfs_remove_dev(info);
        dev_iommu_priv_set(dev, NULL);
        kfree(info);
        set_dma_ops(dev, NULL);
@@ -4718,8 +4719,8 @@ static bool risky_device(struct pci_dev *pdev)
        return false;
 }
 
-static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
-                                      unsigned long iova, size_t size)
+static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
+                                     unsigned long iova, size_t size)
 {
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        unsigned long pages = aligned_nrpages(iova, size);
@@ -4729,6 +4730,7 @@ static void intel_iommu_iotlb_sync_map(struct iommu_domain *domain,
 
        xa_for_each(&dmar_domain->iommu_array, i, info)
                __mapping_notify_one(info->iommu, dmar_domain, pfn, pages);
+       return 0;
 }
 
 static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
@@ -4766,6 +4768,7 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
        spin_unlock_irqrestore(&dmar_domain->lock, flags);
 
        domain_detach_iommu(dmar_domain, iommu);
+       intel_iommu_debugfs_remove_dev_pasid(dev_pasid);
        kfree(dev_pasid);
 out_tear_down:
        intel_pasid_tear_down_entry(iommu, dev, pasid, false);
@@ -4821,6 +4824,9 @@ static int intel_iommu_set_dev_pasid(struct iommu_domain *domain,
        list_add(&dev_pasid->link_domain, &dmar_domain->dev_pasids);
        spin_unlock_irqrestore(&dmar_domain->lock, flags);
 
+       if (domain->type & __IOMMU_DOMAIN_PAGING)
+               intel_iommu_debugfs_create_dev_pasid(dev_pasid);
+
        return 0;
 out_detach_iommu:
        domain_detach_iommu(dmar_domain, iommu);
@@ -4925,6 +4931,7 @@ const struct iommu_dirty_ops intel_dirty_ops = {
 };
 
 const struct iommu_ops intel_iommu_ops = {
+       .blocked_domain         = &blocking_domain,
        .capable                = intel_iommu_capable,
        .hw_info                = intel_iommu_hw_info,
        .domain_alloc           = intel_iommu_domain_alloc,
index d796d0d9b114a4cf29bda9202636a05df091421d..65d37a138c75d48ccac9a3de4ea68573a1c1fc6b 100644 (file)
@@ -749,12 +749,18 @@ struct device_domain_info {
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
        struct pasid_table *pasid_table; /* pasid table */
+#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
+       struct dentry *debugfs_dentry; /* pointer to device directory dentry */
+#endif
 };
 
 struct dev_pasid_info {
        struct list_head link_domain;   /* link to domain siblings */
        struct device *dev;
        ioasid_t pasid;
+#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
+       struct dentry *debugfs_dentry; /* pointer to pasid directory dentry */
+#endif
 };
 
 static inline void __iommu_flush_cache(
@@ -935,8 +941,16 @@ static inline void intel_svm_remove_dev_pasid(struct device *dev, ioasid_t pasid
 
 #ifdef CONFIG_INTEL_IOMMU_DEBUGFS
 void intel_iommu_debugfs_init(void);
+void intel_iommu_debugfs_create_dev(struct device_domain_info *info);
+void intel_iommu_debugfs_remove_dev(struct device_domain_info *info);
+void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid);
+void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid);
 #else
 static inline void intel_iommu_debugfs_init(void) {}
+static inline void intel_iommu_debugfs_create_dev(struct device_domain_info *info) {}
+static inline void intel_iommu_debugfs_remove_dev(struct device_domain_info *info) {}
+static inline void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid) {}
+static inline void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid) {}
 #endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
 
 extern const struct attribute_group *intel_iommu_groups[];
index c146378c7d032c3805f46e6db24a6a9e47e4a264..f17a1113f3d6a35daf9cff1def8f4f5158416ea2 100644 (file)
@@ -37,7 +37,6 @@
 #include "iommu-priv.h"
 
 #include "iommu-sva.h"
-#include "iommu-priv.h"
 
 static struct kset *iommu_group_kset;
 static DEFINE_IDA(iommu_group_ida);
@@ -96,8 +95,8 @@ static const char * const iommu_group_resv_type_string[] = {
 static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data);
 static void iommu_release_device(struct device *dev);
-static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
-                                                unsigned type);
+static struct iommu_domain *
+__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type);
 static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev);
 static int __iommu_attach_group(struct iommu_domain *domain,
@@ -184,6 +183,8 @@ static const char *iommu_domain_type_str(unsigned int t)
        case IOMMU_DOMAIN_DMA:
        case IOMMU_DOMAIN_DMA_FQ:
                return "Translated";
+       case IOMMU_DOMAIN_PLATFORM:
+               return "Platform";
        default:
                return "Unknown";
        }
@@ -290,6 +291,10 @@ void iommu_device_unregister(struct iommu_device *iommu)
        spin_lock(&iommu_device_lock);
        list_del(&iommu->list);
        spin_unlock(&iommu_device_lock);
+
+       /* Pairs with the alloc in generic_single_device_group() */
+       iommu_group_put(iommu->singleton_group);
+       iommu->singleton_group = NULL;
 }
 EXPORT_SYMBOL_GPL(iommu_device_unregister);
 
@@ -404,6 +409,7 @@ static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
                ret = PTR_ERR(iommu_dev);
                goto err_module_put;
        }
+       dev->iommu->iommu_dev = iommu_dev;
 
        ret = iommu_device_link(iommu_dev, dev);
        if (ret)
@@ -418,7 +424,6 @@ static int iommu_init_device(struct device *dev, const struct iommu_ops *ops)
        }
        dev->iommu_group = group;
 
-       dev->iommu->iommu_dev = iommu_dev;
        dev->iommu->max_pasids = dev_iommu_get_max_pasids(dev);
        if (ops->is_attach_deferred)
                dev->iommu->attach_deferred = ops->is_attach_deferred(dev);
@@ -432,6 +437,7 @@ err_release:
 err_module_put:
        module_put(ops->owner);
 err_free:
+       dev->iommu->iommu_dev = NULL;
        dev_iommu_free(dev);
        return ret;
 }
@@ -1636,6 +1642,27 @@ struct iommu_group *generic_device_group(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(generic_device_group);
 
+/*
+ * Generic device_group call-back function. It just allocates one
+ * iommu-group per iommu driver instance shared by every device
+ * probed by that iommu driver.
+ */
+struct iommu_group *generic_single_device_group(struct device *dev)
+{
+       struct iommu_device *iommu = dev->iommu->iommu_dev;
+
+       if (!iommu->singleton_group) {
+               struct iommu_group *group;
+
+               group = iommu_group_alloc();
+               if (IS_ERR(group))
+                       return group;
+               iommu->singleton_group = group;
+       }
+       return iommu_group_ref_get(iommu->singleton_group);
+}
+EXPORT_SYMBOL_GPL(generic_single_device_group);
+
 /*
  * Use standard PCI bus topology, isolation features, and DMA alias quirks
  * to find or create an IOMMU group for a device.
@@ -1717,26 +1744,29 @@ struct iommu_group *fsl_mc_device_group(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(fsl_mc_device_group);
 
-static int iommu_get_def_domain_type(struct device *dev)
-{
-       const struct iommu_ops *ops = dev_iommu_ops(dev);
-
-       if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
-               return IOMMU_DOMAIN_DMA;
-
-       if (ops->def_domain_type)
-               return ops->def_domain_type(dev);
-
-       return 0;
-}
-
 static struct iommu_domain *
-__iommu_group_alloc_default_domain(const struct bus_type *bus,
-                                  struct iommu_group *group, int req_type)
+__iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
 {
        if (group->default_domain && group->default_domain->type == req_type)
                return group->default_domain;
-       return __iommu_domain_alloc(bus, req_type);
+       return __iommu_group_domain_alloc(group, req_type);
+}
+
+/*
+ * Returns the iommu_ops for the devices in an iommu group.
+ *
+ * It is assumed that all devices in an iommu group are managed by a single
+ * IOMMU unit. Therefore, this returns the dev_iommu_ops of the first device
+ * in the group.
+ */
+static const struct iommu_ops *group_iommu_ops(struct iommu_group *group)
+{
+       struct group_device *device =
+               list_first_entry(&group->devices, struct group_device, list);
+
+       lockdep_assert_held(&group->mutex);
+
+       return dev_iommu_ops(device->dev);
 }
 
 /*
@@ -1746,25 +1776,34 @@ __iommu_group_alloc_default_domain(const struct bus_type *bus,
 static struct iommu_domain *
 iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
 {
-       const struct bus_type *bus =
-               list_first_entry(&group->devices, struct group_device, list)
-                       ->dev->bus;
+       const struct iommu_ops *ops = group_iommu_ops(group);
        struct iommu_domain *dom;
 
        lockdep_assert_held(&group->mutex);
 
+       /*
+        * Allow legacy drivers to specify the domain that will be the default
+        * domain. This should always be either an IDENTITY/BLOCKED/PLATFORM
+        * domain. Do not use in new drivers.
+        */
+       if (ops->default_domain) {
+               if (req_type)
+                       return NULL;
+               return ops->default_domain;
+       }
+
        if (req_type)
-               return __iommu_group_alloc_default_domain(bus, group, req_type);
+               return __iommu_group_alloc_default_domain(group, req_type);
 
        /* The driver gave no guidance on what type to use, try the default */
-       dom = __iommu_group_alloc_default_domain(bus, group, iommu_def_domain_type);
+       dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type);
        if (dom)
                return dom;
 
        /* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
        if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
                return NULL;
-       dom = __iommu_group_alloc_default_domain(bus, group, IOMMU_DOMAIN_DMA);
+       dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA);
        if (!dom)
                return NULL;
 
@@ -1808,40 +1847,109 @@ static int iommu_bus_notifier(struct notifier_block *nb,
        return 0;
 }
 
-/* A target_type of 0 will select the best domain type and cannot fail */
+/*
+ * Combine the driver's chosen def_domain_type across all the devices in a
+ * group. Drivers must give a consistent result.
+ */
+static int iommu_get_def_domain_type(struct iommu_group *group,
+                                    struct device *dev, int cur_type)
+{
+       const struct iommu_ops *ops = group_iommu_ops(group);
+       int type;
+
+       if (!ops->def_domain_type)
+               return cur_type;
+
+       type = ops->def_domain_type(dev);
+       if (!type || cur_type == type)
+               return cur_type;
+       if (!cur_type)
+               return type;
+
+       dev_err_ratelimited(
+               dev,
+               "IOMMU driver error, requesting conflicting def_domain_type, %s and %s, for devices in group %u.\n",
+               iommu_domain_type_str(cur_type), iommu_domain_type_str(type),
+               group->id);
+
+       /*
+        * Try to recover, drivers are allowed to force IDENTITY or DMA, IDENTITY
+        * takes precedence.
+        */
+       if (type == IOMMU_DOMAIN_IDENTITY)
+               return type;
+       return cur_type;
+}
+
+/*
+ * A target_type of 0 will select the best domain type. 0 can be returned in
+ * this case meaning the global default should be used.
+ */
 static int iommu_get_default_domain_type(struct iommu_group *group,
                                         int target_type)
 {
-       int best_type = target_type;
+       struct device *untrusted = NULL;
        struct group_device *gdev;
-       struct device *last_dev;
+       int driver_type = 0;
 
        lockdep_assert_held(&group->mutex);
 
+       /*
+        * ARM32 drivers supporting CONFIG_ARM_DMA_USE_IOMMU can declare an
+        * identity_domain and it will automatically become their default
+        * domain. Later on ARM_DMA_USE_IOMMU will install its UNMANAGED domain.
+        * Override the selection to IDENTITY.
+        */
+       if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
+               static_assert(!(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) &&
+                               IS_ENABLED(CONFIG_IOMMU_DMA)));
+               driver_type = IOMMU_DOMAIN_IDENTITY;
+       }
+
        for_each_group_device(group, gdev) {
-               unsigned int type = iommu_get_def_domain_type(gdev->dev);
-
-               if (best_type && type && best_type != type) {
-                       if (target_type) {
-                               dev_err_ratelimited(
-                                       gdev->dev,
-                                       "Device cannot be in %s domain\n",
-                                       iommu_domain_type_str(target_type));
+               driver_type = iommu_get_def_domain_type(group, gdev->dev,
+                                                       driver_type);
+
+               if (dev_is_pci(gdev->dev) && to_pci_dev(gdev->dev)->untrusted) {
+                       /*
+                        * No ARM32 using systems will set untrusted, it cannot
+                        * work.
+                        */
+                       if (WARN_ON(IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)))
                                return -1;
-                       }
+                       untrusted = gdev->dev;
+               }
+       }
 
-                       dev_warn(
-                               gdev->dev,
-                               "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
-                               iommu_domain_type_str(type), dev_name(last_dev),
-                               iommu_domain_type_str(best_type));
-                       return 0;
+       /*
+        * If the common dma ops are not selected in kconfig then we cannot use
+        * IOMMU_DOMAIN_DMA at all. Force IDENTITY if nothing else has been
+        * selected.
+        */
+       if (!IS_ENABLED(CONFIG_IOMMU_DMA)) {
+               if (WARN_ON(driver_type == IOMMU_DOMAIN_DMA))
+                       return -1;
+               if (!driver_type)
+                       driver_type = IOMMU_DOMAIN_IDENTITY;
+       }
+
+       if (untrusted) {
+               if (driver_type && driver_type != IOMMU_DOMAIN_DMA) {
+                       dev_err_ratelimited(
+                               untrusted,
+                               "Device is not trusted, but driver is overriding group %u to %s, refusing to probe.\n",
+                               group->id, iommu_domain_type_str(driver_type));
+                       return -1;
                }
-               if (!best_type)
-                       best_type = type;
-               last_dev = gdev->dev;
+               driver_type = IOMMU_DOMAIN_DMA;
        }
-       return best_type;
+
+       if (target_type) {
+               if (driver_type && target_type != driver_type)
+                       return -1;
+               return target_type;
+       }
+       return driver_type;
 }
 
 static void iommu_group_do_probe_finalize(struct device *dev)
@@ -1970,16 +2078,24 @@ void iommu_set_fault_handler(struct iommu_domain *domain,
 }
 EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
 
-static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
-                                                unsigned type)
+static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
+                                                struct device *dev,
+                                                unsigned int type)
 {
        struct iommu_domain *domain;
        unsigned int alloc_type = type & IOMMU_DOMAIN_ALLOC_FLAGS;
 
-       if (bus == NULL || bus->iommu_ops == NULL)
+       if (alloc_type == IOMMU_DOMAIN_IDENTITY && ops->identity_domain)
+               return ops->identity_domain;
+       else if (alloc_type == IOMMU_DOMAIN_BLOCKED && ops->blocked_domain)
+               return ops->blocked_domain;
+       else if (type & __IOMMU_DOMAIN_PAGING && ops->domain_alloc_paging)
+               domain = ops->domain_alloc_paging(dev);
+       else if (ops->domain_alloc)
+               domain = ops->domain_alloc(alloc_type);
+       else
                return NULL;
 
-       domain = bus->iommu_ops->domain_alloc(alloc_type);
        if (!domain)
                return NULL;
 
@@ -1989,10 +2105,10 @@ static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
         * may override this later
         */
        if (!domain->pgsize_bitmap)
-               domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
+               domain->pgsize_bitmap = ops->pgsize_bitmap;
 
        if (!domain->ops)
-               domain->ops = bus->iommu_ops->default_domain_ops;
+               domain->ops = ops->default_domain_ops;
 
        if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
                iommu_domain_free(domain);
@@ -2001,9 +2117,22 @@ static struct iommu_domain *__iommu_domain_alloc(const struct bus_type *bus,
        return domain;
 }
 
+static struct iommu_domain *
+__iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
+{
+       struct device *dev =
+               list_first_entry(&group->devices, struct group_device, list)
+                       ->dev;
+
+       return __iommu_domain_alloc(group_iommu_ops(group), dev, type);
+}
+
 struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
 {
-       return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
+       if (bus == NULL || bus->iommu_ops == NULL)
+               return NULL;
+       return __iommu_domain_alloc(bus->iommu_ops, NULL,
+                                   IOMMU_DOMAIN_UNMANAGED);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_alloc);
 
@@ -2012,7 +2141,8 @@ void iommu_domain_free(struct iommu_domain *domain)
        if (domain->type == IOMMU_DOMAIN_SVA)
                mmdrop(domain->mm);
        iommu_put_dma_cookie(domain);
-       domain->ops->free(domain);
+       if (domain->ops->free)
+               domain->ops->free(domain);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
 
@@ -2062,10 +2192,10 @@ static int __iommu_attach_device(struct iommu_domain *domain,
  */
 int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
 {
-       struct iommu_group *group;
+       /* Caller must be a probed driver on dev */
+       struct iommu_group *group = dev->iommu_group;
        int ret;
 
-       group = iommu_group_get(dev);
        if (!group)
                return -ENODEV;
 
@@ -2082,8 +2212,6 @@ int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
 
 out_unlock:
        mutex_unlock(&group->mutex);
-       iommu_group_put(group);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_attach_device);
@@ -2098,9 +2226,9 @@ int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
 
 void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 {
-       struct iommu_group *group;
+       /* Caller must be a probed driver on dev */
+       struct iommu_group *group = dev->iommu_group;
 
-       group = iommu_group_get(dev);
        if (!group)
                return;
 
@@ -2112,24 +2240,18 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
 
 out_unlock:
        mutex_unlock(&group->mutex);
-       iommu_group_put(group);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device);
 
 struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
 {
-       struct iommu_domain *domain;
-       struct iommu_group *group;
+       /* Caller must be a probed driver on dev */
+       struct iommu_group *group = dev->iommu_group;
 
-       group = iommu_group_get(dev);
        if (!group)
                return NULL;
 
-       domain = group->domain;
-
-       iommu_group_put(group);
-
-       return domain;
+       return group->domain;
 }
 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
 
@@ -2275,21 +2397,8 @@ static int __iommu_group_set_domain_internal(struct iommu_group *group,
        if (group->domain == new_domain)
                return 0;
 
-       /*
-        * New drivers should support default domains, so set_platform_dma()
-        * op will never be called. Otherwise the NULL domain represents some
-        * platform specific behavior.
-        */
-       if (!new_domain) {
-               for_each_group_device(group, gdev) {
-                       const struct iommu_ops *ops = dev_iommu_ops(gdev->dev);
-
-                       if (!WARN_ON(!ops->set_platform_dma_ops))
-                               ops->set_platform_dma_ops(gdev->dev);
-               }
-               group->domain = NULL;
-               return 0;
-       }
+       if (WARN_ON(!new_domain))
+               return -EINVAL;
 
        /*
         * Changing the domain is done by calling attach_dev() on the new
@@ -2325,19 +2434,15 @@ err_revert:
         */
        last_gdev = gdev;
        for_each_group_device(group, gdev) {
-               const struct iommu_ops *ops = dev_iommu_ops(gdev->dev);
-
                /*
-                * If set_platform_dma_ops is not present a NULL domain can
-                * happen only for first probe, in which case we leave
-                * group->domain as NULL and let release clean everything up.
+                * A NULL domain can happen only for first probe, in which case
+                * we leave group->domain as NULL and let release clean
+                * everything up.
                 */
                if (group->domain)
                        WARN_ON(__iommu_device_set_domain(
                                group, gdev->dev, group->domain,
                                IOMMU_SET_DOMAIN_MUST_SUCCEED));
-               else if (ops->set_platform_dma_ops)
-                       ops->set_platform_dma_ops(gdev->dev);
                if (gdev == last_gdev)
                        break;
        }
@@ -2418,30 +2523,6 @@ out_set_count:
        return pgsize;
 }
 
-static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
-                            phys_addr_t paddr, size_t size, int prot,
-                            gfp_t gfp, size_t *mapped)
-{
-       const struct iommu_domain_ops *ops = domain->ops;
-       size_t pgsize, count;
-       int ret;
-
-       pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
-
-       pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
-                iova, &paddr, pgsize, count);
-
-       if (ops->map_pages) {
-               ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
-                                    gfp, mapped);
-       } else {
-               ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
-               *mapped = ret ? 0 : pgsize;
-       }
-
-       return ret;
-}
-
 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
                       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
@@ -2452,13 +2533,12 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
        phys_addr_t orig_paddr = paddr;
        int ret = 0;
 
-       if (unlikely(!(ops->map || ops->map_pages) ||
-                    domain->pgsize_bitmap == 0UL))
-               return -ENODEV;
-
        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return -EINVAL;
 
+       if (WARN_ON(!ops->map_pages || domain->pgsize_bitmap == 0UL))
+               return -ENODEV;
+
        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
 
@@ -2476,10 +2556,14 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
        while (size) {
-               size_t mapped = 0;
+               size_t pgsize, count, mapped = 0;
+
+               pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
 
-               ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
-                                       &mapped);
+               pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
+                        iova, &paddr, pgsize, count);
+               ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
+                                    gfp, &mapped);
                /*
                 * Some pages may have been mapped, even if an error occurred,
                 * so we should account for those so they can be unmapped.
@@ -2516,25 +2600,21 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
                return -EINVAL;
 
        ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
-       if (ret == 0 && ops->iotlb_sync_map)
-               ops->iotlb_sync_map(domain, iova, size);
+       if (ret == 0 && ops->iotlb_sync_map) {
+               ret = ops->iotlb_sync_map(domain, iova, size);
+               if (ret)
+                       goto out_err;
+       }
 
        return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_map);
 
-static size_t __iommu_unmap_pages(struct iommu_domain *domain,
-                                 unsigned long iova, size_t size,
-                                 struct iommu_iotlb_gather *iotlb_gather)
-{
-       const struct iommu_domain_ops *ops = domain->ops;
-       size_t pgsize, count;
+out_err:
+       /* undo mappings already done */
+       iommu_unmap(domain, iova, size);
 
-       pgsize = iommu_pgsize(domain, iova, iova, size, &count);
-       return ops->unmap_pages ?
-              ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
-              ops->unmap(domain, iova, pgsize, iotlb_gather);
+       return ret;
 }
+EXPORT_SYMBOL_GPL(iommu_map);
 
 static size_t __iommu_unmap(struct iommu_domain *domain,
                            unsigned long iova, size_t size,
@@ -2545,11 +2625,10 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
 
-       if (unlikely(!(ops->unmap || ops->unmap_pages) ||
-                    domain->pgsize_bitmap == 0UL))
+       if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return 0;
 
-       if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+       if (WARN_ON(!ops->unmap_pages || domain->pgsize_bitmap == 0UL))
                return 0;
 
        /* find out the minimum page size supported */
@@ -2573,9 +2652,10 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
-               unmapped_page = __iommu_unmap_pages(domain, iova,
-                                                   size - unmapped,
-                                                   iotlb_gather);
+               size_t pgsize, count;
+
+               pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count);
+               unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather);
                if (!unmapped_page)
                        break;
 
@@ -2658,8 +2738,11 @@ next:
                        sg = sg_next(sg);
        }
 
-       if (ops->iotlb_sync_map)
-               ops->iotlb_sync_map(domain, iova, mapped);
+       if (ops->iotlb_sync_map) {
+               ret = ops->iotlb_sync_map(domain, iova, mapped);
+               if (ret)
+                       goto out_err;
+       }
        return mapped;
 
 out_err:
@@ -2957,21 +3040,9 @@ static int iommu_setup_default_domain(struct iommu_group *group,
        if (req_type < 0)
                return -EINVAL;
 
-       /*
-        * There are still some drivers which don't support default domains, so
-        * we ignore the failure and leave group->default_domain NULL.
-        *
-        * We assume that the iommu driver starts up the device in
-        * 'set_platform_dma_ops' mode if it does not support default domains.
-        */
        dom = iommu_group_alloc_default_domain(group, req_type);
-       if (!dom) {
-               /* Once in default_domain mode we never leave */
-               if (group->default_domain)
-                       return -ENODEV;
-               group->default_domain = NULL;
-               return 0;
-       }
+       if (!dom)
+               return -ENODEV;
 
        if (group->default_domain == dom)
                return 0;
@@ -3114,24 +3185,6 @@ out_unlock:
        return ret ?: count;
 }
 
-static bool iommu_is_default_domain(struct iommu_group *group)
-{
-       if (group->domain == group->default_domain)
-               return true;
-
-       /*
-        * If the default domain was set to identity and it is still an identity
-        * domain then we consider this a pass. This happens because of
-        * amd_iommu_init_device() replacing the default idenytity domain with an
-        * identity domain that has a different configuration for AMDGPU.
-        */
-       if (group->default_domain &&
-           group->default_domain->type == IOMMU_DOMAIN_IDENTITY &&
-           group->domain && group->domain->type == IOMMU_DOMAIN_IDENTITY)
-               return true;
-       return false;
-}
-
 /**
  * iommu_device_use_default_domain() - Device driver wants to handle device
  *                                     DMA through the kernel DMA API.
@@ -3142,7 +3195,8 @@ static bool iommu_is_default_domain(struct iommu_group *group)
  */
 int iommu_device_use_default_domain(struct device *dev)
 {
-       struct iommu_group *group = iommu_group_get(dev);
+       /* Caller is the driver core during the pre-probe path */
+       struct iommu_group *group = dev->iommu_group;
        int ret = 0;
 
        if (!group)
@@ -3150,7 +3204,7 @@ int iommu_device_use_default_domain(struct device *dev)
 
        mutex_lock(&group->mutex);
        if (group->owner_cnt) {
-               if (group->owner || !iommu_is_default_domain(group) ||
+               if (group->domain != group->default_domain || group->owner ||
                    !xa_empty(&group->pasid_array)) {
                        ret = -EBUSY;
                        goto unlock_out;
@@ -3161,8 +3215,6 @@ int iommu_device_use_default_domain(struct device *dev)
 
 unlock_out:
        mutex_unlock(&group->mutex);
-       iommu_group_put(group);
-
        return ret;
 }
 
@@ -3176,7 +3228,8 @@ unlock_out:
  */
 void iommu_device_unuse_default_domain(struct device *dev)
 {
-       struct iommu_group *group = iommu_group_get(dev);
+       /* Caller is the driver core during the post-probe path */
+       struct iommu_group *group = dev->iommu_group;
 
        if (!group)
                return;
@@ -3186,26 +3239,22 @@ void iommu_device_unuse_default_domain(struct device *dev)
                group->owner_cnt--;
 
        mutex_unlock(&group->mutex);
-       iommu_group_put(group);
 }
 
 static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
 {
-       struct group_device *dev =
-               list_first_entry(&group->devices, struct group_device, list);
-
        if (group->blocking_domain)
                return 0;
 
        group->blocking_domain =
-               __iommu_domain_alloc(dev->dev->bus, IOMMU_DOMAIN_BLOCKED);
+               __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
        if (!group->blocking_domain) {
                /*
                 * For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
                 * create an empty domain instead.
                 */
-               group->blocking_domain = __iommu_domain_alloc(
-                       dev->dev->bus, IOMMU_DOMAIN_UNMANAGED);
+               group->blocking_domain = __iommu_group_domain_alloc(
+                       group, IOMMU_DOMAIN_UNMANAGED);
                if (!group->blocking_domain)
                        return -EINVAL;
        }
@@ -3273,13 +3322,13 @@ EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
  */
 int iommu_device_claim_dma_owner(struct device *dev, void *owner)
 {
-       struct iommu_group *group;
+       /* Caller must be a probed driver on dev */
+       struct iommu_group *group = dev->iommu_group;
        int ret = 0;
 
        if (WARN_ON(!owner))
                return -EINVAL;
 
-       group = iommu_group_get(dev);
        if (!group)
                return -ENODEV;
 
@@ -3296,8 +3345,6 @@ int iommu_device_claim_dma_owner(struct device *dev, void *owner)
        ret = __iommu_take_dma_ownership(group, owner);
 unlock_out:
        mutex_unlock(&group->mutex);
-       iommu_group_put(group);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_device_claim_dma_owner);
@@ -3335,7 +3382,8 @@ EXPORT_SYMBOL_GPL(iommu_group_release_dma_owner);
  */
 void iommu_device_release_dma_owner(struct device *dev)
 {
-       struct iommu_group *group = iommu_group_get(dev);
+       /* Caller must be a probed driver on dev */
+       struct iommu_group *group = dev->iommu_group;
 
        mutex_lock(&group->mutex);
        if (group->owner_cnt > 1)
@@ -3343,7 +3391,6 @@ void iommu_device_release_dma_owner(struct device *dev)
        else
                __iommu_release_dma_ownership(group);
        mutex_unlock(&group->mutex);
-       iommu_group_put(group);
 }
 EXPORT_SYMBOL_GPL(iommu_device_release_dma_owner);
 
@@ -3404,14 +3451,14 @@ static void __iommu_remove_group_pasid(struct iommu_group *group,
 int iommu_attach_device_pasid(struct iommu_domain *domain,
                              struct device *dev, ioasid_t pasid)
 {
-       struct iommu_group *group;
+       /* Caller must be a probed driver on dev */
+       struct iommu_group *group = dev->iommu_group;
        void *curr;
        int ret;
 
        if (!domain->ops->set_dev_pasid)
                return -EOPNOTSUPP;
 
-       group = iommu_group_get(dev);
        if (!group)
                return -ENODEV;
 
@@ -3429,8 +3476,6 @@ int iommu_attach_device_pasid(struct iommu_domain *domain,
        }
 out_unlock:
        mutex_unlock(&group->mutex);
-       iommu_group_put(group);
-
        return ret;
 }
 EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
@@ -3447,14 +3492,13 @@ EXPORT_SYMBOL_GPL(iommu_attach_device_pasid);
 void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev,
                               ioasid_t pasid)
 {
-       struct iommu_group *group = iommu_group_get(dev);
+       /* Caller must be a probed driver on dev */
+       struct iommu_group *group = dev->iommu_group;
 
        mutex_lock(&group->mutex);
        __iommu_remove_group_pasid(group, pasid);
        WARN_ON(xa_erase(&group->pasid_array, pasid) != domain);
        mutex_unlock(&group->mutex);
-
-       iommu_group_put(group);
 }
 EXPORT_SYMBOL_GPL(iommu_detach_device_pasid);
 
@@ -3476,10 +3520,10 @@ struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
                                                    ioasid_t pasid,
                                                    unsigned int type)
 {
+       /* Caller must be a probed driver on dev */
+       struct iommu_group *group = dev->iommu_group;
        struct iommu_domain *domain;
-       struct iommu_group *group;
 
-       group = iommu_group_get(dev);
        if (!group)
                return NULL;
 
@@ -3488,7 +3532,6 @@ struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
        if (type && domain && domain->type != type)
                domain = ERR_PTR(-EBUSY);
        xa_unlock(&group->pasid_array);
-       iommu_group_put(group);
 
        return domain;
 }
index d43a87737c1e88bd8b2eff67822f5cc8b2b6be6e..5d93434003d8ad666af55e212372e37b81c06895 100644 (file)
@@ -123,10 +123,6 @@ struct selftest_obj {
        };
 };
 
-static void mock_domain_blocking_free(struct iommu_domain *domain)
-{
-}
-
 static int mock_domain_nop_attach(struct iommu_domain *domain,
                                  struct device *dev)
 {
@@ -139,7 +135,6 @@ static int mock_domain_nop_attach(struct iommu_domain *domain,
 }
 
 static const struct iommu_domain_ops mock_blocking_ops = {
-       .free = mock_domain_blocking_free,
        .attach_dev = mock_domain_nop_attach,
 };
 
@@ -258,15 +253,6 @@ __mock_domain_alloc_nested(struct mock_iommu_domain *mock_parent,
        return &mock_nested->domain;
 }
 
-static struct iommu_domain *mock_domain_alloc(unsigned int iommu_domain_type)
-{
-       if (iommu_domain_type == IOMMU_DOMAIN_BLOCKED)
-               return &mock_blocking_domain;
-       if (iommu_domain_type == IOMMU_DOMAIN_UNMANAGED)
-               return mock_domain_alloc_paging(NULL);
-       return NULL;
-}
-
 static struct iommu_domain *
 mock_domain_alloc_user(struct device *dev, u32 flags,
                       struct iommu_domain *parent,
@@ -446,14 +432,6 @@ static bool mock_domain_capable(struct device *dev, enum iommu_cap cap)
        return false;
 }
 
-static void mock_domain_set_plaform_dma_ops(struct device *dev)
-{
-       /*
-        * mock doesn't setup default domains because we can't hook into the
-        * normal probe path
-        */
-}
-
 static struct iommu_device mock_iommu_device = {
 };
 
@@ -463,13 +441,18 @@ static struct iommu_device *mock_probe_device(struct device *dev)
 }
 
 static const struct iommu_ops mock_ops = {
+       /*
+        * IOMMU_DOMAIN_BLOCKED cannot be returned from def_domain_type()
+        * because it is zero.
+        */
+       .default_domain = &mock_blocking_domain,
+       .blocked_domain = &mock_blocking_domain,
        .owner = THIS_MODULE,
        .pgsize_bitmap = MOCK_IO_PAGE_SIZE,
        .hw_info = mock_domain_hw_info,
-       .domain_alloc = mock_domain_alloc,
+       .domain_alloc_paging = mock_domain_alloc_paging,
        .domain_alloc_user = mock_domain_alloc_user,
        .capable = mock_domain_capable,
-       .set_platform_dma_ops = mock_domain_set_plaform_dma_ops,
        .device_group = generic_device_group,
        .probe_device = mock_probe_device,
        .default_domain_ops =
index 10b964600948c7f618c9fc6f2f6911b81ef15408..d30e453d0fb4b74a7eb47c77a86b3f11773c0742 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/smp.h>
 #include <linux/bitops.h>
 #include <linux/cpu.h>
+#include <linux/workqueue.h>
 
 /* The anchor node sits above the top of the usable address space */
 #define IOVA_ANCHOR    ~0UL
@@ -622,15 +623,21 @@ EXPORT_SYMBOL_GPL(reserve_iova);
 /*
  * As kmalloc's buffer size is fixed to power of 2, 127 is chosen to
  * assure size of 'iova_magazine' to be 1024 bytes, so that no memory
- * will be wasted.
+ * will be wasted. Since only full magazines are inserted into the depot,
+ * we don't need to waste PFN capacity on a separate list head either.
  */
 #define IOVA_MAG_SIZE 127
-#define MAX_GLOBAL_MAGS 32     /* magazines per bin */
+
+#define IOVA_DEPOT_DELAY msecs_to_jiffies(100)
 
 struct iova_magazine {
-       unsigned long size;
+       union {
+               unsigned long size;
+               struct iova_magazine *next;
+       };
        unsigned long pfns[IOVA_MAG_SIZE];
 };
+static_assert(!(sizeof(struct iova_magazine) & (sizeof(struct iova_magazine) - 1)));
 
 struct iova_cpu_rcache {
        spinlock_t lock;
@@ -640,9 +647,11 @@ struct iova_cpu_rcache {
 
 struct iova_rcache {
        spinlock_t lock;
-       unsigned long depot_size;
-       struct iova_magazine *depot[MAX_GLOBAL_MAGS];
+       unsigned int depot_size;
+       struct iova_magazine *depot;
        struct iova_cpu_rcache __percpu *cpu_rcaches;
+       struct iova_domain *iovad;
+       struct delayed_work work;
 };
 
 static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
@@ -717,6 +726,41 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
        mag->pfns[mag->size++] = pfn;
 }
 
+static struct iova_magazine *iova_depot_pop(struct iova_rcache *rcache)
+{
+       struct iova_magazine *mag = rcache->depot;
+
+       rcache->depot = mag->next;
+       mag->size = IOVA_MAG_SIZE;
+       rcache->depot_size--;
+       return mag;
+}
+
+static void iova_depot_push(struct iova_rcache *rcache, struct iova_magazine *mag)
+{
+       mag->next = rcache->depot;
+       rcache->depot = mag;
+       rcache->depot_size++;
+}
+
+static void iova_depot_work_func(struct work_struct *work)
+{
+       struct iova_rcache *rcache = container_of(work, typeof(*rcache), work.work);
+       struct iova_magazine *mag = NULL;
+       unsigned long flags;
+
+       spin_lock_irqsave(&rcache->lock, flags);
+       if (rcache->depot_size > num_online_cpus())
+               mag = iova_depot_pop(rcache);
+       spin_unlock_irqrestore(&rcache->lock, flags);
+
+       if (mag) {
+               iova_magazine_free_pfns(mag, rcache->iovad);
+               iova_magazine_free(mag);
+               schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
+       }
+}
+
 int iova_domain_init_rcaches(struct iova_domain *iovad)
 {
        unsigned int cpu;
@@ -734,7 +778,8 @@ int iova_domain_init_rcaches(struct iova_domain *iovad)
 
                rcache = &iovad->rcaches[i];
                spin_lock_init(&rcache->lock);
-               rcache->depot_size = 0;
+               rcache->iovad = iovad;
+               INIT_DELAYED_WORK(&rcache->work, iova_depot_work_func);
                rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
                                                     cache_line_size());
                if (!rcache->cpu_rcaches) {
@@ -776,7 +821,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
                                 struct iova_rcache *rcache,
                                 unsigned long iova_pfn)
 {
-       struct iova_magazine *mag_to_free = NULL;
        struct iova_cpu_rcache *cpu_rcache;
        bool can_insert = false;
        unsigned long flags;
@@ -794,13 +838,9 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
 
                if (new_mag) {
                        spin_lock(&rcache->lock);
-                       if (rcache->depot_size < MAX_GLOBAL_MAGS) {
-                               rcache->depot[rcache->depot_size++] =
-                                               cpu_rcache->loaded;
-                       } else {
-                               mag_to_free = cpu_rcache->loaded;
-                       }
+                       iova_depot_push(rcache, cpu_rcache->loaded);
                        spin_unlock(&rcache->lock);
+                       schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
 
                        cpu_rcache->loaded = new_mag;
                        can_insert = true;
@@ -812,11 +852,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad,
 
        spin_unlock_irqrestore(&cpu_rcache->lock, flags);
 
-       if (mag_to_free) {
-               iova_magazine_free_pfns(mag_to_free, iovad);
-               iova_magazine_free(mag_to_free);
-       }
-
        return can_insert;
 }
 
@@ -854,9 +889,9 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
                has_pfn = true;
        } else {
                spin_lock(&rcache->lock);
-               if (rcache->depot_size > 0) {
+               if (rcache->depot) {
                        iova_magazine_free(cpu_rcache->loaded);
-                       cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
+                       cpu_rcache->loaded = iova_depot_pop(rcache);
                        has_pfn = true;
                }
                spin_unlock(&rcache->lock);
@@ -895,9 +930,8 @@ static void free_iova_rcaches(struct iova_domain *iovad)
        struct iova_rcache *rcache;
        struct iova_cpu_rcache *cpu_rcache;
        unsigned int cpu;
-       int i, j;
 
-       for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+       for (int i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                rcache = &iovad->rcaches[i];
                if (!rcache->cpu_rcaches)
                        break;
@@ -907,8 +941,9 @@ static void free_iova_rcaches(struct iova_domain *iovad)
                        iova_magazine_free(cpu_rcache->prev);
                }
                free_percpu(rcache->cpu_rcaches);
-               for (j = 0; j < rcache->depot_size; ++j)
-                       iova_magazine_free(rcache->depot[j]);
+               cancel_delayed_work_sync(&rcache->work);
+               while (rcache->depot)
+                       iova_magazine_free(iova_depot_pop(rcache));
        }
 
        kfree(iovad->rcaches);
@@ -942,16 +977,16 @@ static void free_global_cached_iovas(struct iova_domain *iovad)
 {
        struct iova_rcache *rcache;
        unsigned long flags;
-       int i, j;
 
-       for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+       for (int i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
                rcache = &iovad->rcaches[i];
                spin_lock_irqsave(&rcache->lock, flags);
-               for (j = 0; j < rcache->depot_size; ++j) {
-                       iova_magazine_free_pfns(rcache->depot[j], iovad);
-                       iova_magazine_free(rcache->depot[j]);
+               while (rcache->depot) {
+                       struct iova_magazine *mag = iova_depot_pop(rcache);
+
+                       iova_magazine_free_pfns(mag, iovad);
+                       iova_magazine_free(mag);
                }
-               rcache->depot_size = 0;
                spin_unlock_irqrestore(&rcache->lock, flags);
        }
 }
index 65ff69477c43e41b270f504b4f0434c2145c7c69..ace1fc4bd34b0fd934c26d8c431ef807d94fb13d 100644 (file)
@@ -64,7 +64,6 @@ struct ipmmu_vmsa_device {
        struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];
        s8 utlb_ctx[IPMMU_UTLB_MAX];
 
-       struct iommu_group *group;
        struct dma_iommu_mapping *mapping;
 };
 
@@ -295,6 +294,18 @@ static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
        mmu->utlb_ctx[utlb] = domain->context_id;
 }
 
+/*
+ * Disable MMU translation for the microTLB.
+ */
+static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
+                              unsigned int utlb)
+{
+       struct ipmmu_vmsa_device *mmu = domain->mmu;
+
+       ipmmu_imuctr_write(mmu, utlb, 0);
+       mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID;
+}
+
 static void ipmmu_tlb_flush_all(void *cookie)
 {
        struct ipmmu_vmsa_domain *domain = cookie;
@@ -551,13 +562,10 @@ static irqreturn_t ipmmu_irq(int irq, void *dev)
  * IOMMU Operations
  */
 
-static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
+static struct iommu_domain *ipmmu_domain_alloc_paging(struct device *dev)
 {
        struct ipmmu_vmsa_domain *domain;
 
-       if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
-               return NULL;
-
        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;
@@ -627,6 +635,36 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain,
        return 0;
 }
 
+static int ipmmu_iommu_identity_attach(struct iommu_domain *identity_domain,
+                                      struct device *dev)
+{
+       struct iommu_domain *io_domain = iommu_get_domain_for_dev(dev);
+       struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+       struct ipmmu_vmsa_domain *domain;
+       unsigned int i;
+
+       if (io_domain == identity_domain || !io_domain)
+               return 0;
+
+       domain = to_vmsa_domain(io_domain);
+       for (i = 0; i < fwspec->num_ids; ++i)
+               ipmmu_utlb_disable(domain, fwspec->ids[i]);
+
+       /*
+        * TODO: Optimize by disabling the context when no device is attached.
+        */
+       return 0;
+}
+
+static struct iommu_domain_ops ipmmu_iommu_identity_ops = {
+       .attach_dev = ipmmu_iommu_identity_attach,
+};
+
+static struct iommu_domain ipmmu_iommu_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &ipmmu_iommu_identity_ops,
+};
+
 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
                     phys_addr_t paddr, size_t pgsize, size_t pgcount,
                     int prot, gfp_t gfp, size_t *mapped)
@@ -833,28 +871,18 @@ static void ipmmu_release_device(struct device *dev)
        arm_iommu_release_mapping(mmu->mapping);
 }
 
-static struct iommu_group *ipmmu_find_group(struct device *dev)
-{
-       struct ipmmu_vmsa_device *mmu = to_ipmmu(dev);
-       struct iommu_group *group;
-
-       if (mmu->group)
-               return iommu_group_ref_get(mmu->group);
-
-       group = iommu_group_alloc();
-       if (!IS_ERR(group))
-               mmu->group = group;
-
-       return group;
-}
-
 static const struct iommu_ops ipmmu_ops = {
-       .domain_alloc = ipmmu_domain_alloc,
+       .identity_domain = &ipmmu_iommu_identity_domain,
+       .domain_alloc_paging = ipmmu_domain_alloc_paging,
        .probe_device = ipmmu_probe_device,
        .release_device = ipmmu_release_device,
        .probe_finalize = ipmmu_probe_finalize,
+       /*
+        * FIXME: The device grouping is a fixed property of the hardware's
+        * ability to isolate and control DMA, it should not depend on kconfig.
+        */
        .device_group = IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_IOMMU_DMA)
-                       ? generic_device_group : ipmmu_find_group,
+                       ? generic_device_group : generic_single_device_group,
        .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
        .of_xlate = ipmmu_of_xlate,
        .default_domain_ops = &(const struct iommu_domain_ops) {
index 79d89bad5132b7bde0683c42a852e54bb08c1a94..f86af9815d6f9886c24faa598455968d5ff4f263 100644 (file)
@@ -302,13 +302,10 @@ static void __program_context(void __iomem *base, int ctx,
        SET_M(base, ctx, 1);
 }
 
-static struct iommu_domain *msm_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *msm_iommu_domain_alloc_paging(struct device *dev)
 {
        struct msm_priv *priv;
 
-       if (type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
-
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto fail_nomem;
@@ -443,15 +440,20 @@ fail:
        return ret;
 }
 
-static void msm_iommu_set_platform_dma(struct device *dev)
+static int msm_iommu_identity_attach(struct iommu_domain *identity_domain,
+                                    struct device *dev)
 {
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-       struct msm_priv *priv = to_msm_priv(domain);
+       struct msm_priv *priv;
        unsigned long flags;
        struct msm_iommu_dev *iommu;
        struct msm_iommu_ctx_dev *master;
-       int ret;
+       int ret = 0;
+
+       if (domain == identity_domain || !domain)
+               return 0;
 
+       priv = to_msm_priv(domain);
        free_io_pgtable_ops(priv->iop);
 
        spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -468,8 +470,18 @@ static void msm_iommu_set_platform_dma(struct device *dev)
        }
 fail:
        spin_unlock_irqrestore(&msm_iommu_lock, flags);
+       return ret;
 }
 
+static struct iommu_domain_ops msm_iommu_identity_ops = {
+       .attach_dev = msm_iommu_identity_attach,
+};
+
+static struct iommu_domain msm_iommu_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &msm_iommu_identity_ops,
+};
+
 static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t pa, size_t pgsize, size_t pgcount,
                         int prot, gfp_t gfp, size_t *mapped)
@@ -486,12 +498,13 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
        return ret;
 }
 
-static void msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
-                              size_t size)
+static int msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+                             size_t size)
 {
        struct msm_priv *priv = to_msm_priv(domain);
 
        __flush_iotlb_range(iova, size, SZ_4K, false, priv);
+       return 0;
 }
 
 static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -675,10 +688,10 @@ fail:
 }
 
 static struct iommu_ops msm_iommu_ops = {
-       .domain_alloc = msm_iommu_domain_alloc,
+       .identity_domain = &msm_iommu_identity_domain,
+       .domain_alloc_paging = msm_iommu_domain_alloc_paging,
        .probe_device = msm_iommu_probe_device,
        .device_group = generic_device_group,
-       .set_platform_dma_ops = msm_iommu_set_platform_dma,
        .pgsize_bitmap = MSM_IOMMU_PGSIZES,
        .of_xlate = qcom_iommu_of_xlate,
        .default_domain_ops = &(const struct iommu_domain_ops) {
index fab6c347ce578ec7c79131a9ceeaab3f41b0aafe..75279500a4a824f4246bd3fec4fdfb22b1c2cccc 100644 (file)
@@ -688,13 +688,10 @@ update_iova_region:
        return 0;
 }
 
-static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_domain_alloc_paging(struct device *dev)
 {
        struct mtk_iommu_domain *dom;
 
-       if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
-
        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;
@@ -776,6 +773,28 @@ err_unlock:
        return ret;
 }
 
+static int mtk_iommu_identity_attach(struct iommu_domain *identity_domain,
+                                    struct device *dev)
+{
+       struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+       struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
+
+       if (domain == identity_domain || !domain)
+               return 0;
+
+       mtk_iommu_config(data, dev, false, 0);
+       return 0;
+}
+
+static struct iommu_domain_ops mtk_iommu_identity_ops = {
+       .attach_dev = mtk_iommu_identity_attach,
+};
+
+static struct iommu_domain mtk_iommu_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &mtk_iommu_identity_ops,
+};
+
 static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t pgsize, size_t pgcount,
                         int prot, gfp_t gfp, size_t *mapped)
@@ -817,12 +836,13 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
        mtk_iommu_tlb_flush_range_sync(gather->start, length, dom->bank);
 }
 
-static void mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
-                              size_t size)
+static int mtk_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
+                             size_t size)
 {
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
 
        mtk_iommu_tlb_flush_range_sync(iova, size, dom->bank);
+       return 0;
 }
 
 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -995,7 +1015,8 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
 }
 
 static const struct iommu_ops mtk_iommu_ops = {
-       .domain_alloc   = mtk_iommu_domain_alloc,
+       .identity_domain = &mtk_iommu_identity_domain,
+       .domain_alloc_paging = mtk_iommu_domain_alloc_paging,
        .probe_device   = mtk_iommu_probe_device,
        .release_device = mtk_iommu_release_device,
        .device_group   = mtk_iommu_device_group,
index 8a0a5e5d049f4aca4f3b53cd9f80900dfde5f4cf..67e044c1a7d93bc7e8c398445fd867008d349154 100644 (file)
@@ -270,13 +270,10 @@ static int mtk_iommu_v1_domain_finalise(struct mtk_iommu_v1_data *data)
        return 0;
 }
 
-static struct iommu_domain *mtk_iommu_v1_domain_alloc(unsigned type)
+static struct iommu_domain *mtk_iommu_v1_domain_alloc_paging(struct device *dev)
 {
        struct mtk_iommu_v1_domain *dom;
 
-       if (type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
-
        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;
@@ -319,13 +316,24 @@ static int mtk_iommu_v1_attach_device(struct iommu_domain *domain, struct device
        return 0;
 }
 
-static void mtk_iommu_v1_set_platform_dma(struct device *dev)
+static int mtk_iommu_v1_identity_attach(struct iommu_domain *identity_domain,
+                                       struct device *dev)
 {
        struct mtk_iommu_v1_data *data = dev_iommu_priv_get(dev);
 
        mtk_iommu_v1_config(data, dev, false);
+       return 0;
 }
 
+static struct iommu_domain_ops mtk_iommu_v1_identity_ops = {
+       .attach_dev = mtk_iommu_v1_identity_attach,
+};
+
+static struct iommu_domain mtk_iommu_v1_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &mtk_iommu_v1_identity_ops,
+};
+
 static int mtk_iommu_v1_map(struct iommu_domain *domain, unsigned long iova,
                            phys_addr_t paddr, size_t pgsize, size_t pgcount,
                            int prot, gfp_t gfp, size_t *mapped)
@@ -441,11 +449,6 @@ static int mtk_iommu_v1_create_mapping(struct device *dev, struct of_phandle_arg
        return 0;
 }
 
-static int mtk_iommu_v1_def_domain_type(struct device *dev)
-{
-       return IOMMU_DOMAIN_UNMANAGED;
-}
-
 static struct iommu_device *mtk_iommu_v1_probe_device(struct device *dev)
 {
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
@@ -578,14 +581,13 @@ static int mtk_iommu_v1_hw_init(const struct mtk_iommu_v1_data *data)
 }
 
 static const struct iommu_ops mtk_iommu_v1_ops = {
-       .domain_alloc   = mtk_iommu_v1_domain_alloc,
+       .identity_domain = &mtk_iommu_v1_identity_domain,
+       .domain_alloc_paging = mtk_iommu_v1_domain_alloc_paging,
        .probe_device   = mtk_iommu_v1_probe_device,
        .probe_finalize = mtk_iommu_v1_probe_finalize,
        .release_device = mtk_iommu_v1_release_device,
-       .def_domain_type = mtk_iommu_v1_def_domain_type,
        .device_group   = generic_device_group,
        .pgsize_bitmap  = MT2701_IOMMU_PAGE_SIZE,
-       .set_platform_dma_ops = mtk_iommu_v1_set_platform_dma,
        .owner          = THIS_MODULE,
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev     = mtk_iommu_v1_attach_device,
index 537e402f9bba97144792a3c66249f3e0f8bea102..c66b070841dd41e0c322f12515c7d8f919e5bd16 100644 (file)
@@ -1225,18 +1225,15 @@ static int omap_iommu_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, obj);
 
        if (omap_iommu_can_register(pdev)) {
-               obj->group = iommu_group_alloc();
-               if (IS_ERR(obj->group))
-                       return PTR_ERR(obj->group);
-
                err = iommu_device_sysfs_add(&obj->iommu, obj->dev, NULL,
                                             obj->name);
                if (err)
-                       goto out_group;
+                       return err;
 
                err = iommu_device_register(&obj->iommu, &omap_iommu_ops, &pdev->dev);
                if (err)
                        goto out_sysfs;
+               obj->has_iommu_driver = true;
        }
 
        pm_runtime_enable(obj->dev);
@@ -1252,8 +1249,6 @@ static int omap_iommu_probe(struct platform_device *pdev)
 
 out_sysfs:
        iommu_device_sysfs_remove(&obj->iommu);
-out_group:
-       iommu_group_put(obj->group);
        return err;
 }
 
@@ -1261,10 +1256,7 @@ static void omap_iommu_remove(struct platform_device *pdev)
 {
        struct omap_iommu *obj = platform_get_drvdata(pdev);
 
-       if (obj->group) {
-               iommu_group_put(obj->group);
-               obj->group = NULL;
-
+       if (obj->has_iommu_driver) {
                iommu_device_sysfs_remove(&obj->iommu);
                iommu_device_unregister(&obj->iommu);
        }
@@ -1318,7 +1310,8 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
 }
 
 static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
-                         phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
+                         phys_addr_t pa, size_t bytes, size_t count,
+                         int prot, gfp_t gfp, size_t *mapped)
 {
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
        struct device *dev = omap_domain->dev;
@@ -1356,13 +1349,15 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
                        oiommu = iommu->iommu_dev;
                        iopgtable_clear_entry(oiommu, da);
                }
+       } else {
+               *mapped = bytes;
        }
 
        return ret;
 }
 
 static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
-                              size_t size, struct iommu_iotlb_gather *gather)
+                              size_t size, size_t count, struct iommu_iotlb_gather *gather)
 {
        struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
        struct device *dev = omap_domain->dev;
@@ -1555,23 +1550,35 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
        omap_domain->dev = NULL;
 }
 
-static void omap_iommu_set_platform_dma(struct device *dev)
+static int omap_iommu_identity_attach(struct iommu_domain *identity_domain,
+                                     struct device *dev)
 {
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-       struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
+       struct omap_iommu_domain *omap_domain;
+
+       if (domain == identity_domain || !domain)
+               return 0;
 
+       omap_domain = to_omap_domain(domain);
        spin_lock(&omap_domain->lock);
        _omap_iommu_detach_dev(omap_domain, dev);
        spin_unlock(&omap_domain->lock);
+       return 0;
 }
 
-static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
+static struct iommu_domain_ops omap_iommu_identity_ops = {
+       .attach_dev = omap_iommu_identity_attach,
+};
+
+static struct iommu_domain omap_iommu_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &omap_iommu_identity_ops,
+};
+
+static struct iommu_domain *omap_iommu_domain_alloc_paging(struct device *dev)
 {
        struct omap_iommu_domain *omap_domain;
 
-       if (type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
-
        omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
        if (!omap_domain)
                return NULL;
@@ -1717,31 +1724,17 @@ static void omap_iommu_release_device(struct device *dev)
 
 }
 
-static struct iommu_group *omap_iommu_device_group(struct device *dev)
-{
-       struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev);
-       struct iommu_group *group = ERR_PTR(-EINVAL);
-
-       if (!arch_data)
-               return ERR_PTR(-ENODEV);
-
-       if (arch_data->iommu_dev)
-               group = iommu_group_ref_get(arch_data->iommu_dev->group);
-
-       return group;
-}
-
 static const struct iommu_ops omap_iommu_ops = {
-       .domain_alloc   = omap_iommu_domain_alloc,
+       .identity_domain = &omap_iommu_identity_domain,
+       .domain_alloc_paging = omap_iommu_domain_alloc_paging,
        .probe_device   = omap_iommu_probe_device,
        .release_device = omap_iommu_release_device,
-       .device_group   = omap_iommu_device_group,
-       .set_platform_dma_ops = omap_iommu_set_platform_dma,
+       .device_group   = generic_single_device_group,
        .pgsize_bitmap  = OMAP_IOMMU_PGSIZES,
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev     = omap_iommu_attach_dev,
-               .map            = omap_iommu_map,
-               .unmap          = omap_iommu_unmap,
+               .map_pages      = omap_iommu_map,
+               .unmap_pages    = omap_iommu_unmap,
                .iova_to_phys   = omap_iommu_iova_to_phys,
                .free           = omap_iommu_domain_free,
        }
index 18ee713ede784d16c884b9cca19ed8ce240706d4..27697109ec79a55953dd933302a27a76210a9858 100644 (file)
@@ -80,7 +80,7 @@ struct omap_iommu {
        u32 id;
 
        struct iommu_device iommu;
-       struct iommu_group *group;
+       bool has_iommu_driver;
 
        u8 pwrst;
 };
index 8ff69fbf9f65db876964a7a90ca9abe1d27f65d2..2685861c0a1262959e0883fa7fb15bc73063d51a 100644 (file)
@@ -113,7 +113,6 @@ struct rk_iommu {
        struct iommu_device iommu;
        struct list_head node; /* entry in rk_iommu_domain.iommus */
        struct iommu_domain *domain; /* domain to which iommu is attached */
-       struct iommu_group *group;
 };
 
 struct rk_iommudata {
@@ -817,7 +816,8 @@ unwind:
 }
 
 static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
-                       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+                       phys_addr_t paddr, size_t size, size_t count,
+                       int prot, gfp_t gfp, size_t *mapped)
 {
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
@@ -850,12 +850,14 @@ static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
                                paddr, size, prot);
 
        spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+       if (!ret)
+               *mapped = size;
 
        return ret;
 }
 
 static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
-                            size_t size, struct iommu_iotlb_gather *gather)
+                            size_t size, size_t count, struct iommu_iotlb_gather *gather)
 {
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
@@ -989,13 +991,8 @@ static int rk_iommu_identity_attach(struct iommu_domain *identity_domain,
        return 0;
 }
 
-static void rk_iommu_identity_free(struct iommu_domain *domain)
-{
-}
-
 static struct iommu_domain_ops rk_identity_ops = {
        .attach_dev = rk_iommu_identity_attach,
-       .free = rk_iommu_identity_free,
 };
 
 static struct iommu_domain rk_identity_domain = {
@@ -1003,13 +1000,6 @@ static struct iommu_domain rk_identity_domain = {
        .ops = &rk_identity_ops,
 };
 
-#ifdef CONFIG_ARM
-static void rk_iommu_set_platform_dma(struct device *dev)
-{
-       WARN_ON(rk_iommu_identity_attach(&rk_identity_domain, dev));
-}
-#endif
-
 static int rk_iommu_attach_device(struct iommu_domain *domain,
                struct device *dev)
 {
@@ -1055,16 +1045,10 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
        return ret;
 }
 
-static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *rk_iommu_domain_alloc_paging(struct device *dev)
 {
        struct rk_iommu_domain *rk_domain;
 
-       if (type == IOMMU_DOMAIN_IDENTITY)
-               return &rk_identity_domain;
-
-       if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
-               return NULL;
-
        if (!dma_dev)
                return NULL;
 
@@ -1155,15 +1139,6 @@ static void rk_iommu_release_device(struct device *dev)
        device_link_del(data->link);
 }
 
-static struct iommu_group *rk_iommu_device_group(struct device *dev)
-{
-       struct rk_iommu *iommu;
-
-       iommu = rk_iommu_from_dev(dev);
-
-       return iommu_group_ref_get(iommu->group);
-}
-
 static int rk_iommu_of_xlate(struct device *dev,
                             struct of_phandle_args *args)
 {
@@ -1186,19 +1161,17 @@ static int rk_iommu_of_xlate(struct device *dev,
 }
 
 static const struct iommu_ops rk_iommu_ops = {
-       .domain_alloc = rk_iommu_domain_alloc,
+       .identity_domain = &rk_identity_domain,
+       .domain_alloc_paging = rk_iommu_domain_alloc_paging,
        .probe_device = rk_iommu_probe_device,
        .release_device = rk_iommu_release_device,
-       .device_group = rk_iommu_device_group,
-#ifdef CONFIG_ARM
-       .set_platform_dma_ops = rk_iommu_set_platform_dma,
-#endif
+       .device_group = generic_single_device_group,
        .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
        .of_xlate = rk_iommu_of_xlate,
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev     = rk_iommu_attach_device,
-               .map            = rk_iommu_map,
-               .unmap          = rk_iommu_unmap,
+               .map_pages      = rk_iommu_map,
+               .unmap_pages    = rk_iommu_unmap,
                .iova_to_phys   = rk_iommu_iova_to_phys,
                .free           = rk_iommu_domain_free,
        }
@@ -1280,15 +1253,9 @@ static int rk_iommu_probe(struct platform_device *pdev)
        if (err)
                return err;
 
-       iommu->group = iommu_group_alloc();
-       if (IS_ERR(iommu->group)) {
-               err = PTR_ERR(iommu->group);
-               goto err_unprepare_clocks;
-       }
-
        err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
        if (err)
-               goto err_put_group;
+               goto err_unprepare_clocks;
 
        err = iommu_device_register(&iommu->iommu, &rk_iommu_ops, dev);
        if (err)
@@ -1325,8 +1292,6 @@ err_pm_disable:
        pm_runtime_disable(dev);
 err_remove_sysfs:
        iommu_device_sysfs_remove(&iommu->iommu);
-err_put_group:
-       iommu_group_put(iommu->group);
 err_unprepare_clocks:
        clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
        return err;
index fbf59a8db29b1101bb5c4b3ef16da6a94baf1095..9a5196f523de5f0f8902f054b1b3722d14b18cd2 100644 (file)
 #include <linux/rcupdate.h>
 #include <asm/pci_dma.h>
 
+#include "dma-iommu.h"
+
 static const struct iommu_ops s390_iommu_ops;
 
+static struct kmem_cache *dma_region_table_cache;
+static struct kmem_cache *dma_page_table_cache;
+
+static u64 s390_iommu_aperture;
+static u32 s390_iommu_aperture_factor = 1;
+
 struct s390_domain {
        struct iommu_domain     domain;
        struct list_head        devices;
+       struct zpci_iommu_ctrs  ctrs;
        unsigned long           *dma_table;
        spinlock_t              list_lock;
        struct rcu_head         rcu;
 };
 
+static inline unsigned int calc_rtx(dma_addr_t ptr)
+{
+       return ((unsigned long)ptr >> ZPCI_RT_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_sx(dma_addr_t ptr)
+{
+       return ((unsigned long)ptr >> ZPCI_ST_SHIFT) & ZPCI_INDEX_MASK;
+}
+
+static inline unsigned int calc_px(dma_addr_t ptr)
+{
+       return ((unsigned long)ptr >> PAGE_SHIFT) & ZPCI_PT_MASK;
+}
+
+static inline void set_pt_pfaa(unsigned long *entry, phys_addr_t pfaa)
+{
+       *entry &= ZPCI_PTE_FLAG_MASK;
+       *entry |= (pfaa & ZPCI_PTE_ADDR_MASK);
+}
+
+static inline void set_rt_sto(unsigned long *entry, phys_addr_t sto)
+{
+       *entry &= ZPCI_RTE_FLAG_MASK;
+       *entry |= (sto & ZPCI_RTE_ADDR_MASK);
+       *entry |= ZPCI_TABLE_TYPE_RTX;
+}
+
+static inline void set_st_pto(unsigned long *entry, phys_addr_t pto)
+{
+       *entry &= ZPCI_STE_FLAG_MASK;
+       *entry |= (pto & ZPCI_STE_ADDR_MASK);
+       *entry |= ZPCI_TABLE_TYPE_SX;
+}
+
+static inline void validate_rt_entry(unsigned long *entry)
+{
+       *entry &= ~ZPCI_TABLE_VALID_MASK;
+       *entry &= ~ZPCI_TABLE_OFFSET_MASK;
+       *entry |= ZPCI_TABLE_VALID;
+       *entry |= ZPCI_TABLE_LEN_RTX;
+}
+
+static inline void validate_st_entry(unsigned long *entry)
+{
+       *entry &= ~ZPCI_TABLE_VALID_MASK;
+       *entry |= ZPCI_TABLE_VALID;
+}
+
+static inline void invalidate_pt_entry(unsigned long *entry)
+{
+       WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_INVALID);
+       *entry &= ~ZPCI_PTE_VALID_MASK;
+       *entry |= ZPCI_PTE_INVALID;
+}
+
+static inline void validate_pt_entry(unsigned long *entry)
+{
+       WARN_ON_ONCE((*entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID);
+       *entry &= ~ZPCI_PTE_VALID_MASK;
+       *entry |= ZPCI_PTE_VALID;
+}
+
+static inline void entry_set_protected(unsigned long *entry)
+{
+       *entry &= ~ZPCI_TABLE_PROT_MASK;
+       *entry |= ZPCI_TABLE_PROTECTED;
+}
+
+static inline void entry_clr_protected(unsigned long *entry)
+{
+       *entry &= ~ZPCI_TABLE_PROT_MASK;
+       *entry |= ZPCI_TABLE_UNPROTECTED;
+}
+
+static inline int reg_entry_isvalid(unsigned long entry)
+{
+       return (entry & ZPCI_TABLE_VALID_MASK) == ZPCI_TABLE_VALID;
+}
+
+static inline int pt_entry_isvalid(unsigned long entry)
+{
+       return (entry & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID;
+}
+
+static inline unsigned long *get_rt_sto(unsigned long entry)
+{
+       if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_RTX)
+               return phys_to_virt(entry & ZPCI_RTE_ADDR_MASK);
+       else
+               return NULL;
+}
+
+static inline unsigned long *get_st_pto(unsigned long entry)
+{
+       if ((entry & ZPCI_TABLE_TYPE_MASK) == ZPCI_TABLE_TYPE_SX)
+               return phys_to_virt(entry & ZPCI_STE_ADDR_MASK);
+       else
+               return NULL;
+}
+
+static int __init dma_alloc_cpu_table_caches(void)
+{
+       dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
+                                                  ZPCI_TABLE_SIZE,
+                                                  ZPCI_TABLE_ALIGN,
+                                                  0, NULL);
+       if (!dma_region_table_cache)
+               return -ENOMEM;
+
+       dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
+                                                ZPCI_PT_SIZE,
+                                                ZPCI_PT_ALIGN,
+                                                0, NULL);
+       if (!dma_page_table_cache) {
+               kmem_cache_destroy(dma_region_table_cache);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static unsigned long *dma_alloc_cpu_table(gfp_t gfp)
+{
+       unsigned long *table, *entry;
+
+       table = kmem_cache_alloc(dma_region_table_cache, gfp);
+       if (!table)
+               return NULL;
+
+       for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
+               *entry = ZPCI_TABLE_INVALID;
+       return table;
+}
+
+static void dma_free_cpu_table(void *table)
+{
+       kmem_cache_free(dma_region_table_cache, table);
+}
+
+static void dma_free_page_table(void *table)
+{
+       kmem_cache_free(dma_page_table_cache, table);
+}
+
+static void dma_free_seg_table(unsigned long entry)
+{
+       unsigned long *sto = get_rt_sto(entry);
+       int sx;
+
+       for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
+               if (reg_entry_isvalid(sto[sx]))
+                       dma_free_page_table(get_st_pto(sto[sx]));
+
+       dma_free_cpu_table(sto);
+}
+
+static void dma_cleanup_tables(unsigned long *table)
+{
+       int rtx;
+
+       if (!table)
+               return;
+
+       for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+               if (reg_entry_isvalid(table[rtx]))
+                       dma_free_seg_table(table[rtx]);
+
+       dma_free_cpu_table(table);
+}
+
+static unsigned long *dma_alloc_page_table(gfp_t gfp)
+{
+       unsigned long *table, *entry;
+
+       table = kmem_cache_alloc(dma_page_table_cache, gfp);
+       if (!table)
+               return NULL;
+
+       for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
+               *entry = ZPCI_PTE_INVALID;
+       return table;
+}
+
+static unsigned long *dma_get_seg_table_origin(unsigned long *rtep, gfp_t gfp)
+{
+       unsigned long old_rte, rte;
+       unsigned long *sto;
+
+       rte = READ_ONCE(*rtep);
+       if (reg_entry_isvalid(rte)) {
+               sto = get_rt_sto(rte);
+       } else {
+               sto = dma_alloc_cpu_table(gfp);
+               if (!sto)
+                       return NULL;
+
+               set_rt_sto(&rte, virt_to_phys(sto));
+               validate_rt_entry(&rte);
+               entry_clr_protected(&rte);
+
+               old_rte = cmpxchg(rtep, ZPCI_TABLE_INVALID, rte);
+               if (old_rte != ZPCI_TABLE_INVALID) {
+                       /* Someone else was faster, use theirs */
+                       dma_free_cpu_table(sto);
+                       sto = get_rt_sto(old_rte);
+               }
+       }
+       return sto;
+}
+
+static unsigned long *dma_get_page_table_origin(unsigned long *step, gfp_t gfp)
+{
+       unsigned long old_ste, ste;
+       unsigned long *pto;
+
+       ste = READ_ONCE(*step);
+       if (reg_entry_isvalid(ste)) {
+               pto = get_st_pto(ste);
+       } else {
+               pto = dma_alloc_page_table(gfp);
+               if (!pto)
+                       return NULL;
+               set_st_pto(&ste, virt_to_phys(pto));
+               validate_st_entry(&ste);
+               entry_clr_protected(&ste);
+
+               old_ste = cmpxchg(step, ZPCI_TABLE_INVALID, ste);
+               if (old_ste != ZPCI_TABLE_INVALID) {
+                       /* Someone else was faster, use theirs */
+                       dma_free_page_table(pto);
+                       pto = get_st_pto(old_ste);
+               }
+       }
+       return pto;
+}
+
+static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr, gfp_t gfp)
+{
+       unsigned long *sto, *pto;
+       unsigned int rtx, sx, px;
+
+       rtx = calc_rtx(dma_addr);
+       sto = dma_get_seg_table_origin(&rto[rtx], gfp);
+       if (!sto)
+               return NULL;
+
+       sx = calc_sx(dma_addr);
+       pto = dma_get_page_table_origin(&sto[sx], gfp);
+       if (!pto)
+               return NULL;
+
+       px = calc_px(dma_addr);
+       return &pto[px];
+}
+
+static void dma_update_cpu_trans(unsigned long *ptep, phys_addr_t page_addr, int flags)
+{
+       unsigned long pte;
+
+       pte = READ_ONCE(*ptep);
+       if (flags & ZPCI_PTE_INVALID) {
+               invalidate_pt_entry(&pte);
+       } else {
+               set_pt_pfaa(&pte, page_addr);
+               validate_pt_entry(&pte);
+       }
+
+       if (flags & ZPCI_TABLE_PROTECTED)
+               entry_set_protected(&pte);
+       else
+               entry_clr_protected(&pte);
+
+       xchg(ptep, pte);
+}
+
 static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
 {
        return container_of(dom, struct s390_domain, domain);
@@ -31,21 +315,22 @@ static struct s390_domain *to_s390_domain(struct iommu_domain *dom)
 
 static bool s390_iommu_capable(struct device *dev, enum iommu_cap cap)
 {
+       struct zpci_dev *zdev = to_zpci_dev(dev);
+
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
                return true;
+       case IOMMU_CAP_DEFERRED_FLUSH:
+               return zdev->pft != PCI_FUNC_TYPE_ISM;
        default:
                return false;
        }
 }
 
-static struct iommu_domain *s390_domain_alloc(unsigned domain_type)
+static struct iommu_domain *s390_domain_alloc_paging(struct device *dev)
 {
        struct s390_domain *s390_domain;
 
-       if (domain_type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
-
        s390_domain = kzalloc(sizeof(*s390_domain), GFP_KERNEL);
        if (!s390_domain)
                return NULL;
@@ -84,14 +369,13 @@ static void s390_domain_free(struct iommu_domain *domain)
        call_rcu(&s390_domain->rcu, s390_iommu_rcu_free_domain);
 }
 
-static void __s390_iommu_detach_device(struct zpci_dev *zdev)
+static void s390_iommu_detach_device(struct iommu_domain *domain,
+                                    struct device *dev)
 {
-       struct s390_domain *s390_domain = zdev->s390_domain;
+       struct s390_domain *s390_domain = to_s390_domain(domain);
+       struct zpci_dev *zdev = to_zpci_dev(dev);
        unsigned long flags;
 
-       if (!s390_domain)
-               return;
-
        spin_lock_irqsave(&s390_domain->list_lock, flags);
        list_del_rcu(&zdev->iommu_list);
        spin_unlock_irqrestore(&s390_domain->list_lock, flags);
@@ -118,9 +402,7 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
                return -EINVAL;
 
        if (zdev->s390_domain)
-               __s390_iommu_detach_device(zdev);
-       else if (zdev->dma_table)
-               zpci_dma_exit_device(zdev);
+               s390_iommu_detach_device(&zdev->s390_domain->domain, dev);
 
        cc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
                                virt_to_phys(s390_domain->dma_table), &status);
@@ -130,7 +412,6 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
         */
        if (cc && status != ZPCI_PCI_ST_FUNC_NOT_AVAIL)
                return -EIO;
-       zdev->dma_table = s390_domain->dma_table;
 
        zdev->dma_table = s390_domain->dma_table;
        zdev->s390_domain = s390_domain;
@@ -142,14 +423,6 @@ static int s390_iommu_attach_device(struct iommu_domain *domain,
        return 0;
 }
 
-static void s390_iommu_set_platform_dma(struct device *dev)
-{
-       struct zpci_dev *zdev = to_zpci_dev(dev);
-
-       __s390_iommu_detach_device(zdev);
-       zpci_dma_init_device(zdev);
-}
-
 static void s390_iommu_get_resv_regions(struct device *dev,
                                        struct list_head *list)
 {
@@ -190,6 +463,9 @@ static struct iommu_device *s390_iommu_probe_device(struct device *dev)
        if (zdev->end_dma > ZPCI_TABLE_SIZE_RT - 1)
                zdev->end_dma = ZPCI_TABLE_SIZE_RT - 1;
 
+       if (zdev->tlb_refresh)
+               dev->iommu->shadow_on_flush = 1;
+
        return &zdev->iommu_dev;
 }
 
@@ -202,7 +478,13 @@ static void s390_iommu_release_device(struct device *dev)
         * to the device, but keep it attached to other devices in the group.
         */
        if (zdev)
-               __s390_iommu_detach_device(zdev);
+               s390_iommu_detach_device(&zdev->s390_domain->domain, dev);
+}
+
+static int zpci_refresh_all(struct zpci_dev *zdev)
+{
+       return zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
+                                 zdev->end_dma - zdev->start_dma + 1);
 }
 
 static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
@@ -212,8 +494,8 @@ static void s390_iommu_flush_iotlb_all(struct iommu_domain *domain)
 
        rcu_read_lock();
        list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
-               zpci_refresh_trans((u64)zdev->fh << 32, zdev->start_dma,
-                                  zdev->end_dma - zdev->start_dma + 1);
+               atomic64_inc(&s390_domain->ctrs.global_rpcits);
+               zpci_refresh_all(zdev);
        }
        rcu_read_unlock();
 }
@@ -231,26 +513,40 @@ static void s390_iommu_iotlb_sync(struct iommu_domain *domain,
 
        rcu_read_lock();
        list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
+               atomic64_inc(&s390_domain->ctrs.sync_rpcits);
                zpci_refresh_trans((u64)zdev->fh << 32, gather->start,
                                   size);
        }
        rcu_read_unlock();
 }
 
-static void s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
-                                     unsigned long iova, size_t size)
+static int s390_iommu_iotlb_sync_map(struct iommu_domain *domain,
+                                    unsigned long iova, size_t size)
 {
        struct s390_domain *s390_domain = to_s390_domain(domain);
        struct zpci_dev *zdev;
+       int ret = 0;
 
        rcu_read_lock();
        list_for_each_entry_rcu(zdev, &s390_domain->devices, iommu_list) {
                if (!zdev->tlb_refresh)
                        continue;
-               zpci_refresh_trans((u64)zdev->fh << 32,
-                                  iova, size);
+               atomic64_inc(&s390_domain->ctrs.sync_map_rpcits);
+               ret = zpci_refresh_trans((u64)zdev->fh << 32,
+                                        iova, size);
+               /*
+                * let the hypervisor discover invalidated entries
+                * allowing it to free IOVAs and unpin pages
+                */
+               if (ret == -ENOMEM) {
+                       ret = zpci_refresh_all(zdev);
+                       if (ret)
+                               break;
+               }
        }
        rcu_read_unlock();
+
+       return ret;
 }
 
 static int s390_iommu_validate_trans(struct s390_domain *s390_domain,
@@ -330,16 +626,15 @@ static int s390_iommu_map_pages(struct iommu_domain *domain,
        if (!IS_ALIGNED(iova | paddr, pgsize))
                return -EINVAL;
 
-       if (!(prot & IOMMU_READ))
-               return -EINVAL;
-
        if (!(prot & IOMMU_WRITE))
                flags |= ZPCI_TABLE_PROTECTED;
 
        rc = s390_iommu_validate_trans(s390_domain, paddr, iova,
-                                      pgcount, flags, gfp);
-       if (!rc)
+                                    pgcount, flags, gfp);
+       if (!rc) {
                *mapped = size;
+               atomic64_add(pgcount, &s390_domain->ctrs.mapped_pages);
+       }
 
        return rc;
 }
@@ -395,12 +690,26 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain,
                return 0;
 
        iommu_iotlb_gather_add_range(gather, iova, size);
+       atomic64_add(pgcount, &s390_domain->ctrs.unmapped_pages);
 
        return size;
 }
 
+static void s390_iommu_probe_finalize(struct device *dev)
+{
+       iommu_setup_dma_ops(dev, 0, U64_MAX);
+}
+
+struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev)
+{
+       if (!zdev || !zdev->s390_domain)
+               return NULL;
+       return &zdev->s390_domain->ctrs;
+}
+
 int zpci_init_iommu(struct zpci_dev *zdev)
 {
+       u64 aperture_size;
        int rc = 0;
 
        rc = iommu_device_sysfs_add(&zdev->iommu_dev, NULL, NULL,
@@ -412,6 +721,12 @@ int zpci_init_iommu(struct zpci_dev *zdev)
        if (rc)
                goto out_sysfs;
 
+       zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
+       aperture_size = min3(s390_iommu_aperture,
+                            ZPCI_TABLE_SIZE_RT - zdev->start_dma,
+                            zdev->end_dma - zdev->start_dma + 1);
+       zdev->end_dma = zdev->start_dma + aperture_size - 1;
+
        return 0;
 
 out_sysfs:
@@ -427,13 +742,52 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
        iommu_device_sysfs_remove(&zdev->iommu_dev);
 }
 
+static int __init s390_iommu_setup(char *str)
+{
+       if (!strcmp(str, "strict")) {
+               pr_warn("s390_iommu=strict deprecated; use iommu.strict=1 instead\n");
+               iommu_set_dma_strict();
+       }
+       return 1;
+}
+
+__setup("s390_iommu=", s390_iommu_setup);
+
+static int __init s390_iommu_aperture_setup(char *str)
+{
+       if (kstrtou32(str, 10, &s390_iommu_aperture_factor))
+               s390_iommu_aperture_factor = 1;
+       return 1;
+}
+
+__setup("s390_iommu_aperture=", s390_iommu_aperture_setup);
+
+static int __init s390_iommu_init(void)
+{
+       int rc;
+
+       iommu_dma_forcedac = true;
+       s390_iommu_aperture = (u64)virt_to_phys(high_memory);
+       if (!s390_iommu_aperture_factor)
+               s390_iommu_aperture = ULONG_MAX;
+       else
+               s390_iommu_aperture *= s390_iommu_aperture_factor;
+
+       rc = dma_alloc_cpu_table_caches();
+       if (rc)
+               return rc;
+
+       return rc;
+}
+subsys_initcall(s390_iommu_init);
+
 static const struct iommu_ops s390_iommu_ops = {
        .capable = s390_iommu_capable,
-       .domain_alloc = s390_domain_alloc,
+       .domain_alloc_paging = s390_domain_alloc_paging,
        .probe_device = s390_iommu_probe_device,
+       .probe_finalize = s390_iommu_probe_finalize,
        .release_device = s390_iommu_release_device,
        .device_group = generic_device_group,
-       .set_platform_dma_ops = s390_iommu_set_platform_dma,
        .pgsize_bitmap = SZ_4K,
        .get_resv_regions = s390_iommu_get_resv_regions,
        .default_domain_ops = &(const struct iommu_domain_ops) {
index 2fa9afebd4f5f023ef494e5e1e6d717de7c443c1..2eb9fb46703b3ae3d836b6c757a511f37d5998e5 100644 (file)
@@ -70,7 +70,6 @@ struct sprd_iommu_device {
        void __iomem            *base;
        struct device           *dev;
        struct iommu_device     iommu;
-       struct iommu_group      *group;
        struct clk              *eb;
 };
 
@@ -134,13 +133,10 @@ sprd_iommu_pgt_size(struct iommu_domain *domain)
                SPRD_IOMMU_PAGE_SHIFT) * sizeof(u32);
 }
 
-static struct iommu_domain *sprd_iommu_domain_alloc(unsigned int domain_type)
+static struct iommu_domain *sprd_iommu_domain_alloc_paging(struct device *dev)
 {
        struct sprd_iommu_domain *dom;
 
-       if (domain_type != IOMMU_DOMAIN_DMA && domain_type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
-
        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;
@@ -345,8 +341,8 @@ static size_t sprd_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
        return size;
 }
 
-static void sprd_iommu_sync_map(struct iommu_domain *domain,
-                               unsigned long iova, size_t size)
+static int sprd_iommu_sync_map(struct iommu_domain *domain,
+                              unsigned long iova, size_t size)
 {
        struct sprd_iommu_domain *dom = to_sprd_domain(domain);
        unsigned int reg;
@@ -358,6 +354,7 @@ static void sprd_iommu_sync_map(struct iommu_domain *domain,
 
        /* clear IOMMU TLB buffer after page table updated */
        sprd_iommu_write(dom->sdev, reg, 0xffffffff);
+       return 0;
 }
 
 static void sprd_iommu_sync(struct iommu_domain *domain,
@@ -399,13 +396,6 @@ static struct iommu_device *sprd_iommu_probe_device(struct device *dev)
        return &sdev->iommu;
 }
 
-static struct iommu_group *sprd_iommu_device_group(struct device *dev)
-{
-       struct sprd_iommu_device *sdev = dev_iommu_priv_get(dev);
-
-       return iommu_group_ref_get(sdev->group);
-}
-
 static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 {
        struct platform_device *pdev;
@@ -421,9 +411,9 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 
 
 static const struct iommu_ops sprd_iommu_ops = {
-       .domain_alloc   = sprd_iommu_domain_alloc,
+       .domain_alloc_paging = sprd_iommu_domain_alloc_paging,
        .probe_device   = sprd_iommu_probe_device,
-       .device_group   = sprd_iommu_device_group,
+       .device_group   = generic_single_device_group,
        .of_xlate       = sprd_iommu_of_xlate,
        .pgsize_bitmap  = SPRD_IOMMU_PAGE_SIZE,
        .owner          = THIS_MODULE,
@@ -496,16 +486,9 @@ static int sprd_iommu_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, sdev);
        sdev->dev = dev;
 
-       /* All the client devices are in the same iommu-group */
-       sdev->group = iommu_group_alloc();
-       if (IS_ERR(sdev->group)) {
-               ret = PTR_ERR(sdev->group);
-               goto free_page;
-       }
-
        ret = iommu_device_sysfs_add(&sdev->iommu, dev, NULL, dev_name(dev));
        if (ret)
-               goto put_group;
+               goto free_page;
 
        ret = iommu_device_register(&sdev->iommu, &sprd_iommu_ops, dev);
        if (ret)
@@ -530,8 +513,6 @@ unregister_iommu:
        iommu_device_unregister(&sdev->iommu);
 remove_sysfs:
        iommu_device_sysfs_remove(&sdev->iommu);
-put_group:
-       iommu_group_put(sdev->group);
 free_page:
        dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);
        return ret;
@@ -543,9 +524,6 @@ static void sprd_iommu_remove(struct platform_device *pdev)
 
        dma_free_coherent(sdev->dev, SPRD_IOMMU_PAGE_SIZE, sdev->prot_page_va, sdev->prot_page_pa);
 
-       iommu_group_put(sdev->group);
-       sdev->group = NULL;
-
        platform_set_drvdata(pdev, NULL);
        iommu_device_sysfs_remove(&sdev->iommu);
        iommu_device_unregister(&sdev->iommu);
index 74c5cb93e9002748936e30f4083e2326077f6091..41484a5a399bb17c46a0efb8aa150350c8103a37 100644 (file)
@@ -107,7 +107,6 @@ struct sun50i_iommu {
        struct clk *clk;
 
        struct iommu_domain *domain;
-       struct iommu_group *group;
        struct kmem_cache *pt_pool;
 };
 
@@ -402,8 +401,8 @@ static void sun50i_iommu_flush_iotlb_all(struct iommu_domain *domain)
        spin_unlock_irqrestore(&iommu->iommu_lock, flags);
 }
 
-static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
-                                       unsigned long iova, size_t size)
+static int sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
+                                      unsigned long iova, size_t size)
 {
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        struct sun50i_iommu *iommu = sun50i_domain->iommu;
@@ -412,6 +411,8 @@ static void sun50i_iommu_iotlb_sync_map(struct iommu_domain *domain,
        spin_lock_irqsave(&iommu->iommu_lock, flags);
        sun50i_iommu_zap_range(iommu, iova, size);
        spin_unlock_irqrestore(&iommu->iommu_lock, flags);
+
+       return 0;
 }
 
 static void sun50i_iommu_iotlb_sync(struct iommu_domain *domain,
@@ -589,7 +590,8 @@ static u32 *sun50i_dte_get_page_table(struct sun50i_iommu_domain *sun50i_domain,
 }
 
 static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
-                           phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+                           phys_addr_t paddr, size_t size, size_t count,
+                           int prot, gfp_t gfp, size_t *mapped)
 {
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        struct sun50i_iommu *iommu = sun50i_domain->iommu;
@@ -616,13 +618,14 @@ static int sun50i_iommu_map(struct iommu_domain *domain, unsigned long iova,
 
        *pte_addr = sun50i_mk_pte(paddr, prot);
        sun50i_table_flush(sun50i_domain, pte_addr, 1);
+       *mapped = size;
 
 out:
        return ret;
 }
 
 static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-                                size_t size, struct iommu_iotlb_gather *gather)
+                                size_t size, size_t count, struct iommu_iotlb_gather *gather)
 {
        struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        phys_addr_t pt_phys;
@@ -667,14 +670,11 @@ static phys_addr_t sun50i_iommu_iova_to_phys(struct iommu_domain *domain,
                sun50i_iova_get_page_offset(iova);
 }
 
-static struct iommu_domain *sun50i_iommu_domain_alloc(unsigned type)
+static struct iommu_domain *
+sun50i_iommu_domain_alloc_paging(struct device *dev)
 {
        struct sun50i_iommu_domain *sun50i_domain;
 
-       if (type != IOMMU_DOMAIN_DMA &&
-           type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
-
        sun50i_domain = kzalloc(sizeof(*sun50i_domain), GFP_KERNEL);
        if (!sun50i_domain)
                return NULL;
@@ -757,21 +757,32 @@ static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu,
        iommu->domain = NULL;
 }
 
-static void sun50i_iommu_detach_device(struct iommu_domain *domain,
-                                      struct device *dev)
+static int sun50i_iommu_identity_attach(struct iommu_domain *identity_domain,
+                                       struct device *dev)
 {
-       struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
        struct sun50i_iommu *iommu = dev_iommu_priv_get(dev);
+       struct sun50i_iommu_domain *sun50i_domain;
 
        dev_dbg(dev, "Detaching from IOMMU domain\n");
 
-       if (iommu->domain != domain)
-               return;
+       if (iommu->domain == identity_domain)
+               return 0;
 
+       sun50i_domain = to_sun50i_domain(iommu->domain);
        if (refcount_dec_and_test(&sun50i_domain->refcnt))
                sun50i_iommu_detach_domain(iommu, sun50i_domain);
+       return 0;
 }
 
+static struct iommu_domain_ops sun50i_iommu_identity_ops = {
+       .attach_dev = sun50i_iommu_identity_attach,
+};
+
+static struct iommu_domain sun50i_iommu_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &sun50i_iommu_identity_ops,
+};
+
 static int sun50i_iommu_attach_device(struct iommu_domain *domain,
                                      struct device *dev)
 {
@@ -789,8 +800,7 @@ static int sun50i_iommu_attach_device(struct iommu_domain *domain,
        if (iommu->domain == domain)
                return 0;
 
-       if (iommu->domain)
-               sun50i_iommu_detach_device(iommu->domain, dev);
+       sun50i_iommu_identity_attach(&sun50i_iommu_identity_domain, dev);
 
        sun50i_iommu_attach_domain(iommu, sun50i_domain);
 
@@ -808,13 +818,6 @@ static struct iommu_device *sun50i_iommu_probe_device(struct device *dev)
        return &iommu->iommu;
 }
 
-static struct iommu_group *sun50i_iommu_device_group(struct device *dev)
-{
-       struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev);
-
-       return iommu_group_ref_get(iommu->group);
-}
-
 static int sun50i_iommu_of_xlate(struct device *dev,
                                 struct of_phandle_args *args)
 {
@@ -827,9 +830,10 @@ static int sun50i_iommu_of_xlate(struct device *dev,
 }
 
 static const struct iommu_ops sun50i_iommu_ops = {
+       .identity_domain = &sun50i_iommu_identity_domain,
        .pgsize_bitmap  = SZ_4K,
-       .device_group   = sun50i_iommu_device_group,
-       .domain_alloc   = sun50i_iommu_domain_alloc,
+       .device_group   = generic_single_device_group,
+       .domain_alloc_paging = sun50i_iommu_domain_alloc_paging,
        .of_xlate       = sun50i_iommu_of_xlate,
        .probe_device   = sun50i_iommu_probe_device,
        .default_domain_ops = &(const struct iommu_domain_ops) {
@@ -838,8 +842,8 @@ static const struct iommu_ops sun50i_iommu_ops = {
                .iotlb_sync_map = sun50i_iommu_iotlb_sync_map,
                .iotlb_sync     = sun50i_iommu_iotlb_sync,
                .iova_to_phys   = sun50i_iommu_iova_to_phys,
-               .map            = sun50i_iommu_map,
-               .unmap          = sun50i_iommu_unmap,
+               .map_pages      = sun50i_iommu_map,
+               .unmap_pages    = sun50i_iommu_unmap,
                .free           = sun50i_iommu_domain_free,
        }
 };
@@ -985,6 +989,7 @@ static int sun50i_iommu_probe(struct platform_device *pdev)
        if (!iommu)
                return -ENOMEM;
        spin_lock_init(&iommu->iommu_lock);
+       iommu->domain = &sun50i_iommu_identity_domain;
        platform_set_drvdata(pdev, iommu);
        iommu->dev = &pdev->dev;
 
@@ -995,42 +1000,36 @@ static int sun50i_iommu_probe(struct platform_device *pdev)
        if (!iommu->pt_pool)
                return -ENOMEM;
 
-       iommu->group = iommu_group_alloc();
-       if (IS_ERR(iommu->group)) {
-               ret = PTR_ERR(iommu->group);
-               goto err_free_cache;
-       }
-
        iommu->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(iommu->base)) {
                ret = PTR_ERR(iommu->base);
-               goto err_free_group;
+               goto err_free_cache;
        }
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                ret = irq;
-               goto err_free_group;
+               goto err_free_cache;
        }
 
        iommu->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(iommu->clk)) {
                dev_err(&pdev->dev, "Couldn't get our clock.\n");
                ret = PTR_ERR(iommu->clk);
-               goto err_free_group;
+               goto err_free_cache;
        }
 
        iommu->reset = devm_reset_control_get(&pdev->dev, NULL);
        if (IS_ERR(iommu->reset)) {
                dev_err(&pdev->dev, "Couldn't get our reset line.\n");
                ret = PTR_ERR(iommu->reset);
-               goto err_free_group;
+               goto err_free_cache;
        }
 
        ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev,
                                     NULL, dev_name(&pdev->dev));
        if (ret)
-               goto err_free_group;
+               goto err_free_cache;
 
        ret = iommu_device_register(&iommu->iommu, &sun50i_iommu_ops, &pdev->dev);
        if (ret)
@@ -1049,9 +1048,6 @@ err_unregister:
 err_remove_sysfs:
        iommu_device_sysfs_remove(&iommu->iommu);
 
-err_free_group:
-       iommu_group_put(iommu->group);
-
 err_free_cache:
        kmem_cache_destroy(iommu->pt_pool);
 
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
deleted file mode 100644 (file)
index a482ff8..0000000
+++ /dev/null
@@ -1,371 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * IOMMU API for Graphics Address Relocation Table on Tegra20
- *
- * Copyright (c) 2010-2012, NVIDIA CORPORATION.  All rights reserved.
- *
- * Author: Hiroshi DOYU <hdoyu@nvidia.com>
- */
-
-#define dev_fmt(fmt)   "gart: " fmt
-
-#include <linux/io.h>
-#include <linux/iommu.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/vmalloc.h>
-
-#include <soc/tegra/mc.h>
-
-#define GART_REG_BASE          0x24
-#define GART_CONFIG            (0x24 - GART_REG_BASE)
-#define GART_ENTRY_ADDR                (0x28 - GART_REG_BASE)
-#define GART_ENTRY_DATA                (0x2c - GART_REG_BASE)
-
-#define GART_ENTRY_PHYS_ADDR_VALID     BIT(31)
-
-#define GART_PAGE_SHIFT                12
-#define GART_PAGE_SIZE         (1 << GART_PAGE_SHIFT)
-#define GART_PAGE_MASK         GENMASK(30, GART_PAGE_SHIFT)
-
-/* bitmap of the page sizes currently supported */
-#define GART_IOMMU_PGSIZES     (GART_PAGE_SIZE)
-
-struct gart_device {
-       void __iomem            *regs;
-       u32                     *savedata;
-       unsigned long           iovmm_base;     /* offset to vmm_area start */
-       unsigned long           iovmm_end;      /* offset to vmm_area end */
-       spinlock_t              pte_lock;       /* for pagetable */
-       spinlock_t              dom_lock;       /* for active domain */
-       unsigned int            active_devices; /* number of active devices */
-       struct iommu_domain     *active_domain; /* current active domain */
-       struct iommu_device     iommu;          /* IOMMU Core handle */
-       struct device           *dev;
-};
-
-static struct gart_device *gart_handle; /* unique for a system */
-
-static bool gart_debug;
-
-/*
- * Any interaction between any block on PPSB and a block on APB or AHB
- * must have these read-back to ensure the APB/AHB bus transaction is
- * complete before initiating activity on the PPSB block.
- */
-#define FLUSH_GART_REGS(gart)  readl_relaxed((gart)->regs + GART_CONFIG)
-
-#define for_each_gart_pte(gart, iova)                                  \
-       for (iova = gart->iovmm_base;                                   \
-            iova < gart->iovmm_end;                                    \
-            iova += GART_PAGE_SIZE)
-
-static inline void gart_set_pte(struct gart_device *gart,
-                               unsigned long iova, unsigned long pte)
-{
-       writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
-       writel_relaxed(pte, gart->regs + GART_ENTRY_DATA);
-}
-
-static inline unsigned long gart_read_pte(struct gart_device *gart,
-                                         unsigned long iova)
-{
-       unsigned long pte;
-
-       writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
-       pte = readl_relaxed(gart->regs + GART_ENTRY_DATA);
-
-       return pte;
-}
-
-static void do_gart_setup(struct gart_device *gart, const u32 *data)
-{
-       unsigned long iova;
-
-       for_each_gart_pte(gart, iova)
-               gart_set_pte(gart, iova, data ? *(data++) : 0);
-
-       writel_relaxed(1, gart->regs + GART_CONFIG);
-       FLUSH_GART_REGS(gart);
-}
-
-static inline bool gart_iova_range_invalid(struct gart_device *gart,
-                                          unsigned long iova, size_t bytes)
-{
-       return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
-                       iova + bytes > gart->iovmm_end);
-}
-
-static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
-{
-       return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
-}
-
-static int gart_iommu_attach_dev(struct iommu_domain *domain,
-                                struct device *dev)
-{
-       struct gart_device *gart = gart_handle;
-       int ret = 0;
-
-       spin_lock(&gart->dom_lock);
-
-       if (gart->active_domain && gart->active_domain != domain) {
-               ret = -EINVAL;
-       } else if (dev_iommu_priv_get(dev) != domain) {
-               dev_iommu_priv_set(dev, domain);
-               gart->active_domain = domain;
-               gart->active_devices++;
-       }
-
-       spin_unlock(&gart->dom_lock);
-
-       return ret;
-}
-
-static void gart_iommu_set_platform_dma(struct device *dev)
-{
-       struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-       struct gart_device *gart = gart_handle;
-
-       spin_lock(&gart->dom_lock);
-
-       if (dev_iommu_priv_get(dev) == domain) {
-               dev_iommu_priv_set(dev, NULL);
-
-               if (--gart->active_devices == 0)
-                       gart->active_domain = NULL;
-       }
-
-       spin_unlock(&gart->dom_lock);
-}
-
-static struct iommu_domain *gart_iommu_domain_alloc(unsigned type)
-{
-       struct iommu_domain *domain;
-
-       if (type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
-
-       domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-       if (domain) {
-               domain->geometry.aperture_start = gart_handle->iovmm_base;
-               domain->geometry.aperture_end = gart_handle->iovmm_end - 1;
-               domain->geometry.force_aperture = true;
-       }
-
-       return domain;
-}
-
-static void gart_iommu_domain_free(struct iommu_domain *domain)
-{
-       WARN_ON(gart_handle->active_domain == domain);
-       kfree(domain);
-}
-
-static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
-                                  unsigned long pa)
-{
-       if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
-               dev_err(gart->dev, "Page entry is in-use\n");
-               return -EINVAL;
-       }
-
-       gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);
-
-       return 0;
-}
-
-static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
-                         phys_addr_t pa, size_t bytes, int prot, gfp_t gfp)
-{
-       struct gart_device *gart = gart_handle;
-       int ret;
-
-       if (gart_iova_range_invalid(gart, iova, bytes))
-               return -EINVAL;
-
-       spin_lock(&gart->pte_lock);
-       ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
-       spin_unlock(&gart->pte_lock);
-
-       return ret;
-}
-
-static inline int __gart_iommu_unmap(struct gart_device *gart,
-                                    unsigned long iova)
-{
-       if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
-               dev_err(gart->dev, "Page entry is invalid\n");
-               return -EINVAL;
-       }
-
-       gart_set_pte(gart, iova, 0);
-
-       return 0;
-}
-
-static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
-                              size_t bytes, struct iommu_iotlb_gather *gather)
-{
-       struct gart_device *gart = gart_handle;
-       int err;
-
-       if (gart_iova_range_invalid(gart, iova, bytes))
-               return 0;
-
-       spin_lock(&gart->pte_lock);
-       err = __gart_iommu_unmap(gart, iova);
-       spin_unlock(&gart->pte_lock);
-
-       return err ? 0 : bytes;
-}
-
-static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
-                                          dma_addr_t iova)
-{
-       struct gart_device *gart = gart_handle;
-       unsigned long pte;
-
-       if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
-               return -EINVAL;
-
-       spin_lock(&gart->pte_lock);
-       pte = gart_read_pte(gart, iova);
-       spin_unlock(&gart->pte_lock);
-
-       return pte & GART_PAGE_MASK;
-}
-
-static struct iommu_device *gart_iommu_probe_device(struct device *dev)
-{
-       if (!dev_iommu_fwspec_get(dev))
-               return ERR_PTR(-ENODEV);
-
-       return &gart_handle->iommu;
-}
-
-static int gart_iommu_of_xlate(struct device *dev,
-                              struct of_phandle_args *args)
-{
-       return 0;
-}
-
-static void gart_iommu_sync_map(struct iommu_domain *domain, unsigned long iova,
-                               size_t size)
-{
-       FLUSH_GART_REGS(gart_handle);
-}
-
-static void gart_iommu_sync(struct iommu_domain *domain,
-                           struct iommu_iotlb_gather *gather)
-{
-       size_t length = gather->end - gather->start + 1;
-
-       gart_iommu_sync_map(domain, gather->start, length);
-}
-
-static const struct iommu_ops gart_iommu_ops = {
-       .domain_alloc   = gart_iommu_domain_alloc,
-       .probe_device   = gart_iommu_probe_device,
-       .device_group   = generic_device_group,
-       .set_platform_dma_ops = gart_iommu_set_platform_dma,
-       .pgsize_bitmap  = GART_IOMMU_PGSIZES,
-       .of_xlate       = gart_iommu_of_xlate,
-       .default_domain_ops = &(const struct iommu_domain_ops) {
-               .attach_dev     = gart_iommu_attach_dev,
-               .map            = gart_iommu_map,
-               .unmap          = gart_iommu_unmap,
-               .iova_to_phys   = gart_iommu_iova_to_phys,
-               .iotlb_sync_map = gart_iommu_sync_map,
-               .iotlb_sync     = gart_iommu_sync,
-               .free           = gart_iommu_domain_free,
-       }
-};
-
-int tegra_gart_suspend(struct gart_device *gart)
-{
-       u32 *data = gart->savedata;
-       unsigned long iova;
-
-       /*
-        * All GART users shall be suspended at this point. Disable
-        * address translation to trap all GART accesses as invalid
-        * memory accesses.
-        */
-       writel_relaxed(0, gart->regs + GART_CONFIG);
-       FLUSH_GART_REGS(gart);
-
-       for_each_gart_pte(gart, iova)
-               *(data++) = gart_read_pte(gart, iova);
-
-       return 0;
-}
-
-int tegra_gart_resume(struct gart_device *gart)
-{
-       do_gart_setup(gart, gart->savedata);
-
-       return 0;
-}
-
-struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
-{
-       struct gart_device *gart;
-       struct resource *res;
-       int err;
-
-       BUILD_BUG_ON(PAGE_SHIFT != GART_PAGE_SHIFT);
-
-       /* the GART memory aperture is required */
-       res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 1);
-       if (!res) {
-               dev_err(dev, "Memory aperture resource unavailable\n");
-               return ERR_PTR(-ENXIO);
-       }
-
-       gart = kzalloc(sizeof(*gart), GFP_KERNEL);
-       if (!gart)
-               return ERR_PTR(-ENOMEM);
-
-       gart_handle = gart;
-
-       gart->dev = dev;
-       gart->regs = mc->regs + GART_REG_BASE;
-       gart->iovmm_base = res->start;
-       gart->iovmm_end = res->end + 1;
-       spin_lock_init(&gart->pte_lock);
-       spin_lock_init(&gart->dom_lock);
-
-       do_gart_setup(gart, NULL);
-
-       err = iommu_device_sysfs_add(&gart->iommu, dev, NULL, "gart");
-       if (err)
-               goto free_gart;
-
-       err = iommu_device_register(&gart->iommu, &gart_iommu_ops, dev);
-       if (err)
-               goto remove_sysfs;
-
-       gart->savedata = vmalloc(resource_size(res) / GART_PAGE_SIZE *
-                                sizeof(u32));
-       if (!gart->savedata) {
-               err = -ENOMEM;
-               goto unregister_iommu;
-       }
-
-       return gart;
-
-unregister_iommu:
-       iommu_device_unregister(&gart->iommu);
-remove_sysfs:
-       iommu_device_sysfs_remove(&gart->iommu);
-free_gart:
-       kfree(gart);
-
-       return ERR_PTR(err);
-}
-
-module_param(gart_debug, bool, 0644);
-MODULE_PARM_DESC(gart_debug, "Enable GART debugging");
index e445f80d02263b405f9ed9a8218418dd6c9585a5..310871728ab4b6f24d270e2f3bb0e2247df05d0b 100644 (file)
@@ -272,13 +272,10 @@ static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
        clear_bit(id, smmu->asids);
 }
 
-static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
+static struct iommu_domain *tegra_smmu_domain_alloc_paging(struct device *dev)
 {
        struct tegra_smmu_as *as;
 
-       if (type != IOMMU_DOMAIN_UNMANAGED)
-               return NULL;
-
        as = kzalloc(sizeof(*as), GFP_KERNEL);
        if (!as)
                return NULL;
@@ -511,23 +508,39 @@ disable:
        return err;
 }
 
-static void tegra_smmu_set_platform_dma(struct device *dev)
+static int tegra_smmu_identity_attach(struct iommu_domain *identity_domain,
+                                     struct device *dev)
 {
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-       struct tegra_smmu_as *as = to_smmu_as(domain);
-       struct tegra_smmu *smmu = as->smmu;
+       struct tegra_smmu_as *as;
+       struct tegra_smmu *smmu;
        unsigned int index;
 
        if (!fwspec)
-               return;
+               return -ENODEV;
 
+       if (domain == identity_domain || !domain)
+               return 0;
+
+       as = to_smmu_as(domain);
+       smmu = as->smmu;
        for (index = 0; index < fwspec->num_ids; index++) {
                tegra_smmu_disable(smmu, fwspec->ids[index], as->id);
                tegra_smmu_as_unprepare(smmu, as);
        }
+       return 0;
 }
 
+static struct iommu_domain_ops tegra_smmu_identity_ops = {
+       .attach_dev = tegra_smmu_identity_attach,
+};
+
+static struct iommu_domain tegra_smmu_identity_domain = {
+       .type = IOMMU_DOMAIN_IDENTITY,
+       .ops = &tegra_smmu_identity_ops,
+};
+
 static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
                               u32 value)
 {
@@ -751,7 +764,8 @@ __tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 }
 
 static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
-                         phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
+                         phys_addr_t paddr, size_t size, size_t count,
+                         int prot, gfp_t gfp, size_t *mapped)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long flags;
@@ -761,11 +775,14 @@ static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
        ret = __tegra_smmu_map(domain, iova, paddr, size, prot, gfp, &flags);
        spin_unlock_irqrestore(&as->lock, flags);
 
+       if (!ret)
+               *mapped = size;
+
        return ret;
 }
 
 static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
-                              size_t size, struct iommu_iotlb_gather *gather)
+                              size_t size, size_t count, struct iommu_iotlb_gather *gather)
 {
        struct tegra_smmu_as *as = to_smmu_as(domain);
        unsigned long flags;
@@ -962,17 +979,28 @@ static int tegra_smmu_of_xlate(struct device *dev,
        return iommu_fwspec_add_ids(dev, &id, 1);
 }
 
+static int tegra_smmu_def_domain_type(struct device *dev)
+{
+       /*
+        * FIXME: For now we want to run all translation in IDENTITY mode, due
+        * to some device quirks. Better would be to just quirk the troubled
+        * devices.
+        */
+       return IOMMU_DOMAIN_IDENTITY;
+}
+
 static const struct iommu_ops tegra_smmu_ops = {
-       .domain_alloc = tegra_smmu_domain_alloc,
+       .identity_domain = &tegra_smmu_identity_domain,
+       .def_domain_type = &tegra_smmu_def_domain_type,
+       .domain_alloc_paging = tegra_smmu_domain_alloc_paging,
        .probe_device = tegra_smmu_probe_device,
        .device_group = tegra_smmu_device_group,
-       .set_platform_dma_ops = tegra_smmu_set_platform_dma,
        .of_xlate = tegra_smmu_of_xlate,
        .pgsize_bitmap = SZ_4K,
        .default_domain_ops = &(const struct iommu_domain_ops) {
                .attach_dev     = tegra_smmu_attach_dev,
-               .map            = tegra_smmu_map,
-               .unmap          = tegra_smmu_unmap,
+               .map_pages      = tegra_smmu_map,
+               .unmap_pages    = tegra_smmu_unmap,
                .iova_to_phys   = tegra_smmu_iova_to_phys,
                .free           = tegra_smmu_domain_free,
        }
@@ -1056,8 +1084,6 @@ DEFINE_SHOW_ATTRIBUTE(tegra_smmu_clients);
 static void tegra_smmu_debugfs_init(struct tegra_smmu *smmu)
 {
        smmu->debugfs = debugfs_create_dir("smmu", NULL);
-       if (!smmu->debugfs)
-               return;
 
        debugfs_create_file("swgroups", S_IRUGO, smmu->debugfs, smmu,
                            &tegra_smmu_swgroups_fops);
index 17dcd826f5c20e16b62daf837c2dc88ad5275d5f..379ebe03efb6d45b42afd8a63b4fcb830bb37903 100644 (file)
@@ -85,7 +85,7 @@ struct viommu_request {
        void                            *writeback;
        unsigned int                    write_offset;
        unsigned int                    len;
-       char                            buf[];
+       char                            buf[] __counted_by(len);
 };
 
 #define VIOMMU_FAULT_RESV_MASK         0xffffff00
@@ -230,7 +230,7 @@ static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
        if (write_offset <= 0)
                return -EINVAL;
 
-       req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
+       req = kzalloc(struct_size(req, buf, len), GFP_ATOMIC);
        if (!req)
                return -ENOMEM;
 
index 68d11ccee44120829051b803fe593827440ac0bf..98b0329b7154a4377afaa6b3cf5622e304dd2110 100644 (file)
@@ -39,8 +39,7 @@
 
 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996    (1ULL << 0)
 #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539  (1ULL << 1)
-#define FLAGS_WORKAROUND_MTK_GICR_SAVE         (1ULL << 2)
-#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001   (1ULL << 3)
+#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001   (1ULL << 2)
 
 #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
 
@@ -106,7 +105,7 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
  * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
  *   interrupt.
  */
-DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
+static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
 
 DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities);
 EXPORT_SYMBOL(gic_nonsecure_priorities);
@@ -1779,15 +1778,6 @@ static bool gic_enable_quirk_msm8996(void *data)
        return true;
 }
 
-static bool gic_enable_quirk_mtk_gicr(void *data)
-{
-       struct gic_chip_data *d = data;
-
-       d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE;
-
-       return true;
-}
-
 static bool gic_enable_quirk_cavium_38539(void *data)
 {
        struct gic_chip_data *d = data;
@@ -1888,11 +1878,6 @@ static const struct gic_quirk gic_quirks[] = {
                .compatible = "asr,asr8601-gic-v3",
                .init   = gic_enable_quirk_asr8601,
        },
-       {
-               .desc   = "GICv3: Mediatek Chromebook GICR save problem",
-               .property = "mediatek,broken-save-restore-fw",
-               .init   = gic_enable_quirk_mtk_gicr,
-       },
        {
                .desc   = "GICv3: HIP06 erratum 161010803",
                .iidr   = 0x0204043b,
@@ -1959,11 +1944,6 @@ static void gic_enable_nmi_support(void)
        if (!gic_prio_masking_enabled())
                return;
 
-       if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) {
-               pr_warn("Skipping NMI enable due to firmware issues\n");
-               return;
-       }
-
        rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
                                 sizeof(*rdist_nmi_refs), GFP_KERNEL);
        if (!rdist_nmi_refs)
index bf03abb94e684bd55938bae4642428ea869c936a..68d82a682bf67f93b50c8df96fc463fcf92437f0 100644 (file)
@@ -1085,7 +1085,6 @@ static const struct pwm_ops lpg_pwm_ops = {
        .request = lpg_pwm_request,
        .apply = lpg_pwm_apply,
        .get_state = lpg_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static int lpg_add_pwm(struct lpg *lpg)
index d67db63b482d8073a89180f9ac4ce0a4f46415ed..778faeced81e1d6c409b74a551f8b755dbb48553 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/wait.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/io.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/mailbox_controller.h>
 #include <linux/mailbox/brcm-message.h>
@@ -1494,7 +1493,6 @@ static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
 {
        struct device *dev = &pdev->dev;
        struct device_node *dn = pdev->dev.of_node;
-       const struct of_device_id *match;
        const int *hw_type;
        int err;
 
@@ -1509,11 +1507,9 @@ static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
 
        pdcs->hw_type = PDC_HW;
 
-       match = of_match_device(of_match_ptr(pdc_mbox_of_match), dev);
-       if (match != NULL) {
-               hw_type = match->data;
+       hw_type = device_get_match_data(dev);
+       if (hw_type)
                pdcs->hw_type = *hw_type;
-       }
 
        return 0;
 }
index 3ef4dd8adf5db1bd3f28a5c2560377007dde0e00..0af739ab571cd8736b47c0cee1d7b7620002dfea 100644 (file)
@@ -20,7 +20,9 @@
 #include <linux/suspend.h>
 #include <linux/slab.h>
 
-#define IMX_MU_CHANS           17
+#include "mailbox.h"
+
+#define IMX_MU_CHANS           24
 /* TX0/RX0/RXDB[0-3] */
 #define IMX_MU_SCU_CHANS       6
 /* TX0/RX0 */
@@ -39,6 +41,7 @@ enum imx_mu_chan_type {
        IMX_MU_TYPE_TXDB        = 2, /* Tx doorbell */
        IMX_MU_TYPE_RXDB        = 3, /* Rx doorbell */
        IMX_MU_TYPE_RST         = 4, /* Reset */
+       IMX_MU_TYPE_TXDB_V2     = 5, /* Tx doorbell with S/W ACK */
 };
 
 enum imx_mu_xcr {
@@ -226,6 +229,9 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
                imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
                tasklet_schedule(&cp->txdb_tasklet);
                break;
+       case IMX_MU_TYPE_TXDB_V2:
+               imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
+               break;
        default:
                dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
                return -EINVAL;
@@ -554,6 +560,9 @@ static int imx_mu_startup(struct mbox_chan *chan)
        int ret;
 
        pm_runtime_get_sync(priv->dev);
+       if (cp->type == IMX_MU_TYPE_TXDB_V2)
+               return 0;
+
        if (cp->type == IMX_MU_TYPE_TXDB) {
                /* Tx doorbell don't have ACK support */
                tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
@@ -595,6 +604,11 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
        int ret;
        u32 sr;
 
+       if (cp->type == IMX_MU_TYPE_TXDB_V2) {
+               pm_runtime_put_sync(priv->dev);
+               return;
+       }
+
        if (cp->type == IMX_MU_TYPE_TXDB) {
                tasklet_kill(&cp->txdb_tasklet);
                pm_runtime_put_sync(priv->dev);
@@ -671,6 +685,7 @@ static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
 static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
                                       const struct of_phandle_args *sp)
 {
+       struct mbox_chan *p_chan;
        u32 type, idx, chan;
 
        if (sp->args_count != 2) {
@@ -680,14 +695,25 @@ static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
 
        type = sp->args[0]; /* channel type */
        idx = sp->args[1]; /* index */
-       chan = type * 4 + idx;
 
+       /* RST only supports 1 channel */
+       if ((type == IMX_MU_TYPE_RST) && idx) {
+               dev_err(mbox->dev, "Invalid RST channel %d\n", idx);
+               return ERR_PTR(-EINVAL);
+       }
+
+       chan = type * 4 + idx;
        if (chan >= mbox->num_chans) {
                dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
                return ERR_PTR(-EINVAL);
        }
 
-       return &mbox->chans[chan];
+       p_chan = &mbox->chans[chan];
+
+       if (type == IMX_MU_TYPE_TXDB_V2)
+               p_chan->txdone_method = TXDONE_BY_ACK;
+
+       return p_chan;
 }
 
 static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
index 823061dd8c8ea25d63ea65bfcd0ff2f5554131c7..b4b5bdd503cfa38cfea5813015391ac7a900f944 100644 (file)
@@ -17,8 +17,8 @@
 #include <linux/mailbox_controller.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/slab.h>
 
 #include "mailbox.h"
@@ -403,7 +403,6 @@ MODULE_DEVICE_TABLE(of, sti_mailbox_match);
 
 static int sti_mbox_probe(struct platform_device *pdev)
 {
-       const struct of_device_id *match;
        struct mbox_controller *mbox;
        struct sti_mbox_device *mdev;
        struct device_node *np = pdev->dev.of_node;
@@ -411,12 +410,11 @@ static int sti_mbox_probe(struct platform_device *pdev)
        int irq;
        int ret;
 
-       match = of_match_device(sti_mailbox_match, &pdev->dev);
-       if (!match) {
+       pdev->dev.platform_data = (struct sti_mbox_pdata *)device_get_match_data(&pdev->dev);
+       if (!pdev->dev.platform_data) {
                dev_err(&pdev->dev, "No configuration found\n");
                return -ENODEV;
        }
-       pdev->dev.platform_data = (struct sti_mbox_pdata *) match->data;
 
        mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL);
        if (!mdev)
index 4d62b07c14118ab4e413a653792caac8ec18121e..de862e9137d5fa0c706c699e7186e44efaabd2b5 100644 (file)
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/mailbox_controller.h>
 #include <linux/mailbox/mtk-cmdq-mailbox.h>
 #include <linux/of.h>
 
+#define CMDQ_MBOX_AUTOSUSPEND_DELAY_MS 100
+
 #define CMDQ_OP_CODE_MASK              (0xff << CMDQ_OP_CODE_SHIFT)
 #define CMDQ_NUM_CMD(t)                        (t->cmd_buf_size / CMDQ_INST_SIZE)
 #define CMDQ_GCE_NUM_MAX               (2)
@@ -283,10 +286,8 @@ static void cmdq_thread_irq_handler(struct cmdq *cmdq,
                        break;
        }
 
-       if (list_empty(&thread->task_busy_list)) {
+       if (list_empty(&thread->task_busy_list))
                cmdq_thread_disable(cmdq, thread);
-               clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
-       }
 }
 
 static irqreturn_t cmdq_irq_handler(int irq, void *dev)
@@ -307,9 +308,26 @@ static irqreturn_t cmdq_irq_handler(int irq, void *dev)
                spin_unlock_irqrestore(&thread->chan->lock, flags);
        }
 
+       pm_runtime_mark_last_busy(cmdq->mbox.dev);
+
        return IRQ_HANDLED;
 }
 
+static int cmdq_runtime_resume(struct device *dev)
+{
+       struct cmdq *cmdq = dev_get_drvdata(dev);
+
+       return clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks);
+}
+
+static int cmdq_runtime_suspend(struct device *dev)
+{
+       struct cmdq *cmdq = dev_get_drvdata(dev);
+
+       clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
+       return 0;
+}
+
 static int cmdq_suspend(struct device *dev)
 {
        struct cmdq *cmdq = dev_get_drvdata(dev);
@@ -333,16 +351,14 @@ static int cmdq_suspend(struct device *dev)
        if (cmdq->pdata->sw_ddr_en)
                cmdq_sw_ddr_enable(cmdq, false);
 
-       clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
-
-       return 0;
+       return pm_runtime_force_suspend(dev);
 }
 
 static int cmdq_resume(struct device *dev)
 {
        struct cmdq *cmdq = dev_get_drvdata(dev);
 
-       WARN_ON(clk_bulk_prepare(cmdq->pdata->gce_num, cmdq->clocks));
+       WARN_ON(pm_runtime_force_resume(dev));
        cmdq->suspended = false;
 
        if (cmdq->pdata->sw_ddr_en)
@@ -358,6 +374,9 @@ static int cmdq_remove(struct platform_device *pdev)
        if (cmdq->pdata->sw_ddr_en)
                cmdq_sw_ddr_enable(cmdq, false);
 
+       if (!IS_ENABLED(CONFIG_PM))
+               cmdq_runtime_suspend(&pdev->dev);
+
        clk_bulk_unprepare(cmdq->pdata->gce_num, cmdq->clocks);
        return 0;
 }
@@ -369,13 +388,20 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
        struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
        struct cmdq_task *task;
        unsigned long curr_pa, end_pa;
+       int ret;
 
        /* Client should not flush new tasks if suspended. */
        WARN_ON(cmdq->suspended);
 
+       ret = pm_runtime_get_sync(cmdq->mbox.dev);
+       if (ret < 0)
+               return ret;
+
        task = kzalloc(sizeof(*task), GFP_ATOMIC);
-       if (!task)
+       if (!task) {
+               pm_runtime_put_autosuspend(cmdq->mbox.dev);
                return -ENOMEM;
+       }
 
        task->cmdq = cmdq;
        INIT_LIST_HEAD(&task->list_entry);
@@ -384,8 +410,6 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
        task->pkt = pkt;
 
        if (list_empty(&thread->task_busy_list)) {
-               WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
-
                /*
                 * The thread reset will clear thread related register to 0,
                 * including pc, end, priority, irq, suspend and enable. Thus
@@ -424,6 +448,9 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
        }
        list_move_tail(&task->list_entry, &thread->task_busy_list);
 
+       pm_runtime_mark_last_busy(cmdq->mbox.dev);
+       pm_runtime_put_autosuspend(cmdq->mbox.dev);
+
        return 0;
 }
 
@@ -439,6 +466,8 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
        struct cmdq_task *task, *tmp;
        unsigned long flags;
 
+       WARN_ON(pm_runtime_get_sync(cmdq->mbox.dev));
+
        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
                goto done;
@@ -457,7 +486,6 @@ static void cmdq_mbox_shutdown(struct mbox_chan *chan)
        }
 
        cmdq_thread_disable(cmdq, thread);
-       clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
 
 done:
        /*
@@ -467,6 +495,9 @@ done:
         * to do any operation here, only unlock and leave.
         */
        spin_unlock_irqrestore(&thread->chan->lock, flags);
+
+       pm_runtime_mark_last_busy(cmdq->mbox.dev);
+       pm_runtime_put_autosuspend(cmdq->mbox.dev);
 }
 
 static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
@@ -477,6 +508,11 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
        struct cmdq_task *task, *tmp;
        unsigned long flags;
        u32 enable;
+       int ret;
+
+       ret = pm_runtime_get_sync(cmdq->mbox.dev);
+       if (ret < 0)
+               return ret;
 
        spin_lock_irqsave(&thread->chan->lock, flags);
        if (list_empty(&thread->task_busy_list))
@@ -497,10 +533,12 @@ static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
 
        cmdq_thread_resume(thread);
        cmdq_thread_disable(cmdq, thread);
-       clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
 
 out:
        spin_unlock_irqrestore(&thread->chan->lock, flags);
+       pm_runtime_mark_last_busy(cmdq->mbox.dev);
+       pm_runtime_put_autosuspend(cmdq->mbox.dev);
+
        return 0;
 
 wait:
@@ -513,6 +551,8 @@ wait:
 
                return -EFAULT;
        }
+       pm_runtime_mark_last_busy(cmdq->mbox.dev);
+       pm_runtime_put_autosuspend(cmdq->mbox.dev);
        return 0;
 }
 
@@ -642,12 +682,28 @@ static int cmdq_probe(struct platform_device *pdev)
                return err;
        }
 
+       /* If Runtime PM is not available enable the clocks now. */
+       if (!IS_ENABLED(CONFIG_PM)) {
+               err = cmdq_runtime_resume(dev);
+               if (err)
+                       return err;
+       }
+
+       err = devm_pm_runtime_enable(dev);
+       if (err)
+               return err;
+
+       pm_runtime_set_autosuspend_delay(dev, CMDQ_MBOX_AUTOSUSPEND_DELAY_MS);
+       pm_runtime_use_autosuspend(dev);
+
        return 0;
 }
 
 static const struct dev_pm_ops cmdq_pm_ops = {
        .suspend = cmdq_suspend,
        .resume = cmdq_resume,
+       SET_RUNTIME_PM_OPS(cmdq_runtime_suspend,
+                          cmdq_runtime_resume, NULL)
 };
 
 static const struct gce_plat gce_plat_v2 = {
index a94577f16a4730ab3f20e9100251806d1632a692..9d2d4ff6cda403b1dae8f195fe223db2ed28b657 100644 (file)
 #include <linux/kernel.h>
 #include <linux/mailbox_controller.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/soc/ti/ti-msgmgr.h>
 
 #define Q_DATA_OFFSET(proxy, queue, reg)       \
@@ -810,7 +810,6 @@ MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
 static int ti_msgmgr_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       const struct of_device_id *of_id;
        struct device_node *np;
        const struct ti_msgmgr_desc *desc;
        struct ti_msgmgr_inst *inst;
@@ -828,19 +827,12 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
        }
        np = dev->of_node;
 
-       of_id = of_match_device(ti_msgmgr_of_match, dev);
-       if (!of_id) {
-               dev_err(dev, "OF data missing\n");
-               return -EINVAL;
-       }
-       desc = of_id->data;
-
        inst = devm_kzalloc(dev, sizeof(*inst), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;
 
        inst->dev = dev;
-       inst->desc = desc;
+       inst->desc = desc = device_get_match_data(dev);
 
        inst->queue_proxy_region =
                devm_platform_ioremap_resource_byname(pdev, desc->data_region_name);
index 26d2bc7783944e8809439b61d74d958100f0a9d1..a51e98ab4958dd2d1b447559cbf8075f3fd354bb 100644 (file)
@@ -6,7 +6,7 @@
 # Please keep it in alphabetic order
 obj-$(CONFIG_CEC_CROS_EC) += cros-ec/
 obj-$(CONFIG_CEC_GPIO) += cec-gpio/
-obj-$(CONFIG_CEC_MESON_AO) += meson/
+obj-y += meson/
 obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/
 obj-$(CONFIG_CEC_SECO) += seco/
 obj-$(CONFIG_CEC_STI) += sti/
index c17faf002877d463ba7b24fbb6cbb84f964395c0..42dde3f0dbdef5d30629c7b8989fbe56b3d9e739 100644 (file)
 #define DRV_NAME       "cros-ec-cec"
 
 /**
- * struct cros_ec_cec - Driver data for EC CEC
+ * struct cros_ec_cec_port - Driver data for a single EC CEC port
  *
- * @cros_ec: Pointer to EC device
- * @notifier: Notifier info for responding to EC events
+ * @port_num: port number
  * @adap: CEC adapter
  * @notify: CEC notifier pointer
  * @rx_msg: storage for a received message
+ * @cros_ec_cec: pointer to the parent struct
  */
-struct cros_ec_cec {
-       struct cros_ec_device *cros_ec;
-       struct notifier_block notifier;
+struct cros_ec_cec_port {
+       int port_num;
        struct cec_adapter *adap;
        struct cec_notifier *notify;
        struct cec_msg rx_msg;
+       struct cros_ec_cec *cros_ec_cec;
+};
+
+/**
+ * struct cros_ec_cec - Driver data for EC CEC
+ *
+ * @cros_ec: Pointer to EC device
+ * @notifier: Notifier info for responding to EC events
+ * @write_cmd_version: Highest supported version of EC_CMD_CEC_WRITE_MSG.
+ * @num_ports: Number of CEC ports
+ * @ports: Array of ports
+ */
+struct cros_ec_cec {
+       struct cros_ec_device *cros_ec;
+       struct notifier_block notifier;
+       int write_cmd_version;
+       int num_ports;
+       struct cros_ec_cec_port *ports[EC_CEC_MAX_PORTS];
 };
 
+static void cros_ec_cec_received_message(struct cros_ec_cec_port *port,
+                                        uint8_t *msg, uint8_t len)
+{
+       if (len > CEC_MAX_MSG_SIZE)
+               len = CEC_MAX_MSG_SIZE;
+
+       port->rx_msg.len = len;
+       memcpy(port->rx_msg.msg, msg, len);
+
+       cec_received_msg(port->adap, &port->rx_msg);
+}
+
 static void handle_cec_message(struct cros_ec_cec *cros_ec_cec)
 {
        struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
        uint8_t *cec_message = cros_ec->event_data.data.cec_message;
        unsigned int len = cros_ec->event_size;
+       struct cros_ec_cec_port *port;
+       /*
+        * There are two ways of receiving CEC messages:
+        * 1. Old EC firmware which only supports one port sends the data in a
+        *    cec_message MKBP event.
+        * 2. New EC firmware which supports multiple ports uses
+        *    EC_MKBP_CEC_HAVE_DATA to notify that data is ready and
+        *    EC_CMD_CEC_READ_MSG to read it.
+        * Check that the EC only has one CEC port, and then we can assume the
+        * message is from port 0.
+        */
+       if (cros_ec_cec->num_ports != 1) {
+               dev_err(cros_ec->dev,
+                       "received cec_message on device with %d ports\n",
+                       cros_ec_cec->num_ports);
+               return;
+       }
+       port = cros_ec_cec->ports[0];
 
-       if (len > CEC_MAX_MSG_SIZE)
-               len = CEC_MAX_MSG_SIZE;
-       cros_ec_cec->rx_msg.len = len;
-       memcpy(cros_ec_cec->rx_msg.msg, cec_message, len);
+       cros_ec_cec_received_message(port, cec_message, len);
+}
+
+static void cros_ec_cec_read_message(struct cros_ec_cec_port *port)
+{
+       struct cros_ec_device *cros_ec = port->cros_ec_cec->cros_ec;
+       struct ec_params_cec_read params = {
+               .port = port->port_num,
+       };
+       struct ec_response_cec_read response;
+       int ret;
 
-       cec_received_msg(cros_ec_cec->adap, &cros_ec_cec->rx_msg);
+       ret = cros_ec_cmd(cros_ec, 0, EC_CMD_CEC_READ_MSG, &params,
+                         sizeof(params), &response, sizeof(response));
+       if (ret < 0) {
+               dev_err(cros_ec->dev,
+                       "error reading CEC message on EC: %d\n", ret);
+               return;
+       }
+
+       cros_ec_cec_received_message(port, response.msg, response.msg_len);
 }
 
 static void handle_cec_event(struct cros_ec_cec *cros_ec_cec)
 {
        struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
-       uint32_t events = cros_ec->event_data.data.cec_events;
+       uint32_t cec_events = cros_ec->event_data.data.cec_events;
+       uint32_t port_num = EC_MKBP_EVENT_CEC_GET_PORT(cec_events);
+       uint32_t events = EC_MKBP_EVENT_CEC_GET_EVENTS(cec_events);
+       struct cros_ec_cec_port *port;
+
+       if (port_num >= cros_ec_cec->num_ports) {
+               dev_err(cros_ec->dev,
+                       "received CEC event for invalid port %d\n", port_num);
+               return;
+       }
+       port = cros_ec_cec->ports[port_num];
 
        if (events & EC_MKBP_CEC_SEND_OK)
-               cec_transmit_attempt_done(cros_ec_cec->adap,
-                                         CEC_TX_STATUS_OK);
+               cec_transmit_attempt_done(port->adap, CEC_TX_STATUS_OK);
 
        /* FW takes care of all retries, tell core to avoid more retries */
        if (events & EC_MKBP_CEC_SEND_FAILED)
-               cec_transmit_attempt_done(cros_ec_cec->adap,
+               cec_transmit_attempt_done(port->adap,
                                          CEC_TX_STATUS_MAX_RETRIES |
                                          CEC_TX_STATUS_NACK);
+
+       if (events & EC_MKBP_CEC_HAVE_DATA)
+               cros_ec_cec_read_message(port);
 }
 
 static int cros_ec_cec_event(struct notifier_block *nb,
@@ -93,20 +167,18 @@ static int cros_ec_cec_event(struct notifier_block *nb,
 
 static int cros_ec_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
 {
-       struct cros_ec_cec *cros_ec_cec = adap->priv;
+       struct cros_ec_cec_port *port = adap->priv;
+       struct cros_ec_cec *cros_ec_cec = port->cros_ec_cec;
        struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
-       struct {
-               struct cros_ec_command msg;
-               struct ec_params_cec_set data;
-       } __packed msg = {};
+       struct ec_params_cec_set params = {
+               .cmd = CEC_CMD_LOGICAL_ADDRESS,
+               .port = port->port_num,
+               .val = logical_addr,
+       };
        int ret;
 
-       msg.msg.command = EC_CMD_CEC_SET;
-       msg.msg.outsize = sizeof(msg.data);
-       msg.data.cmd = CEC_CMD_LOGICAL_ADDRESS;
-       msg.data.val = logical_addr;
-
-       ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+       ret = cros_ec_cmd(cros_ec, 0, EC_CMD_CEC_SET, &params, sizeof(params),
+                         NULL, 0);
        if (ret < 0) {
                dev_err(cros_ec->dev,
                        "error setting CEC logical address on EC: %d\n", ret);
@@ -119,19 +191,26 @@ static int cros_ec_cec_set_log_addr(struct cec_adapter *adap, u8 logical_addr)
 static int cros_ec_cec_transmit(struct cec_adapter *adap, u8 attempts,
                                u32 signal_free_time, struct cec_msg *cec_msg)
 {
-       struct cros_ec_cec *cros_ec_cec = adap->priv;
+       struct cros_ec_cec_port *port = adap->priv;
+       struct cros_ec_cec *cros_ec_cec = port->cros_ec_cec;
        struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
-       struct {
-               struct cros_ec_command msg;
-               struct ec_params_cec_write data;
-       } __packed msg = {};
+       struct ec_params_cec_write params;
+       struct ec_params_cec_write_v1 params_v1;
        int ret;
 
-       msg.msg.command = EC_CMD_CEC_WRITE_MSG;
-       msg.msg.outsize = cec_msg->len;
-       memcpy(msg.data.msg, cec_msg->msg, cec_msg->len);
+       if (cros_ec_cec->write_cmd_version == 0) {
+               memcpy(params.msg, cec_msg->msg, cec_msg->len);
+               ret = cros_ec_cmd(cros_ec, 0, EC_CMD_CEC_WRITE_MSG, &params,
+                                 cec_msg->len, NULL, 0);
+       } else {
+               params_v1.port = port->port_num;
+               params_v1.msg_len = cec_msg->len;
+               memcpy(params_v1.msg, cec_msg->msg, cec_msg->len);
+               ret = cros_ec_cmd(cros_ec, cros_ec_cec->write_cmd_version,
+                                 EC_CMD_CEC_WRITE_MSG, &params_v1,
+                                 sizeof(params_v1), NULL, 0);
+       }
 
-       ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
        if (ret < 0) {
                dev_err(cros_ec->dev,
                        "error writing CEC msg on EC: %d\n", ret);
@@ -143,20 +222,18 @@ static int cros_ec_cec_transmit(struct cec_adapter *adap, u8 attempts,
 
 static int cros_ec_cec_adap_enable(struct cec_adapter *adap, bool enable)
 {
-       struct cros_ec_cec *cros_ec_cec = adap->priv;
+       struct cros_ec_cec_port *port = adap->priv;
+       struct cros_ec_cec *cros_ec_cec = port->cros_ec_cec;
        struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
-       struct {
-               struct cros_ec_command msg;
-               struct ec_params_cec_set data;
-       } __packed msg = {};
+       struct ec_params_cec_set params = {
+               .cmd = CEC_CMD_ENABLE,
+               .port = port->port_num,
+               .val = enable,
+       };
        int ret;
 
-       msg.msg.command = EC_CMD_CEC_SET;
-       msg.msg.outsize = sizeof(msg.data);
-       msg.data.cmd = CEC_CMD_ENABLE;
-       msg.data.val = enable;
-
-       ret = cros_ec_cmd_xfer_status(cros_ec, &msg.msg);
+       ret = cros_ec_cmd(cros_ec, 0, EC_CMD_CEC_SET, &params, sizeof(params),
+                         NULL, 0);
        if (ret < 0) {
                dev_err(cros_ec->dev,
                        "error %sabling CEC on EC: %d\n",
@@ -203,38 +280,54 @@ static SIMPLE_DEV_PM_OPS(cros_ec_cec_pm_ops,
 #if IS_ENABLED(CONFIG_PCI) && IS_ENABLED(CONFIG_DMI)
 
 /*
- * The Firmware only handles a single CEC interface tied to a single HDMI
- * connector we specify along with the DRM device name handling the HDMI output
+ * Specify the DRM device name handling the HDMI output and the HDMI connector
+ * corresponding to each CEC port. The order of connectors must match the order
+ * in the EC (first connector is EC port 0, ...), and the number of connectors
+ * must match the number of ports in the EC (which can be queried using the
+ * EC_CMD_CEC_PORT_COUNT host command).
  */
 
 struct cec_dmi_match {
        const char *sys_vendor;
        const char *product_name;
        const char *devname;
-       const char *conn;
+       const char *const *conns;
 };
 
+static const char *const port_b_conns[] = { "Port B", NULL };
+static const char *const port_db_conns[] = { "Port D", "Port B", NULL };
+static const char *const port_ba_conns[] = { "Port B", "Port A", NULL };
+static const char *const port_d_conns[] = { "Port D", NULL };
+
 static const struct cec_dmi_match cec_dmi_match_table[] = {
        /* Google Fizz */
-       { "Google", "Fizz", "0000:00:02.0", "Port B" },
+       { "Google", "Fizz", "0000:00:02.0", port_b_conns },
        /* Google Brask */
-       { "Google", "Brask", "0000:00:02.0", "Port B" },
+       { "Google", "Brask", "0000:00:02.0", port_b_conns },
        /* Google Moli */
-       { "Google", "Moli", "0000:00:02.0", "Port B" },
+       { "Google", "Moli", "0000:00:02.0", port_b_conns },
        /* Google Kinox */
-       { "Google", "Kinox", "0000:00:02.0", "Port B" },
+       { "Google", "Kinox", "0000:00:02.0", port_b_conns },
        /* Google Kuldax */
-       { "Google", "Kuldax", "0000:00:02.0", "Port B" },
+       { "Google", "Kuldax", "0000:00:02.0", port_b_conns },
        /* Google Aurash */
-       { "Google", "Aurash", "0000:00:02.0", "Port B" },
+       { "Google", "Aurash", "0000:00:02.0", port_b_conns },
        /* Google Gladios */
-       { "Google", "Gladios", "0000:00:02.0", "Port B" },
+       { "Google", "Gladios", "0000:00:02.0", port_b_conns },
        /* Google Lisbon */
-       { "Google", "Lisbon", "0000:00:02.0", "Port B" },
+       { "Google", "Lisbon", "0000:00:02.0", port_b_conns },
+       /* Google Dibbi */
+       { "Google", "Dibbi", "0000:00:02.0", port_db_conns },
+       /* Google Constitution */
+       { "Google", "Constitution", "0000:00:02.0", port_ba_conns },
+       /* Google Boxy */
+       { "Google", "Boxy", "0000:00:02.0", port_d_conns },
+       /* Google Taranza */
+       { "Google", "Taranza", "0000:00:02.0", port_db_conns },
 };
 
 static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
-                                               const char **conn)
+                                               const char * const **conns)
 {
        int i;
 
@@ -251,7 +344,7 @@ static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
                        if (!d)
                                return ERR_PTR(-EPROBE_DEFER);
                        put_device(d);
-                       *conn = m->conn;
+                       *conns = m->conns;
                        return d;
                }
        }
@@ -265,23 +358,137 @@ static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
 #else
 
 static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev,
-                                               const char **conn)
+                                               const char * const **conns)
 {
        return ERR_PTR(-ENODEV);
 }
 
 #endif
 
+static int cros_ec_cec_get_num_ports(struct cros_ec_cec *cros_ec_cec)
+{
+       struct ec_response_cec_port_count response;
+       int ret;
+
+       ret = cros_ec_cmd(cros_ec_cec->cros_ec, 0, EC_CMD_CEC_PORT_COUNT, NULL,
+                         0, &response, sizeof(response));
+       if (ret < 0) {
+               /*
+                * Old EC firmware only supports one port and does not support
+                * the port count command, so fall back to assuming one port.
+                */
+               cros_ec_cec->num_ports = 1;
+               return 0;
+       }
+
+       if (response.port_count == 0) {
+               dev_err(cros_ec_cec->cros_ec->dev,
+                       "EC reports 0 CEC ports\n");
+               return -ENODEV;
+       }
+
+       if (response.port_count > EC_CEC_MAX_PORTS) {
+               dev_err(cros_ec_cec->cros_ec->dev,
+                       "EC reports too many ports: %d\n", response.port_count);
+               return -EINVAL;
+       }
+
+       cros_ec_cec->num_ports = response.port_count;
+       return 0;
+}
+
+static int cros_ec_cec_get_write_cmd_version(struct cros_ec_cec *cros_ec_cec)
+{
+       struct cros_ec_device *cros_ec = cros_ec_cec->cros_ec;
+       struct ec_params_get_cmd_versions_v1 params = {
+               .cmd = EC_CMD_CEC_WRITE_MSG,
+       };
+       struct ec_response_get_cmd_versions response;
+       int ret;
+
+       ret = cros_ec_cmd(cros_ec, 1, EC_CMD_GET_CMD_VERSIONS, &params,
+                         sizeof(params), &response, sizeof(response));
+       if (ret < 0) {
+               dev_err(cros_ec->dev,
+                       "error getting CEC write command version: %d\n", ret);
+               return ret;
+       }
+
+       if (response.version_mask & EC_VER_MASK(1)) {
+               cros_ec_cec->write_cmd_version = 1;
+       } else {
+               if (cros_ec_cec->num_ports != 1) {
+                       dev_err(cros_ec->dev,
+                               "v0 write command only supports 1 port, %d reported\n",
+                               cros_ec_cec->num_ports);
+                       return -EINVAL;
+               }
+               cros_ec_cec->write_cmd_version = 0;
+       }
+
+       return 0;
+}
+
+static int cros_ec_cec_init_port(struct device *dev,
+                                struct cros_ec_cec *cros_ec_cec,
+                                int port_num, struct device *hdmi_dev,
+                                const char * const *conns)
+{
+       struct cros_ec_cec_port *port;
+       int ret;
+
+       port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+       if (!port)
+               return -ENOMEM;
+
+       port->cros_ec_cec = cros_ec_cec;
+       port->port_num = port_num;
+
+       port->adap = cec_allocate_adapter(&cros_ec_cec_ops, port, DRV_NAME,
+                                         CEC_CAP_DEFAULTS |
+                                         CEC_CAP_CONNECTOR_INFO, 1);
+       if (IS_ERR(port->adap))
+               return PTR_ERR(port->adap);
+
+       if (!conns[port_num]) {
+               dev_err(dev, "no conn for port %d\n", port_num);
+               ret = -ENODEV;
+               goto out_probe_adapter;
+       }
+
+       port->notify = cec_notifier_cec_adap_register(hdmi_dev, conns[port_num],
+                                                     port->adap);
+       if (!port->notify) {
+               ret = -ENOMEM;
+               goto out_probe_adapter;
+       }
+
+       ret = cec_register_adapter(port->adap, dev);
+       if (ret < 0)
+               goto out_probe_notify;
+
+       cros_ec_cec->ports[port_num] = port;
+
+       return 0;
+
+out_probe_notify:
+       cec_notifier_cec_adap_unregister(port->notify, port->adap);
+out_probe_adapter:
+       cec_delete_adapter(port->adap);
+       return ret;
+}
+
 static int cros_ec_cec_probe(struct platform_device *pdev)
 {
        struct cros_ec_dev *ec_dev = dev_get_drvdata(pdev->dev.parent);
        struct cros_ec_device *cros_ec = ec_dev->ec_dev;
        struct cros_ec_cec *cros_ec_cec;
+       struct cros_ec_cec_port *port;
        struct device *hdmi_dev;
-       const char *conn = NULL;
+       const char * const *conns = NULL;
        int ret;
 
-       hdmi_dev = cros_ec_cec_find_hdmi_dev(&pdev->dev, &conn);
+       hdmi_dev = cros_ec_cec_find_hdmi_dev(&pdev->dev, &conns);
        if (IS_ERR(hdmi_dev))
                return PTR_ERR(hdmi_dev);
 
@@ -295,18 +502,19 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
 
        device_init_wakeup(&pdev->dev, 1);
 
-       cros_ec_cec->adap = cec_allocate_adapter(&cros_ec_cec_ops, cros_ec_cec,
-                                                DRV_NAME,
-                                                CEC_CAP_DEFAULTS |
-                                                CEC_CAP_CONNECTOR_INFO, 1);
-       if (IS_ERR(cros_ec_cec->adap))
-               return PTR_ERR(cros_ec_cec->adap);
+       ret = cros_ec_cec_get_num_ports(cros_ec_cec);
+       if (ret)
+               return ret;
 
-       cros_ec_cec->notify = cec_notifier_cec_adap_register(hdmi_dev, conn,
-                                                            cros_ec_cec->adap);
-       if (!cros_ec_cec->notify) {
-               ret = -ENOMEM;
-               goto out_probe_adapter;
+       ret = cros_ec_cec_get_write_cmd_version(cros_ec_cec);
+       if (ret)
+               return ret;
+
+       for (int i = 0; i < cros_ec_cec->num_ports; i++) {
+               ret = cros_ec_cec_init_port(&pdev->dev, cros_ec_cec, i,
+                                           hdmi_dev, conns);
+               if (ret)
+                       goto unregister_ports;
        }
 
        /* Get CEC events from the EC. */
@@ -315,20 +523,24 @@ static int cros_ec_cec_probe(struct platform_device *pdev)
                                               &cros_ec_cec->notifier);
        if (ret) {
                dev_err(&pdev->dev, "failed to register notifier\n");
-               goto out_probe_notify;
+               goto unregister_ports;
        }
 
-       ret = cec_register_adapter(cros_ec_cec->adap, &pdev->dev);
-       if (ret < 0)
-               goto out_probe_notify;
-
        return 0;
 
-out_probe_notify:
-       cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
-                                        cros_ec_cec->adap);
-out_probe_adapter:
-       cec_delete_adapter(cros_ec_cec->adap);
+unregister_ports:
+       /*
+        * Unregister any adapters which have been registered. We don't add the
+        * port to the array until the adapter has been registered successfully,
+        * so any non-NULL ports must have been registered.
+        */
+       for (int i = 0; i < cros_ec_cec->num_ports; i++) {
+               port = cros_ec_cec->ports[i];
+               if (!port)
+                       break;
+               cec_notifier_cec_adap_unregister(port->notify, port->adap);
+               cec_unregister_adapter(port->adap);
+       }
        return ret;
 }
 
@@ -336,6 +548,7 @@ static void cros_ec_cec_remove(struct platform_device *pdev)
 {
        struct cros_ec_cec *cros_ec_cec = platform_get_drvdata(pdev);
        struct device *dev = &pdev->dev;
+       struct cros_ec_cec_port *port;
        int ret;
 
        /*
@@ -349,9 +562,11 @@ static void cros_ec_cec_remove(struct platform_device *pdev)
        if (ret)
                dev_err(dev, "failed to unregister notifier\n");
 
-       cec_notifier_cec_adap_unregister(cros_ec_cec->notify,
-                                        cros_ec_cec->adap);
-       cec_unregister_adapter(cros_ec_cec->adap);
+       for (int i = 0; i < cros_ec_cec->num_ports; i++) {
+               port = cros_ec_cec->ports[i];
+               cec_notifier_cec_adap_unregister(port->notify, port->adap);
+               cec_unregister_adapter(port->adap);
+       }
 }
 
 static struct platform_driver cros_ec_cec_driver = {
index e0beefd80d7bc9b7a70cd0e09acbea184044a6d0..73990e469df9da993af59e6ae0ac1c62e76b4a72 100644 (file)
@@ -353,31 +353,21 @@ static const struct file_operations debugfs_stats_ops = {
 int smsdvb_debugfs_create(struct smsdvb_client_t *client)
 {
        struct smscore_device_t *coredev = client->coredev;
-       struct dentry *d;
        struct smsdvb_debugfs *debug_data;
 
        if (!smsdvb_debugfs_usb_root || !coredev->is_usb_device)
                return -ENODEV;
 
-       client->debugfs = debugfs_create_dir(coredev->devpath,
-                                            smsdvb_debugfs_usb_root);
-       if (IS_ERR_OR_NULL(client->debugfs)) {
-               pr_info("Unable to create debugfs %s directory.\n",
-                       coredev->devpath);
-               return -ENODEV;
-       }
-
-       d = debugfs_create_file("stats", S_IRUGO | S_IWUSR, client->debugfs,
-                               client, &debugfs_stats_ops);
-       if (!d) {
-               debugfs_remove(client->debugfs);
-               return -ENOMEM;
-       }
-
        debug_data = kzalloc(sizeof(*client->debug_data), GFP_KERNEL);
        if (!debug_data)
                return -ENOMEM;
 
+       client->debugfs = debugfs_create_dir(coredev->devpath,
+                                            smsdvb_debugfs_usb_root);
+
+       debugfs_create_file("stats", S_IRUGO | S_IWUSR, client->debugfs,
+                           client, &debugfs_stats_ops);
+
        client->debug_data        = debug_data;
        client->prt_dvb_stats     = smsdvb_print_dvb_stats;
        client->prt_isdb_stats    = smsdvb_print_isdb_stats;
index fd87747be9b177c07c25048a62fbbded363fab5e..41f289c75cbb6e17c928265a226fc9cb3cef31c2 100644 (file)
@@ -159,7 +159,7 @@ EXPORT_SYMBOL(frame_vector_to_pfns);
 struct frame_vector *frame_vector_create(unsigned int nr_frames)
 {
        struct frame_vector *vec;
-       int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames;
+       int size = struct_size(vec, ptrs, nr_frames);
 
        if (WARN_ON_ONCE(nr_frames == 0))
                return NULL;
index cf6727d9c81f36b1fb6a4fc3850e63cfb860963c..27aee92f3eea40e649ca9d32ef0dfd376e2c1d60 100644 (file)
@@ -2890,7 +2890,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
                if (copy_timestamp)
                        b->timestamp = ktime_get_ns();
                ret = vb2_core_qbuf(q, index, NULL, NULL);
-               dprintk(q, 5, "vb2_dbuf result: %d\n", ret);
+               dprintk(q, 5, "vb2_qbuf result: %d\n", ret);
                if (ret)
                        return ret;
 
index 2fa455d4a0480e95fd8c42b3e00db321370d396b..3d4fd4ef53107c6519442d24d11e7c449b0fa8ba 100644 (file)
@@ -542,13 +542,14 @@ static void vb2_dc_put_userptr(void *buf_priv)
                 */
                dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
                                  DMA_ATTR_SKIP_CPU_SYNC);
-               pages = frame_vector_pages(buf->vec);
-               /* sgt should exist only if vector contains pages... */
-               BUG_ON(IS_ERR(pages));
                if (buf->dma_dir == DMA_FROM_DEVICE ||
-                   buf->dma_dir == DMA_BIDIRECTIONAL)
-                       for (i = 0; i < frame_vector_count(buf->vec); i++)
-                               set_page_dirty_lock(pages[i]);
+                               buf->dma_dir == DMA_BIDIRECTIONAL) {
+                       pages = frame_vector_pages(buf->vec);
+                       /* sgt should exist only if vector contains pages... */
+                       if (!WARN_ON_ONCE(IS_ERR(pages)))
+                               for (i = 0; i < frame_vector_count(buf->vec); i++)
+                                       set_page_dirty_lock(pages[i]);
+               }
                sg_free_table(sgt);
                kfree(sgt);
        } else {
index 7c635e29210623a6b7f57bc87b0bcc97f8f8b6ac..7d953706f3f8002f7463273aaf5a145e17339b96 100644 (file)
@@ -133,13 +133,15 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
 
        if (!buf->vec->is_pfns) {
                n_pages = frame_vector_count(buf->vec);
-               pages = frame_vector_pages(buf->vec);
                if (vaddr)
                        vm_unmap_ram((void *)vaddr, n_pages);
                if (buf->dma_dir == DMA_FROM_DEVICE ||
-                   buf->dma_dir == DMA_BIDIRECTIONAL)
-                       for (i = 0; i < n_pages; i++)
-                               set_page_dirty_lock(pages[i]);
+                   buf->dma_dir == DMA_BIDIRECTIONAL) {
+                       pages = frame_vector_pages(buf->vec);
+                       if (!WARN_ON_ONCE(IS_ERR(pages)))
+                               for (i = 0; i < n_pages; i++)
+                                       set_page_dirty_lock(pages[i]);
+               }
        } else {
                iounmap((__force void __iomem *)buf->vaddr);
        }
index a738573c8cd7a020a6b0e3d680594e7f275b5bd9..19d8de400a68759dc12610ee32c3d6f12020e397 100644 (file)
@@ -4779,8 +4779,8 @@ set_frequency(struct drx_demod_instance *demod,
        bool image_to_select;
        s32 fm_frequency_shift = 0;
 
-       rf_mirror = (ext_attr->mirror == DRX_MIRROR_YES) ? true : false;
-       tuner_mirror = demod->my_common_attr->mirror_freq_spect ? false : true;
+       rf_mirror = ext_attr->mirror == DRX_MIRROR_YES;
+       tuner_mirror = !demod->my_common_attr->mirror_freq_spect;
        /*
           Program frequency shifter
           No need to account for mirroring on RF
@@ -8765,7 +8765,7 @@ static int qam_flip_spec(struct drx_demod_instance *demod, struct drx_channel *c
                goto rw_error;
        }
        ext_attr->iqm_fs_rate_ofs = iqm_fs_rate_ofs;
-       ext_attr->pos_image = (ext_attr->pos_image) ? false : true;
+       ext_attr->pos_image = !ext_attr->pos_image;
 
        /* freeze dq/fq updating */
        rc = drxj_dap_read_reg16(dev_addr, QAM_DQ_MODE__A, &data, 0);
index cf037b61b226bc1bcf8d0bdfe4a8bb757a154d5a..26c67ef05d1350db3041c62db31412fdd0f4ef19 100644 (file)
@@ -1920,8 +1920,7 @@ static void m88ds3103_remove(struct i2c_client *client)
 
        dev_dbg(&client->dev, "\n");
 
-       if (dev->dt_client)
-               i2c_unregister_device(dev->dt_client);
+       i2c_unregister_device(dev->dt_client);
 
        i2c_mux_del_adapters(dev->muxc);
 
index 74ff833ff48cab48aab0023ff41c92d2ac739fc3..59ee0ca2c978c25116036c77191f743e549d39a5 100644 (file)
@@ -99,6 +99,7 @@ config VIDEO_IMX214
 
 config VIDEO_IMX219
        tristate "Sony IMX219 sensor support"
+       select V4L2_CCI_I2C
        help
          This is a Video4Linux2 sensor driver for the Sony
          IMX219 camera.
@@ -215,6 +216,16 @@ config VIDEO_MT9M111
          This driver supports MT9M111, MT9M112 and MT9M131 cameras from
          Micron/Aptina
 
+config VIDEO_MT9M114
+       tristate "onsemi MT9M114 sensor support"
+       select V4L2_CCI_I2C
+       help
+         This is a Video4Linux2 sensor-level driver for the onsemi MT9M114
+         camera.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mt9m114.
+
 config VIDEO_MT9P031
        tristate "Aptina MT9P031 support"
        select VIDEO_APTINA_PLL
index 80b00d39b48f0c764f1d1c874e5cd7750220ed45..f5010f80a21f3f631df20653636768ad3a092ec2 100644 (file)
@@ -65,6 +65,7 @@ obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
 obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
 obj-$(CONFIG_VIDEO_MT9M001) += mt9m001.o
 obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
+obj-$(CONFIG_VIDEO_MT9M114) += mt9m114.o
 obj-$(CONFIG_VIDEO_MT9P031) += mt9p031.o
 obj-$(CONFIG_VIDEO_MT9T112) += mt9t112.o
 obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o
index 98ca417b8004cfb73df8813df65fe8b324e89da3..5ace7b5804d44caf49120313ed6a94e670ca7129 100644 (file)
@@ -411,43 +411,44 @@ static int adp1653_of_init(struct i2c_client *client,
                           struct device_node *node)
 {
        struct adp1653_platform_data *pd;
-       struct device_node *child;
+       struct device_node *node_indicator = NULL;
+       struct device_node *node_flash;
 
        pd = devm_kzalloc(&client->dev, sizeof(*pd), GFP_KERNEL);
        if (!pd)
                return -ENOMEM;
        flash->platform_data = pd;
 
-       child = of_get_child_by_name(node, "flash");
-       if (!child)
+       node_flash = of_get_child_by_name(node, "flash");
+       if (!node_flash)
                return -EINVAL;
 
-       if (of_property_read_u32(child, "flash-timeout-us",
+       if (of_property_read_u32(node_flash, "flash-timeout-us",
                                 &pd->max_flash_timeout))
                goto err;
 
-       if (of_property_read_u32(child, "flash-max-microamp",
+       if (of_property_read_u32(node_flash, "flash-max-microamp",
                                 &pd->max_flash_intensity))
                goto err;
 
        pd->max_flash_intensity /= 1000;
 
-       if (of_property_read_u32(child, "led-max-microamp",
+       if (of_property_read_u32(node_flash, "led-max-microamp",
                                 &pd->max_torch_intensity))
                goto err;
 
        pd->max_torch_intensity /= 1000;
-       of_node_put(child);
 
-       child = of_get_child_by_name(node, "indicator");
-       if (!child)
-               return -EINVAL;
+       node_indicator = of_get_child_by_name(node, "indicator");
+       if (!node_indicator)
+               goto err;
 
-       if (of_property_read_u32(child, "led-max-microamp",
+       if (of_property_read_u32(node_indicator, "led-max-microamp",
                                 &pd->max_indicator_intensity))
                goto err;
 
-       of_node_put(child);
+       of_node_put(node_flash);
+       of_node_put(node_indicator);
 
        pd->enable_gpio = devm_gpiod_get(&client->dev, "enable", GPIOD_OUT_LOW);
        if (IS_ERR(pd->enable_gpio)) {
@@ -458,7 +459,8 @@ static int adp1653_of_init(struct i2c_client *client,
        return 0;
 err:
        dev_err(&client->dev, "Required property not found\n");
-       of_node_put(child);
+       of_node_put(node_flash);
+       of_node_put(node_indicator);
        return -EINVAL;
 }
 
index 99ba925e8ec8e673138b50a7a7da6070f939b8a1..54134473186bf9b812af0ed7808d0cbc45a5c822 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2013 Cogent Embedded, Inc.
  * Copyright (C) 2013 Renesas Solutions Corp.
  */
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/errno.h>
@@ -1395,7 +1396,6 @@ out_unlock:
 
 static int adv7180_probe(struct i2c_client *client)
 {
-       const struct i2c_device_id *id = i2c_client_get_device_id(client);
        struct device_node *np = client->dev.of_node;
        struct adv7180_state *state;
        struct v4l2_subdev *sd;
@@ -1411,7 +1411,7 @@ static int adv7180_probe(struct i2c_client *client)
 
        state->client = client;
        state->field = V4L2_FIELD_ALTERNATE;
-       state->chip_info = (struct adv7180_chip_info *)id->driver_data;
+       state->chip_info = i2c_get_match_data(client);
 
        state->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
                                                   GPIOD_OUT_HIGH);
@@ -1536,22 +1536,6 @@ static void adv7180_remove(struct i2c_client *client)
        mutex_destroy(&state->mutex);
 }
 
-static const struct i2c_device_id adv7180_id[] = {
-       { "adv7180", (kernel_ulong_t)&adv7180_info },
-       { "adv7180cp", (kernel_ulong_t)&adv7180_info },
-       { "adv7180st", (kernel_ulong_t)&adv7180_info },
-       { "adv7182", (kernel_ulong_t)&adv7182_info },
-       { "adv7280", (kernel_ulong_t)&adv7280_info },
-       { "adv7280-m", (kernel_ulong_t)&adv7280_m_info },
-       { "adv7281", (kernel_ulong_t)&adv7281_info },
-       { "adv7281-m", (kernel_ulong_t)&adv7281_m_info },
-       { "adv7281-ma", (kernel_ulong_t)&adv7281_ma_info },
-       { "adv7282", (kernel_ulong_t)&adv7282_info },
-       { "adv7282-m", (kernel_ulong_t)&adv7282_m_info },
-       {},
-};
-MODULE_DEVICE_TABLE(i2c, adv7180_id);
-
 #ifdef CONFIG_PM_SLEEP
 static int adv7180_suspend(struct device *dev)
 {
@@ -1585,30 +1569,43 @@ static SIMPLE_DEV_PM_OPS(adv7180_pm_ops, adv7180_suspend, adv7180_resume);
 #define ADV7180_PM_OPS NULL
 #endif
 
-#ifdef CONFIG_OF
-static const struct of_device_id adv7180_of_id[] = {
-       { .compatible = "adi,adv7180", },
-       { .compatible = "adi,adv7180cp", },
-       { .compatible = "adi,adv7180st", },
-       { .compatible = "adi,adv7182", },
-       { .compatible = "adi,adv7280", },
-       { .compatible = "adi,adv7280-m", },
-       { .compatible = "adi,adv7281", },
-       { .compatible = "adi,adv7281-m", },
-       { .compatible = "adi,adv7281-ma", },
-       { .compatible = "adi,adv7282", },
-       { .compatible = "adi,adv7282-m", },
-       { },
+static const struct i2c_device_id adv7180_id[] = {
+       { "adv7180", (kernel_ulong_t)&adv7180_info },
+       { "adv7180cp", (kernel_ulong_t)&adv7180_info },
+       { "adv7180st", (kernel_ulong_t)&adv7180_info },
+       { "adv7182", (kernel_ulong_t)&adv7182_info },
+       { "adv7280", (kernel_ulong_t)&adv7280_info },
+       { "adv7280-m", (kernel_ulong_t)&adv7280_m_info },
+       { "adv7281", (kernel_ulong_t)&adv7281_info },
+       { "adv7281-m", (kernel_ulong_t)&adv7281_m_info },
+       { "adv7281-ma", (kernel_ulong_t)&adv7281_ma_info },
+       { "adv7282", (kernel_ulong_t)&adv7282_info },
+       { "adv7282-m", (kernel_ulong_t)&adv7282_m_info },
+       {}
 };
+MODULE_DEVICE_TABLE(i2c, adv7180_id);
 
+static const struct of_device_id adv7180_of_id[] = {
+       { .compatible = "adi,adv7180", &adv7180_info },
+       { .compatible = "adi,adv7180cp", &adv7180_info },
+       { .compatible = "adi,adv7180st", &adv7180_info },
+       { .compatible = "adi,adv7182", &adv7182_info },
+       { .compatible = "adi,adv7280", &adv7280_info },
+       { .compatible = "adi,adv7280-m", &adv7280_m_info },
+       { .compatible = "adi,adv7281", &adv7281_info },
+       { .compatible = "adi,adv7281-m", &adv7281_m_info },
+       { .compatible = "adi,adv7281-ma", &adv7281_ma_info },
+       { .compatible = "adi,adv7282", &adv7282_info },
+       { .compatible = "adi,adv7282-m", &adv7282_m_info },
+       {}
+};
 MODULE_DEVICE_TABLE(of, adv7180_of_id);
-#endif
 
 static struct i2c_driver adv7180_driver = {
        .driver = {
                   .name = KBUILD_MODNAME,
                   .pm = ADV7180_PM_OPS,
-                  .of_match_table = of_match_ptr(adv7180_of_id),
+                  .of_match_table = adv7180_of_id,
                   },
        .probe = adv7180_probe,
        .remove = adv7180_remove,
index a4e39871e8f75c558af3f0e244aee985425102c1..701f36345f1e767212bc6b6b7a547c553e1c3ae4 100644 (file)
@@ -133,8 +133,6 @@ struct ar0521_dev {
                u16 mult2;
                u16 vt_pix;
        } pll;
-
-       bool streaming;
 };
 
 static inline struct ar0521_dev *to_ar0521_dev(struct v4l2_subdev *sd)
@@ -991,12 +989,9 @@ static int ar0521_s_stream(struct v4l2_subdev *sd, int enable)
        int ret;
 
        mutex_lock(&sensor->lock);
-
        ret = ar0521_set_stream(sensor, enable);
-       if (!ret)
-               sensor->streaming = enable;
-
        mutex_unlock(&sensor->lock);
+
        return ret;
 }
 
@@ -1023,28 +1018,6 @@ static const struct v4l2_subdev_ops ar0521_subdev_ops = {
        .pad = &ar0521_pad_ops,
 };
 
-static int __maybe_unused ar0521_suspend(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ar0521_dev *sensor = to_ar0521_dev(sd);
-
-       if (sensor->streaming)
-               ar0521_set_stream(sensor, 0);
-
-       return 0;
-}
-
-static int __maybe_unused ar0521_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ar0521_dev *sensor = to_ar0521_dev(sd);
-
-       if (sensor->streaming)
-               return ar0521_set_stream(sensor, 1);
-
-       return 0;
-}
-
 static int ar0521_probe(struct i2c_client *client)
 {
        struct v4l2_fwnode_endpoint ep = {
@@ -1183,7 +1156,6 @@ static void ar0521_remove(struct i2c_client *client)
 }
 
 static const struct dev_pm_ops ar0521_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ar0521_suspend, ar0521_resume)
        SET_RUNTIME_PM_OPS(ar0521_power_off, ar0521_power_on, NULL)
 };
 static const struct of_device_id ar0521_dt_ids[] = {
index 49e0d9a0953028b0fdc5b00d8b45d14cff7dfd71..12e6f0a26fc817edf3d24e022c81b3351c579a6a 100644 (file)
@@ -508,9 +508,8 @@ static void __ccs_update_exposure_limits(struct ccs_sensor *sensor)
        struct v4l2_ctrl *ctrl = sensor->exposure;
        int max;
 
-       max = sensor->pixel_array->crop[CCS_PA_PAD_SRC].height
-               + sensor->vblank->val
-               - CCS_LIM(sensor, COARSE_INTEGRATION_TIME_MAX_MARGIN);
+       max = sensor->pa_src.height + sensor->vblank->val -
+               CCS_LIM(sensor, COARSE_INTEGRATION_TIME_MAX_MARGIN);
 
        __v4l2_ctrl_modify_range(ctrl, ctrl->minimum, max, ctrl->step, max);
 }
@@ -728,15 +727,12 @@ static int ccs_set_ctrl(struct v4l2_ctrl *ctrl)
                break;
        case V4L2_CID_VBLANK:
                rval = ccs_write(sensor, FRAME_LENGTH_LINES,
-                                sensor->pixel_array->crop[
-                                        CCS_PA_PAD_SRC].height
-                                + ctrl->val);
+                                sensor->pa_src.height + ctrl->val);
 
                break;
        case V4L2_CID_HBLANK:
                rval = ccs_write(sensor, LINE_LENGTH_PCK,
-                                sensor->pixel_array->crop[CCS_PA_PAD_SRC].width
-                                + ctrl->val);
+                                sensor->pa_src.width + ctrl->val);
 
                break;
        case V4L2_CID_TEST_PATTERN:
@@ -1214,15 +1210,13 @@ static void ccs_update_blanking(struct ccs_sensor *sensor)
 
        min = max_t(int,
                    CCS_LIM(sensor, MIN_FRAME_BLANKING_LINES),
-                   min_fll - sensor->pixel_array->crop[CCS_PA_PAD_SRC].height);
-       max = max_fll - sensor->pixel_array->crop[CCS_PA_PAD_SRC].height;
+                   min_fll - sensor->pa_src.height);
+       max = max_fll - sensor->pa_src.height;
 
        __v4l2_ctrl_modify_range(vblank, min, max, vblank->step, min);
 
-       min = max_t(int,
-                   min_llp - sensor->pixel_array->crop[CCS_PA_PAD_SRC].width,
-                   min_lbp);
-       max = max_llp - sensor->pixel_array->crop[CCS_PA_PAD_SRC].width;
+       min = max_t(int, min_llp - sensor->pa_src.width, min_lbp);
+       max = max_llp - sensor->pa_src.width;
 
        __v4l2_ctrl_modify_range(hblank, min, max, hblank->step, min);
 
@@ -1246,10 +1240,8 @@ static int ccs_pll_blanking_update(struct ccs_sensor *sensor)
 
        dev_dbg(&client->dev, "real timeperframe\t100/%d\n",
                sensor->pll.pixel_rate_pixel_array /
-               ((sensor->pixel_array->crop[CCS_PA_PAD_SRC].width
-                 + sensor->hblank->val) *
-                (sensor->pixel_array->crop[CCS_PA_PAD_SRC].height
-                 + sensor->vblank->val) / 100));
+               ((sensor->pa_src.width + sensor->hblank->val) *
+                (sensor->pa_src.height + sensor->vblank->val) / 100));
 
        return 0;
 }
@@ -1756,28 +1748,22 @@ static int ccs_start_streaming(struct ccs_sensor *sensor)
                goto out;
 
        /* Analog crop start coordinates */
-       rval = ccs_write(sensor, X_ADDR_START,
-                        sensor->pixel_array->crop[CCS_PA_PAD_SRC].left);
+       rval = ccs_write(sensor, X_ADDR_START, sensor->pa_src.left);
        if (rval < 0)
                goto out;
 
-       rval = ccs_write(sensor, Y_ADDR_START,
-                        sensor->pixel_array->crop[CCS_PA_PAD_SRC].top);
+       rval = ccs_write(sensor, Y_ADDR_START, sensor->pa_src.top);
        if (rval < 0)
                goto out;
 
        /* Analog crop end coordinates */
-       rval = ccs_write(
-               sensor, X_ADDR_END,
-               sensor->pixel_array->crop[CCS_PA_PAD_SRC].left
-               + sensor->pixel_array->crop[CCS_PA_PAD_SRC].width - 1);
+       rval = ccs_write(sensor, X_ADDR_END,
+                        sensor->pa_src.left + sensor->pa_src.width - 1);
        if (rval < 0)
                goto out;
 
-       rval = ccs_write(
-               sensor, Y_ADDR_END,
-               sensor->pixel_array->crop[CCS_PA_PAD_SRC].top
-               + sensor->pixel_array->crop[CCS_PA_PAD_SRC].height - 1);
+       rval = ccs_write(sensor, Y_ADDR_END,
+                        sensor->pa_src.top + sensor->pa_src.height - 1);
        if (rval < 0)
                goto out;
 
@@ -1789,27 +1775,23 @@ static int ccs_start_streaming(struct ccs_sensor *sensor)
        /* Digital crop */
        if (CCS_LIM(sensor, DIGITAL_CROP_CAPABILITY)
            == CCS_DIGITAL_CROP_CAPABILITY_INPUT_CROP) {
-               rval = ccs_write(
-                       sensor, DIGITAL_CROP_X_OFFSET,
-                       sensor->scaler->crop[CCS_PAD_SINK].left);
+               rval = ccs_write(sensor, DIGITAL_CROP_X_OFFSET,
+                                sensor->scaler_sink.left);
                if (rval < 0)
                        goto out;
 
-               rval = ccs_write(
-                       sensor, DIGITAL_CROP_Y_OFFSET,
-                       sensor->scaler->crop[CCS_PAD_SINK].top);
+               rval = ccs_write(sensor, DIGITAL_CROP_Y_OFFSET,
+                                sensor->scaler_sink.top);
                if (rval < 0)
                        goto out;
 
-               rval = ccs_write(
-                       sensor, DIGITAL_CROP_IMAGE_WIDTH,
-                       sensor->scaler->crop[CCS_PAD_SINK].width);
+               rval = ccs_write(sensor, DIGITAL_CROP_IMAGE_WIDTH,
+                                sensor->scaler_sink.width);
                if (rval < 0)
                        goto out;
 
-               rval = ccs_write(
-                       sensor, DIGITAL_CROP_IMAGE_HEIGHT,
-                       sensor->scaler->crop[CCS_PAD_SINK].height);
+               rval = ccs_write(sensor, DIGITAL_CROP_IMAGE_HEIGHT,
+                                sensor->scaler_sink.height);
                if (rval < 0)
                        goto out;
        }
@@ -1827,12 +1809,10 @@ static int ccs_start_streaming(struct ccs_sensor *sensor)
        }
 
        /* Output size from sensor */
-       rval = ccs_write(sensor, X_OUTPUT_SIZE,
-                        sensor->src->crop[CCS_PAD_SRC].width);
+       rval = ccs_write(sensor, X_OUTPUT_SIZE, sensor->src_src.width);
        if (rval < 0)
                goto out;
-       rval = ccs_write(sensor, Y_OUTPUT_SIZE,
-                        sensor->src->crop[CCS_PAD_SRC].height);
+       rval = ccs_write(sensor, Y_OUTPUT_SIZE, sensor->src_src.height);
        if (rval < 0)
                goto out;
 
@@ -1923,9 +1903,6 @@ static int ccs_set_stream(struct v4l2_subdev *subdev, int enable)
        struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
        int rval;
 
-       if (sensor->streaming == enable)
-               return 0;
-
        if (!enable) {
                ccs_stop_streaming(sensor);
                sensor->streaming = false;
@@ -2053,24 +2030,8 @@ static int __ccs_get_format(struct v4l2_subdev *subdev,
                            struct v4l2_subdev_state *sd_state,
                            struct v4l2_subdev_format *fmt)
 {
-       struct ccs_subdev *ssd = to_ccs_subdev(subdev);
-
-       if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-               fmt->format = *v4l2_subdev_get_try_format(subdev, sd_state,
-                                                         fmt->pad);
-       } else {
-               struct v4l2_rect *r;
-
-               if (fmt->pad == ssd->source_pad)
-                       r = &ssd->crop[ssd->source_pad];
-               else
-                       r = &ssd->sink_fmt;
-
-               fmt->format.code = __ccs_get_mbus_code(subdev, fmt->pad);
-               fmt->format.width = r->width;
-               fmt->format.height = r->height;
-               fmt->format.field = V4L2_FIELD_NONE;
-       }
+       fmt->format = *v4l2_subdev_get_pad_format(subdev, sd_state, fmt->pad);
+       fmt->format.code = __ccs_get_mbus_code(subdev, fmt->pad);
 
        return 0;
 }
@@ -2092,28 +2053,18 @@ static int ccs_get_format(struct v4l2_subdev *subdev,
 static void ccs_get_crop_compose(struct v4l2_subdev *subdev,
                                 struct v4l2_subdev_state *sd_state,
                                 struct v4l2_rect **crops,
-                                struct v4l2_rect **comps, int which)
+                                struct v4l2_rect **comps)
 {
        struct ccs_subdev *ssd = to_ccs_subdev(subdev);
        unsigned int i;
 
-       if (which == V4L2_SUBDEV_FORMAT_ACTIVE) {
-               if (crops)
-                       for (i = 0; i < subdev->entity.num_pads; i++)
-                               crops[i] = &ssd->crop[i];
-               if (comps)
-                       *comps = &ssd->compose;
-       } else {
-               if (crops) {
-                       for (i = 0; i < subdev->entity.num_pads; i++)
-                               crops[i] = v4l2_subdev_get_try_crop(subdev,
-                                                                   sd_state,
-                                                                   i);
-               }
-               if (comps)
-                       *comps = v4l2_subdev_get_try_compose(subdev, sd_state,
-                                                            CCS_PAD_SINK);
-       }
+       if (crops)
+               for (i = 0; i < subdev->entity.num_pads; i++)
+                       crops[i] =
+                               v4l2_subdev_get_pad_crop(subdev, sd_state, i);
+       if (comps)
+               *comps = v4l2_subdev_get_pad_compose(subdev, sd_state,
+                                                    ssd->sink_pad);
 }
 
 /* Changes require propagation only on sink pad. */
@@ -2124,8 +2075,9 @@ static void ccs_propagate(struct v4l2_subdev *subdev,
        struct ccs_sensor *sensor = to_ccs_sensor(subdev);
        struct ccs_subdev *ssd = to_ccs_subdev(subdev);
        struct v4l2_rect *comp, *crops[CCS_PADS];
+       struct v4l2_mbus_framefmt *fmt;
 
-       ccs_get_crop_compose(subdev, sd_state, crops, &comp, which);
+       ccs_get_crop_compose(subdev, sd_state, crops, &comp);
 
        switch (target) {
        case V4L2_SEL_TGT_CROP:
@@ -2136,6 +2088,7 @@ static void ccs_propagate(struct v4l2_subdev *subdev,
                                sensor->scale_m = CCS_LIM(sensor, SCALER_N_MIN);
                                sensor->scaling_mode =
                                        CCS_SCALING_MODE_NO_SCALING;
+                               sensor->scaler_sink = *comp;
                        } else if (ssd == sensor->binner) {
                                sensor->binning_horizontal = 1;
                                sensor->binning_vertical = 1;
@@ -2144,6 +2097,11 @@ static void ccs_propagate(struct v4l2_subdev *subdev,
                fallthrough;
        case V4L2_SEL_TGT_COMPOSE:
                *crops[CCS_PAD_SRC] = *comp;
+               fmt = v4l2_subdev_get_pad_format(subdev, sd_state, CCS_PAD_SRC);
+               fmt->width = comp->width;
+               fmt->height = comp->height;
+               if (which == V4L2_SUBDEV_FORMAT_ACTIVE && ssd == sensor->src)
+                       sensor->src_src = *crops[CCS_PAD_SRC];
                break;
        default:
                WARN_ON_ONCE(1);
@@ -2252,14 +2210,12 @@ static int ccs_set_format(struct v4l2_subdev *subdev,
                      CCS_LIM(sensor, MIN_Y_OUTPUT_SIZE),
                      CCS_LIM(sensor, MAX_Y_OUTPUT_SIZE));
 
-       ccs_get_crop_compose(subdev, sd_state, crops, NULL, fmt->which);
+       ccs_get_crop_compose(subdev, sd_state, crops, NULL);
 
        crops[ssd->sink_pad]->left = 0;
        crops[ssd->sink_pad]->top = 0;
        crops[ssd->sink_pad]->width = fmt->format.width;
        crops[ssd->sink_pad]->height = fmt->format.height;
-       if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
-               ssd->sink_fmt = *crops[ssd->sink_pad];
        ccs_propagate(subdev, sd_state, fmt->which, V4L2_SEL_TGT_CROP);
 
        mutex_unlock(&sensor->mutex);
@@ -2482,7 +2438,7 @@ static int ccs_set_compose(struct v4l2_subdev *subdev,
        struct ccs_subdev *ssd = to_ccs_subdev(subdev);
        struct v4l2_rect *comp, *crops[CCS_PADS];
 
-       ccs_get_crop_compose(subdev, sd_state, crops, &comp, sel->which);
+       ccs_get_crop_compose(subdev, sd_state, crops, &comp);
 
        sel->r.top = 0;
        sel->r.left = 0;
@@ -2501,8 +2457,8 @@ static int ccs_set_compose(struct v4l2_subdev *subdev,
        return 0;
 }
 
-static int __ccs_sel_supported(struct v4l2_subdev *subdev,
-                              struct v4l2_subdev_selection *sel)
+static int ccs_sel_supported(struct v4l2_subdev *subdev,
+                            struct v4l2_subdev_selection *sel)
 {
        struct ccs_sensor *sensor = to_ccs_sensor(subdev);
        struct ccs_subdev *ssd = to_ccs_subdev(subdev);
@@ -2545,33 +2501,18 @@ static int ccs_set_crop(struct v4l2_subdev *subdev,
 {
        struct ccs_sensor *sensor = to_ccs_sensor(subdev);
        struct ccs_subdev *ssd = to_ccs_subdev(subdev);
-       struct v4l2_rect *src_size, *crops[CCS_PADS];
-       struct v4l2_rect _r;
+       struct v4l2_rect src_size = { 0 }, *crops[CCS_PADS], *comp;
 
-       ccs_get_crop_compose(subdev, sd_state, crops, NULL, sel->which);
+       ccs_get_crop_compose(subdev, sd_state, crops, &comp);
 
-       if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
-               if (sel->pad == ssd->sink_pad)
-                       src_size = &ssd->sink_fmt;
-               else
-                       src_size = &ssd->compose;
+       if (sel->pad == ssd->sink_pad) {
+               struct v4l2_mbus_framefmt *mfmt =
+                       v4l2_subdev_get_pad_format(subdev, sd_state, sel->pad);
+
+               src_size.width = mfmt->width;
+               src_size.height = mfmt->height;
        } else {
-               if (sel->pad == ssd->sink_pad) {
-                       _r.left = 0;
-                       _r.top = 0;
-                       _r.width = v4l2_subdev_get_try_format(subdev,
-                                                             sd_state,
-                                                             sel->pad)
-                               ->width;
-                       _r.height = v4l2_subdev_get_try_format(subdev,
-                                                              sd_state,
-                                                              sel->pad)
-                               ->height;
-                       src_size = &_r;
-               } else {
-                       src_size = v4l2_subdev_get_try_compose(
-                               subdev, sd_state, ssd->sink_pad);
-               }
+               src_size = *comp;
        }
 
        if (ssd == sensor->src && sel->pad == CCS_PAD_SRC) {
@@ -2579,16 +2520,19 @@ static int ccs_set_crop(struct v4l2_subdev *subdev,
                sel->r.top = 0;
        }
 
-       sel->r.width = min(sel->r.width, src_size->width);
-       sel->r.height = min(sel->r.height, src_size->height);
+       sel->r.width = min(sel->r.width, src_size.width);
+       sel->r.height = min(sel->r.height, src_size.height);
 
-       sel->r.left = min_t(int, sel->r.left, src_size->width - sel->r.width);
-       sel->r.top = min_t(int, sel->r.top, src_size->height - sel->r.height);
+       sel->r.left = min_t(int, sel->r.left, src_size.width - sel->r.width);
+       sel->r.top = min_t(int, sel->r.top, src_size.height - sel->r.height);
 
        *crops[sel->pad] = sel->r;
 
        if (ssd != sensor->pixel_array && sel->pad == CCS_PAD_SINK)
                ccs_propagate(subdev, sd_state, sel->which, V4L2_SEL_TGT_CROP);
+       else if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE &&
+                ssd == sensor->pixel_array)
+               sensor->pa_src = sel->r;
 
        return 0;
 }
@@ -2601,44 +2545,36 @@ static void ccs_get_native_size(struct ccs_subdev *ssd, struct v4l2_rect *r)
        r->height = CCS_LIM(ssd->sensor, Y_ADDR_MAX) + 1;
 }
 
-static int __ccs_get_selection(struct v4l2_subdev *subdev,
-                              struct v4l2_subdev_state *sd_state,
-                              struct v4l2_subdev_selection *sel)
+static int ccs_get_selection(struct v4l2_subdev *subdev,
+                            struct v4l2_subdev_state *sd_state,
+                            struct v4l2_subdev_selection *sel)
 {
        struct ccs_sensor *sensor = to_ccs_sensor(subdev);
        struct ccs_subdev *ssd = to_ccs_subdev(subdev);
        struct v4l2_rect *comp, *crops[CCS_PADS];
-       struct v4l2_rect sink_fmt;
        int ret;
 
-       ret = __ccs_sel_supported(subdev, sel);
+       ret = ccs_sel_supported(subdev, sel);
        if (ret)
                return ret;
 
-       ccs_get_crop_compose(subdev, sd_state, crops, &comp, sel->which);
-
-       if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
-               sink_fmt = ssd->sink_fmt;
-       } else {
-               struct v4l2_mbus_framefmt *fmt =
-                       v4l2_subdev_get_try_format(subdev, sd_state,
-                                                  ssd->sink_pad);
-
-               sink_fmt.left = 0;
-               sink_fmt.top = 0;
-               sink_fmt.width = fmt->width;
-               sink_fmt.height = fmt->height;
-       }
+       ccs_get_crop_compose(subdev, sd_state, crops, &comp);
 
        switch (sel->target) {
        case V4L2_SEL_TGT_CROP_BOUNDS:
        case V4L2_SEL_TGT_NATIVE_SIZE:
-               if (ssd == sensor->pixel_array)
+               if (ssd == sensor->pixel_array) {
                        ccs_get_native_size(ssd, &sel->r);
-               else if (sel->pad == ssd->sink_pad)
-                       sel->r = sink_fmt;
-               else
+               } else if (sel->pad == ssd->sink_pad) {
+                       struct v4l2_mbus_framefmt *sink_fmt =
+                               v4l2_subdev_get_pad_format(subdev, sd_state,
+                                                          ssd->sink_pad);
+                       sel->r.top = sel->r.left = 0;
+                       sel->r.width = sink_fmt->width;
+                       sel->r.height = sink_fmt->height;
+               } else {
                        sel->r = *comp;
+               }
                break;
        case V4L2_SEL_TGT_CROP:
        case V4L2_SEL_TGT_COMPOSE_BOUNDS:
@@ -2652,20 +2588,6 @@ static int __ccs_get_selection(struct v4l2_subdev *subdev,
        return 0;
 }
 
-static int ccs_get_selection(struct v4l2_subdev *subdev,
-                            struct v4l2_subdev_state *sd_state,
-                            struct v4l2_subdev_selection *sel)
-{
-       struct ccs_sensor *sensor = to_ccs_sensor(subdev);
-       int rval;
-
-       mutex_lock(&sensor->mutex);
-       rval = __ccs_get_selection(subdev, sd_state, sel);
-       mutex_unlock(&sensor->mutex);
-
-       return rval;
-}
-
 static int ccs_set_selection(struct v4l2_subdev *subdev,
                             struct v4l2_subdev_state *sd_state,
                             struct v4l2_subdev_selection *sel)
@@ -2673,7 +2595,7 @@ static int ccs_set_selection(struct v4l2_subdev *subdev,
        struct ccs_sensor *sensor = to_ccs_sensor(subdev);
        int ret;
 
-       ret = __ccs_sel_supported(subdev, sel);
+       ret = ccs_sel_supported(subdev, sel);
        if (ret)
                return ret;
 
@@ -2945,7 +2867,6 @@ static int ccs_identify_module(struct ccs_sensor *sensor)
 }
 
 static const struct v4l2_subdev_ops ccs_ops;
-static const struct v4l2_subdev_internal_ops ccs_internal_ops;
 static const struct media_entity_operations ccs_entity_ops;
 
 static int ccs_register_subdev(struct ccs_sensor *sensor,
@@ -2959,12 +2880,6 @@ static int ccs_register_subdev(struct ccs_sensor *sensor,
        if (!sink_ssd)
                return 0;
 
-       rval = media_entity_pads_init(&ssd->sd.entity, ssd->npads, ssd->pads);
-       if (rval) {
-               dev_err(&client->dev, "media_entity_pads_init failed\n");
-               return rval;
-       }
-
        rval = v4l2_device_register_subdev(sensor->src->sd.v4l2_dev, &ssd->sd);
        if (rval) {
                dev_err(&client->dev, "v4l2_device_register_subdev failed\n");
@@ -3025,6 +2940,12 @@ out_err:
 static void ccs_cleanup(struct ccs_sensor *sensor)
 {
        struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+       unsigned int i;
+
+       for (i = 0; i < sensor->ssds_used; i++) {
+               v4l2_subdev_cleanup(&sensor->ssds[i].sd);
+               media_entity_cleanup(&sensor->ssds[i].sd.entity);
+       }
 
        device_remove_file(&client->dev, &dev_attr_nvm);
        device_remove_file(&client->dev, &dev_attr_ident);
@@ -3032,14 +2953,17 @@ static void ccs_cleanup(struct ccs_sensor *sensor)
        ccs_free_controls(sensor);
 }
 
-static void ccs_create_subdev(struct ccs_sensor *sensor,
-                             struct ccs_subdev *ssd, const char *name,
-                             unsigned short num_pads, u32 function)
+static int ccs_init_subdev(struct ccs_sensor *sensor,
+                          struct ccs_subdev *ssd, const char *name,
+                          unsigned short num_pads, u32 function,
+                          const char *lock_name,
+                          struct lock_class_key *lock_key)
 {
        struct i2c_client *client = v4l2_get_subdevdata(&sensor->src->sd);
+       int rval;
 
        if (!ssd)
-               return;
+               return 0;
 
        if (ssd != sensor->src)
                v4l2_subdev_init(&ssd->sd, &ccs_ops);
@@ -3053,57 +2977,70 @@ static void ccs_create_subdev(struct ccs_sensor *sensor,
 
        v4l2_i2c_subdev_set_name(&ssd->sd, client, sensor->minfo.name, name);
 
-       ccs_get_native_size(ssd, &ssd->sink_fmt);
-
-       ssd->compose.width = ssd->sink_fmt.width;
-       ssd->compose.height = ssd->sink_fmt.height;
-       ssd->crop[ssd->source_pad] = ssd->compose;
        ssd->pads[ssd->source_pad].flags = MEDIA_PAD_FL_SOURCE;
-       if (ssd != sensor->pixel_array) {
-               ssd->crop[ssd->sink_pad] = ssd->compose;
+       if (ssd != sensor->pixel_array)
                ssd->pads[ssd->sink_pad].flags = MEDIA_PAD_FL_SINK;
-       }
 
        ssd->sd.entity.ops = &ccs_entity_ops;
 
-       if (ssd == sensor->src)
-               return;
+       if (ssd != sensor->src) {
+               ssd->sd.owner = THIS_MODULE;
+               ssd->sd.dev = &client->dev;
+               v4l2_set_subdevdata(&ssd->sd, client);
+       }
+
+       rval = media_entity_pads_init(&ssd->sd.entity, ssd->npads, ssd->pads);
+       if (rval) {
+               dev_err(&client->dev, "media_entity_pads_init failed\n");
+               return rval;
+       }
 
-       ssd->sd.internal_ops = &ccs_internal_ops;
-       ssd->sd.owner = THIS_MODULE;
-       ssd->sd.dev = &client->dev;
-       v4l2_set_subdevdata(&ssd->sd, client);
+       rval = __v4l2_subdev_init_finalize(&ssd->sd, lock_name, lock_key);
+       if (rval) {
+               media_entity_cleanup(&ssd->sd.entity);
+               return rval;
+       }
+
+       return 0;
 }
 
-static int ccs_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+static int ccs_init_cfg(struct v4l2_subdev *sd,
+                       struct v4l2_subdev_state *sd_state)
 {
        struct ccs_subdev *ssd = to_ccs_subdev(sd);
        struct ccs_sensor *sensor = ssd->sensor;
-       unsigned int i;
+       unsigned int pad = ssd == sensor->pixel_array ?
+               CCS_PA_PAD_SRC : CCS_PAD_SINK;
+       struct v4l2_mbus_framefmt *fmt =
+               v4l2_subdev_get_pad_format(sd, sd_state, pad);
+       struct v4l2_rect *crop =
+               v4l2_subdev_get_pad_crop(sd, sd_state, pad);
+       bool is_active = !sd->active_state || sd->active_state == sd_state;
 
        mutex_lock(&sensor->mutex);
 
-       for (i = 0; i < ssd->npads; i++) {
-               struct v4l2_mbus_framefmt *try_fmt =
-                       v4l2_subdev_get_try_format(sd, fh->state, i);
-               struct v4l2_rect *try_crop =
-                       v4l2_subdev_get_try_crop(sd, fh->state, i);
-               struct v4l2_rect *try_comp;
+       ccs_get_native_size(ssd, crop);
 
-               ccs_get_native_size(ssd, try_crop);
+       fmt->width = crop->width;
+       fmt->height = crop->height;
+       fmt->code = sensor->internal_csi_format->code;
+       fmt->field = V4L2_FIELD_NONE;
 
-               try_fmt->width = try_crop->width;
-               try_fmt->height = try_crop->height;
-               try_fmt->code = sensor->internal_csi_format->code;
-               try_fmt->field = V4L2_FIELD_NONE;
+       if (ssd == sensor->pixel_array) {
+               if (is_active)
+                       sensor->pa_src = *crop;
 
-               if (ssd != sensor->pixel_array)
-                       continue;
-
-               try_comp = v4l2_subdev_get_try_compose(sd, fh->state, i);
-               *try_comp = *try_crop;
+               mutex_unlock(&sensor->mutex);
+               return 0;
        }
 
+       fmt = v4l2_subdev_get_pad_format(sd, sd_state, CCS_PAD_SRC);
+       fmt->code = ssd == sensor->src ?
+               sensor->csi_format->code : sensor->internal_csi_format->code;
+       fmt->field = V4L2_FIELD_NONE;
+
+       ccs_propagate(sd, sd_state, is_active, V4L2_SEL_TGT_CROP);
+
        mutex_unlock(&sensor->mutex);
 
        return 0;
@@ -3116,6 +3053,7 @@ static const struct v4l2_subdev_video_ops ccs_video_ops = {
 };
 
 static const struct v4l2_subdev_pad_ops ccs_pad_ops = {
+       .init_cfg = ccs_init_cfg,
        .enum_mbus_code = ccs_enum_mbus_code,
        .get_fmt = ccs_get_format,
        .set_fmt = ccs_set_format,
@@ -3141,53 +3079,12 @@ static const struct media_entity_operations ccs_entity_ops = {
 static const struct v4l2_subdev_internal_ops ccs_internal_src_ops = {
        .registered = ccs_registered,
        .unregistered = ccs_unregistered,
-       .open = ccs_open,
-};
-
-static const struct v4l2_subdev_internal_ops ccs_internal_ops = {
-       .open = ccs_open,
 };
 
 /* -----------------------------------------------------------------------------
  * I2C Driver
  */
 
-static int __maybe_unused ccs_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *subdev = i2c_get_clientdata(client);
-       struct ccs_sensor *sensor = to_ccs_sensor(subdev);
-       bool streaming = sensor->streaming;
-       int rval;
-
-       rval = pm_runtime_resume_and_get(dev);
-       if (rval < 0)
-               return rval;
-
-       if (sensor->streaming)
-               ccs_stop_streaming(sensor);
-
-       /* save state for resume */
-       sensor->streaming = streaming;
-
-       return 0;
-}
-
-static int __maybe_unused ccs_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *subdev = i2c_get_clientdata(client);
-       struct ccs_sensor *sensor = to_ccs_sensor(subdev);
-       int rval = 0;
-
-       pm_runtime_put(dev);
-
-       if (sensor->streaming)
-               rval = ccs_start_streaming(sensor);
-
-       return rval;
-}
-
 static int ccs_get_hwconfig(struct ccs_sensor *sensor, struct device *dev)
 {
        struct ccs_hwconfig *hwcfg = &sensor->hwcfg;
@@ -3311,6 +3208,8 @@ static int ccs_firmware_name(struct i2c_client *client,
 
 static int ccs_probe(struct i2c_client *client)
 {
+       static struct lock_class_key pixel_array_lock_key, binner_lock_key,
+               scaler_lock_key;
        const struct ccs_device *ccsdev = device_get_match_data(&client->dev);
        struct ccs_sensor *sensor;
        const struct firmware *fw;
@@ -3587,12 +3486,27 @@ static int ccs_probe(struct i2c_client *client)
        sensor->pll.ext_clk_freq_hz = sensor->hwcfg.ext_clk;
        sensor->pll.scale_n = CCS_LIM(sensor, SCALER_N_MIN);
 
-       ccs_create_subdev(sensor, sensor->scaler, " scaler", 2,
-                         MEDIA_ENT_F_PROC_VIDEO_SCALER);
-       ccs_create_subdev(sensor, sensor->binner, " binner", 2,
-                         MEDIA_ENT_F_PROC_VIDEO_SCALER);
-       ccs_create_subdev(sensor, sensor->pixel_array, " pixel_array", 1,
-                         MEDIA_ENT_F_CAM_SENSOR);
+       rval = ccs_get_mbus_formats(sensor);
+       if (rval) {
+               rval = -ENODEV;
+               goto out_cleanup;
+       }
+
+       rval = ccs_init_subdev(sensor, sensor->scaler, " scaler", 2,
+                              MEDIA_ENT_F_PROC_VIDEO_SCALER,
+                              "ccs scaler mutex", &scaler_lock_key);
+       if (rval)
+               goto out_cleanup;
+       rval = ccs_init_subdev(sensor, sensor->binner, " binner", 2,
+                              MEDIA_ENT_F_PROC_VIDEO_SCALER,
+                              "ccs binner mutex", &binner_lock_key);
+       if (rval)
+               goto out_cleanup;
+       rval = ccs_init_subdev(sensor, sensor->pixel_array, " pixel_array", 1,
+                              MEDIA_ENT_F_CAM_SENSOR, "ccs pixel array mutex",
+                              &pixel_array_lock_key);
+       if (rval)
+               goto out_cleanup;
 
        rval = ccs_init_controls(sensor);
        if (rval < 0)
@@ -3602,12 +3516,6 @@ static int ccs_probe(struct i2c_client *client)
        if (rval)
                goto out_cleanup;
 
-       rval = ccs_get_mbus_formats(sensor);
-       if (rval) {
-               rval = -ENODEV;
-               goto out_cleanup;
-       }
-
        rval = ccs_init_late_controls(sensor);
        if (rval) {
                rval = -ENODEV;
@@ -3625,14 +3533,9 @@ static int ccs_probe(struct i2c_client *client)
        sensor->streaming = false;
        sensor->dev_init_done = true;
 
-       rval = media_entity_pads_init(&sensor->src->sd.entity, 2,
-                                sensor->src->pads);
-       if (rval < 0)
-               goto out_media_entity_cleanup;
-
        rval = ccs_write_msr_regs(sensor);
        if (rval)
-               goto out_media_entity_cleanup;
+               goto out_cleanup;
 
        pm_runtime_set_active(&client->dev);
        pm_runtime_get_noresume(&client->dev);
@@ -3652,9 +3555,6 @@ out_disable_runtime_pm:
        pm_runtime_put_noidle(&client->dev);
        pm_runtime_disable(&client->dev);
 
-out_media_entity_cleanup:
-       media_entity_cleanup(&sensor->src->sd.entity);
-
 out_cleanup:
        ccs_cleanup(sensor);
 
@@ -3687,10 +3587,8 @@ static void ccs_remove(struct i2c_client *client)
                ccs_power_off(&client->dev);
        pm_runtime_set_suspended(&client->dev);
 
-       for (i = 0; i < sensor->ssds_used; i++) {
+       for (i = 0; i < sensor->ssds_used; i++)
                v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
-               media_entity_cleanup(&sensor->ssds[i].sd.entity);
-       }
        ccs_cleanup(sensor);
        mutex_destroy(&sensor->mutex);
        kfree(sensor->ccs_limits);
@@ -3720,7 +3618,6 @@ static const struct of_device_id ccs_of_table[] = {
 MODULE_DEVICE_TABLE(of, ccs_of_table);
 
 static const struct dev_pm_ops ccs_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ccs_suspend, ccs_resume)
        SET_RUNTIME_PM_OPS(ccs_power_off, ccs_power_on, NULL)
 };
 
index 5838fcda92fd490ce6d4fd0f73540eca18ec251b..0b1a64958d714ed783e60b6fadbcae6a716deaa9 100644 (file)
@@ -32,12 +32,10 @@ struct ccs_sensor;
  *             @reg: Pointer to the register to access
  *             @value: Register value, set by the caller on write, or
  *                     by the quirk on read
- *
- * @flags: Quirk flags
- *
  *             @return: 0 on success, -ENOIOCTLCMD if no register
  *                      access may be done by the caller (default read
  *                      value is zero), else negative error code on error
+ * @flags: Quirk flags
  */
 struct ccs_quirk {
        int (*limits)(struct ccs_sensor *sensor);
index a94c796cea487153b013301a37276d02b60cbd40..9c3587b2fbe7311d77fb1d4895438a36410c05df 100644 (file)
@@ -182,9 +182,6 @@ struct ccs_binning_subtype {
 struct ccs_subdev {
        struct v4l2_subdev sd;
        struct media_pad pads[CCS_PADS];
-       struct v4l2_rect sink_fmt;
-       struct v4l2_rect crop[CCS_PADS];
-       struct v4l2_rect compose; /* compose on sink */
        unsigned short sink_pad;
        unsigned short source_pad;
        int npads;
@@ -220,6 +217,7 @@ struct ccs_sensor {
        u32 mbus_frame_fmts;
        const struct ccs_csi_data_format *csi_format;
        const struct ccs_csi_data_format *internal_csi_format;
+       struct v4l2_rect pa_src, scaler_sink, src_src;
        u32 default_mbus_frame_fmts;
        int default_pixel_order;
        struct ccs_data_container sdata, mdata;
index 5aec252890624044242d9a04bc11e2ad2f041c06..04461c893d9068af975bf6168ff99bf99da35849 100644 (file)
@@ -2738,10 +2738,801 @@ static int cx25840_irq_handler(struct v4l2_subdev *sd, u32 status,
 #define DIF_BPF_COEFF3435      (0x38c)
 #define DIF_BPF_COEFF36                (0x390)
 
+static const u32 ifhz_coeffs[][19] = {
+       {       // 3.0 MHz
+               0x00000002, 0x00080012, 0x001e0024, 0x001bfff8,
+               0xffb4ff50, 0xfed8fe68, 0xfe24fe34, 0xfebaffc7,
+               0x014d031f, 0x04f0065d, 0x07010688, 0x04c901d6,
+               0xfe00f9d3, 0xf600f342, 0xf235f337, 0xf64efb22,
+               0x0105070f, 0x0c460fce, 0x110d0000,
+       }, {    // 3.1 MHz
+               0x00000001, 0x00070012, 0x00220032, 0x00370026,
+               0xfff0ff91, 0xff0efe7c, 0xfe01fdcc, 0xfe0afedb,
+               0x00440224, 0x0434060c, 0x0738074e, 0x06090361,
+               0xff99fb39, 0xf6fef3b6, 0xf21af2a5, 0xf573fa33,
+               0x0034067d, 0x0bfb0fb9, 0x110d0000,
+       }, {    // 3.2 MHz
+               0x00000000, 0x0004000e, 0x00200038, 0x004c004f,
+               0x002fffdf, 0xff5cfeb6, 0xfe0dfd92, 0xfd7ffe03,
+               0xff36010a, 0x03410575, 0x072607d2, 0x071804d5,
+               0x0134fcb7, 0xf81ff451, 0xf223f22e, 0xf4a7f94b,
+               0xff6405e8, 0x0bae0fa4, 0x110d0000,
+       }, {    // 3.3 MHz
+               0x0000ffff, 0x00000008, 0x001a0036, 0x0056006d,
+               0x00670030, 0xffbdff10, 0xfe46fd8d, 0xfd25fd4f,
+               0xfe35ffe0, 0x0224049f, 0x06c9080e, 0x07ef0627,
+               0x02c9fe45, 0xf961f513, 0xf250f1d2, 0xf3ecf869,
+               0xfe930552, 0x0b5f0f8f, 0x110d0000,
+       }, {    // 3.4 MHz
+               0xfffffffe, 0xfffd0001, 0x000f002c, 0x0054007d,
+               0x0093007c, 0x0024ff82, 0xfea6fdbb, 0xfd03fcca,
+               0xfd51feb9, 0x00eb0392, 0x06270802, 0x08880750,
+               0x044dffdb, 0xfabdf5f8, 0xf2a0f193, 0xf342f78f,
+               0xfdc404b9, 0x0b0e0f78, 0x110d0000,
+       }, {    // 3.5 MHz
+               0xfffffffd, 0xfffafff9, 0x0002001b, 0x0046007d,
+               0x00ad00ba, 0x00870000, 0xff26fe1a, 0xfd1bfc7e,
+               0xfc99fda4, 0xffa5025c, 0x054507ad, 0x08dd0847,
+               0x05b80172, 0xfc2ef6ff, 0xf313f170, 0xf2abf6bd,
+               0xfcf6041f, 0x0abc0f61, 0x110d0000,
+       }, {    // 3.6 MHz
+               0xfffffffd, 0xfff8fff3, 0xfff50006, 0x002f006c,
+               0x00b200e3, 0x00dc007e, 0xffb9fea0, 0xfd6bfc71,
+               0xfc17fcb1, 0xfe65010b, 0x042d0713, 0x08ec0906,
+               0x07020302, 0xfdaff823, 0xf3a7f16a, 0xf228f5f5,
+               0xfc2a0384, 0x0a670f4a, 0x110d0000,
+       }, {    // 3.7 MHz
+               0x0000fffd, 0xfff7ffef, 0xffe9fff1, 0x0010004d,
+               0x00a100f2, 0x011a00f0, 0x0053ff44, 0xfdedfca2,
+               0xfbd3fbef, 0xfd39ffae, 0x02ea0638, 0x08b50987,
+               0x08230483, 0xff39f960, 0xf45bf180, 0xf1b8f537,
+               0xfb6102e7, 0x0a110f32, 0x110d0000,
+       }, {    // 3.8 MHz
+               0x0000fffe, 0xfff9ffee, 0xffe1ffdd, 0xfff00024,
+               0x007c00e5, 0x013a014a, 0x00e6fff8, 0xfe98fd0f,
+               0xfbd3fb67, 0xfc32fe54, 0x01880525, 0x083909c7,
+               0x091505ee, 0x00c7fab3, 0xf52df1b4, 0xf15df484,
+               0xfa9b0249, 0x09ba0f19, 0x110d0000,
+       }, {    // 3.9 MHz
+               0x00000000, 0xfffbfff0, 0xffdeffcf, 0xffd1fff6,
+               0x004800be, 0x01390184, 0x016300ac, 0xff5efdb1,
+               0xfc17fb23, 0xfb5cfd0d, 0x001703e4, 0x077b09c4,
+               0x09d2073c, 0x0251fc18, 0xf61cf203, 0xf118f3dc,
+               0xf9d801aa, 0x09600eff, 0x110d0000,
+       }, {    // 4.0 MHz
+               0x00000001, 0xfffefff4, 0xffe1ffc8, 0xffbaffca,
+               0x000b0082, 0x01170198, 0x01c10152, 0x0030fe7b,
+               0xfc99fb24, 0xfac3fbe9, 0xfea5027f, 0x0683097f,
+               0x0a560867, 0x03d2fd89, 0xf723f26f, 0xf0e8f341,
+               0xf919010a, 0x09060ee5, 0x110d0000,
+       }, {    // 4.1 MHz
+               0x00010002, 0x0002fffb, 0xffe8ffca, 0xffacffa4,
+               0xffcd0036, 0x00d70184, 0x01f601dc, 0x00ffff60,
+               0xfd51fb6d, 0xfa6efaf5, 0xfd410103, 0x055708f9,
+               0x0a9e0969, 0x0543ff02, 0xf842f2f5, 0xf0cef2b2,
+               0xf85e006b, 0x08aa0ecb, 0x110d0000,
+       }, {    // 4.2 MHz
+               0x00010003, 0x00050003, 0xfff3ffd3, 0xffaaff8b,
+               0xff95ffe5, 0x0080014a, 0x01fe023f, 0x01ba0050,
+               0xfe35fbf8, 0xfa62fa3b, 0xfbf9ff7e, 0x04010836,
+               0x0aa90a3d, 0x069f007f, 0xf975f395, 0xf0cbf231,
+               0xf7a9ffcb, 0x084c0eaf, 0x110d0000,
+       }, {    // 4.3 MHz
+               0x00010003, 0x0008000a, 0x0000ffe4, 0xffb4ff81,
+               0xff6aff96, 0x001c00f0, 0x01d70271, 0x0254013b,
+               0xff36fcbd, 0xfa9ff9c5, 0xfadbfdfe, 0x028c073b,
+               0x0a750adf, 0x07e101fa, 0xfab8f44e, 0xf0ddf1be,
+               0xf6f9ff2b, 0x07ed0e94, 0x110d0000,
+       }, {    // 4.4 MHz
+               0x00000003, 0x0009000f, 0x000efff8, 0xffc9ff87,
+               0xff52ff54, 0xffb5007e, 0x01860270, 0x02c00210,
+               0x0044fdb2, 0xfb22f997, 0xf9f2fc90, 0x0102060f,
+               0x0a050b4c, 0x0902036e, 0xfc0af51e, 0xf106f15a,
+               0xf64efe8b, 0x078d0e77, 0x110d0000,
+       }, {    // 4.5 MHz
+               0x00000002, 0x00080012, 0x0019000e, 0xffe5ff9e,
+               0xff4fff25, 0xff560000, 0x0112023b, 0x02f702c0,
+               0x014dfec8, 0xfbe5f9b3, 0xf947fb41, 0xff7004b9,
+               0x095a0b81, 0x0a0004d8, 0xfd65f603, 0xf144f104,
+               0xf5aafdec, 0x072b0e5a, 0x110d0000,
+       }, {    // 4.6 MHz
+               0x00000001, 0x00060012, 0x00200022, 0x0005ffc1,
+               0xff61ff10, 0xff09ff82, 0x008601d7, 0x02f50340,
+               0x0241fff0, 0xfcddfa19, 0xf8e2fa1e, 0xfde30343,
+               0x08790b7f, 0x0ad50631, 0xfec7f6fc, 0xf198f0bd,
+               0xf50dfd4e, 0x06c90e3d, 0x110d0000,
+       }, {    // 4.7 MHz
+               0x0000ffff, 0x0003000f, 0x00220030, 0x0025ffed,
+               0xff87ff15, 0xfed6ff10, 0xffed014c, 0x02b90386,
+               0x03110119, 0xfdfefac4, 0xf8c6f92f, 0xfc6701b7,
+               0x07670b44, 0x0b7e0776, 0x002df807, 0xf200f086,
+               0xf477fcb1, 0x06650e1e, 0x110d0000,
+       }, {    // 4.8 MHz
+               0xfffffffe, 0xffff0009, 0x001e0038, 0x003f001b,
+               0xffbcff36, 0xfec2feb6, 0xff5600a5, 0x0248038d,
+               0x03b00232, 0xff39fbab, 0xf8f4f87f, 0xfb060020,
+               0x062a0ad2, 0x0bf908a3, 0x0192f922, 0xf27df05e,
+               0xf3e8fc14, 0x06000e00, 0x110d0000,
+       }, {    // 4.9 MHz
+               0xfffffffd, 0xfffc0002, 0x00160037, 0x00510046,
+               0xfff9ff6d, 0xfed0fe7c, 0xfecefff0, 0x01aa0356,
+               0x0413032b, 0x007ffcc5, 0xf96cf812, 0xf9cefe87,
+               0x04c90a2c, 0x0c4309b4, 0x02f3fa4a, 0xf30ef046,
+               0xf361fb7a, 0x059b0de0, 0x110d0000,
+       }, {    // 5.0 MHz
+               0xfffffffd, 0xfff9fffa, 0x000a002d, 0x00570067,
+               0x0037ffb5, 0xfefffe68, 0xfe62ff3d, 0x00ec02e3,
+               0x043503f6, 0x01befe05, 0xfa27f7ee, 0xf8c6fcf8,
+               0x034c0954, 0x0c5c0aa4, 0x044cfb7e, 0xf3b1f03f,
+               0xf2e2fae1, 0x05340dc0, 0x110d0000,
+       }, {    // 5.1 MHz
+               0x0000fffd, 0xfff8fff4, 0xfffd001e, 0x0051007b,
+               0x006e0006, 0xff48fe7c, 0xfe1bfe9a, 0x001d023e,
+               0x04130488, 0x02e6ff5b, 0xfb1ef812, 0xf7f7fb7f,
+               0x01bc084e, 0x0c430b72, 0x059afcba, 0xf467f046,
+               0xf26cfa4a, 0x04cd0da0, 0x110d0000,
+       }, {    // 5.2 MHz
+               0x0000fffe, 0xfff8ffef, 0xfff00009, 0x003f007f,
+               0x00980056, 0xffa5feb6, 0xfe00fe15, 0xff4b0170,
+               0x03b004d7, 0x03e800b9, 0xfc48f87f, 0xf768fa23,
+               0x0022071f, 0x0bf90c1b, 0x06dafdfd, 0xf52df05e,
+               0xf1fef9b5, 0x04640d7f, 0x110d0000,
+       }, {    // 5.3 MHz
+               0x0000ffff, 0xfff9ffee, 0xffe6fff3, 0x00250072,
+               0x00af009c, 0x000cff10, 0xfe13fdb8, 0xfe870089,
+               0x031104e1, 0x04b8020f, 0xfd98f92f, 0xf71df8f0,
+               0xfe8805ce, 0x0b7e0c9c, 0x0808ff44, 0xf603f086,
+               0xf19af922, 0x03fb0d5e, 0x110d0000,
+       }, {    // 5.4 MHz
+               0x00000001, 0xfffcffef, 0xffe0ffe0, 0x00050056,
+               0x00b000d1, 0x0071ff82, 0xfe53fd8c, 0xfddfff99,
+               0x024104a3, 0x054a034d, 0xff01fa1e, 0xf717f7ed,
+               0xfcf50461, 0x0ad50cf4, 0x0921008d, 0xf6e7f0bd,
+               0xf13ff891, 0x03920d3b, 0x110d0000,
+       }, {    // 5.5 MHz
+               0x00010002, 0xfffffff3, 0xffdeffd1, 0xffe5002f,
+               0x009c00ed, 0x00cb0000, 0xfebafd94, 0xfd61feb0,
+               0x014d0422, 0x05970464, 0x0074fb41, 0xf759f721,
+               0xfb7502de, 0x0a000d21, 0x0a2201d4, 0xf7d9f104,
+               0xf0edf804, 0x03280d19, 0x110d0000,
+       }, {    // 5.6 MHz
+               0x00010003, 0x0003fffa, 0xffe3ffc9, 0xffc90002,
+               0x007500ef, 0x010e007e, 0xff3dfdcf, 0xfd16fddd,
+               0x00440365, 0x059b0548, 0x01e3fc90, 0xf7dff691,
+               0xfa0f014d, 0x09020d23, 0x0b0a0318, 0xf8d7f15a,
+               0xf0a5f779, 0x02bd0cf6, 0x110d0000,
+       }, {    // 5.7 MHz
+               0x00010003, 0x00060001, 0xffecffc9, 0xffb4ffd4,
+               0x004000d5, 0x013600f0, 0xffd3fe39, 0xfd04fd31,
+               0xff360277, 0x055605ef, 0x033efdfe, 0xf8a5f642,
+               0xf8cbffb6, 0x07e10cfb, 0x0bd50456, 0xf9dff1be,
+               0xf067f6f2, 0x02520cd2, 0x110d0000,
+       }, {    // 5.8 MHz
+               0x00000003, 0x00080009, 0xfff8ffd2, 0xffaaffac,
+               0x000200a3, 0x013c014a, 0x006dfec9, 0xfd2bfcb7,
+               0xfe350165, 0x04cb0651, 0x0477ff7e, 0xf9a5f635,
+               0xf7b1fe20, 0x069f0ca8, 0x0c81058b, 0xfaf0f231,
+               0xf033f66d, 0x01e60cae, 0x110d0000,
+       }, {    // 5.9 MHz
+               0x00000002, 0x0009000e, 0x0005ffe1, 0xffacff90,
+               0xffc5005f, 0x01210184, 0x00fcff72, 0xfd8afc77,
+               0xfd51003f, 0x04020669, 0x05830103, 0xfad7f66b,
+               0xf6c8fc93, 0x05430c2b, 0x0d0d06b5, 0xfc08f2b2,
+               0xf00af5ec, 0x017b0c89, 0x110d0000,
+       }, {    // 6.0 MHz
+               0x00000001, 0x00070012, 0x0012fff5, 0xffbaff82,
+               0xff8e000f, 0x00e80198, 0x01750028, 0xfe18fc75,
+               0xfc99ff15, 0x03050636, 0x0656027f, 0xfc32f6e2,
+               0xf614fb17, 0x03d20b87, 0x0d7707d2, 0xfd26f341,
+               0xefeaf56f, 0x010f0c64, 0x110d0000,
+       }, {    // 6.1 MHz
+               0xffff0000, 0x00050012, 0x001c000b, 0xffd1ff84,
+               0xff66ffbe, 0x00960184, 0x01cd00da, 0xfeccfcb2,
+               0xfc17fdf9, 0x01e005bc, 0x06e703e4, 0xfdabf798,
+               0xf599f9b3, 0x02510abd, 0x0dbf08df, 0xfe48f3dc,
+               0xefd5f4f6, 0x00a20c3e, 0x110d0000,
+       }, {    // 6.2 MHz
+               0xfffffffe, 0x0002000f, 0x0021001f, 0xfff0ff97,
+               0xff50ff74, 0x0034014a, 0x01fa0179, 0xff97fd2a,
+               0xfbd3fcfa, 0x00a304fe, 0x07310525, 0xff37f886,
+               0xf55cf86e, 0x00c709d0, 0x0de209db, 0xff6df484,
+               0xefcbf481, 0x00360c18, 0x110d0000,
+       }, {    // 6.3 MHz
+               0xfffffffd, 0xfffe000a, 0x0021002f, 0x0010ffb8,
+               0xff50ff3b, 0xffcc00f0, 0x01fa01fa, 0x0069fdd4,
+               0xfbd3fc26, 0xff5d0407, 0x07310638, 0x00c9f9a8,
+               0xf55cf74e, 0xff3908c3, 0x0de20ac3, 0x0093f537,
+               0xefcbf410, 0xffca0bf2, 0x110d0000,
+       }, {    // 6.4 MHz
+               0xfffffffd, 0xfffb0003, 0x001c0037, 0x002fffe2,
+               0xff66ff17, 0xff6a007e, 0x01cd0251, 0x0134fea5,
+               0xfc17fb8b, 0xfe2002e0, 0x06e70713, 0x0255faf5,
+               0xf599f658, 0xfdaf0799, 0x0dbf0b96, 0x01b8f5f5,
+               0xefd5f3a3, 0xff5e0bca, 0x110d0000,
+       }, {    // 6.5 MHz
+               0x0000fffd, 0xfff9fffb, 0x00120037, 0x00460010,
+               0xff8eff0f, 0xff180000, 0x01750276, 0x01e8ff8d,
+               0xfc99fb31, 0xfcfb0198, 0x065607ad, 0x03cefc64,
+               0xf614f592, 0xfc2e0656, 0x0d770c52, 0x02daf6bd,
+               0xefeaf33b, 0xfef10ba3, 0x110d0000,
+       }, {    // 6.6 MHz
+               0x0000fffe, 0xfff7fff5, 0x0005002f, 0x0054003c,
+               0xffc5ff22, 0xfedfff82, 0x00fc0267, 0x0276007e,
+               0xfd51fb1c, 0xfbfe003e, 0x05830802, 0x0529fdec,
+               0xf6c8f4fe, 0xfabd04ff, 0x0d0d0cf6, 0x03f8f78f,
+               0xf00af2d7, 0xfe850b7b, 0x110d0000,
+       }, {    // 6.7 MHz
+               0x0000ffff, 0xfff8fff0, 0xfff80020, 0x00560060,
+               0x0002ff4e, 0xfec4ff10, 0x006d0225, 0x02d50166,
+               0xfe35fb4e, 0xfb35fee1, 0x0477080e, 0x065bff82,
+               0xf7b1f4a0, 0xf9610397, 0x0c810d80, 0x0510f869,
+               0xf033f278, 0xfe1a0b52, 0x110d0000,
+       }, {    // 6.8 MHz
+               0x00010000, 0xfffaffee, 0xffec000c, 0x004c0078,
+               0x0040ff8e, 0xfecafeb6, 0xffd301b6, 0x02fc0235,
+               0xff36fbc5, 0xfaaafd90, 0x033e07d2, 0x075b011b,
+               0xf8cbf47a, 0xf81f0224, 0x0bd50def, 0x0621f94b,
+               0xf067f21e, 0xfdae0b29, 0x110d0000,
+       }, {    // 6.9 MHz
+               0x00010001, 0xfffdffef, 0xffe3fff6, 0x0037007f,
+               0x0075ffdc, 0xfef2fe7c, 0xff3d0122, 0x02ea02dd,
+               0x0044fc79, 0xfa65fc5d, 0x01e3074e, 0x082102ad,
+               0xfa0ff48c, 0xf6fe00a9, 0x0b0a0e43, 0x0729fa33,
+               0xf0a5f1c9, 0xfd430b00, 0x110d0000,
+       }, {    // 7.0 MHz
+               0x00010002, 0x0001fff3, 0xffdeffe2, 0x001b0076,
+               0x009c002d, 0xff35fe68, 0xfeba0076, 0x029f0352,
+               0x014dfd60, 0xfa69fb53, 0x00740688, 0x08a7042d,
+               0xfb75f4d6, 0xf600ff2d, 0x0a220e7a, 0x0827fb22,
+               0xf0edf17a, 0xfcd80ad6, 0x110d0000,
+       }, {    // 7.1 MHz
+               0x00000003, 0x0004fff9, 0xffe0ffd2, 0xfffb005e,
+               0x00b0007a, 0xff8ffe7c, 0xfe53ffc1, 0x0221038c,
+               0x0241fe6e, 0xfab6fa80, 0xff010587, 0x08e90590,
+               0xfcf5f556, 0xf52bfdb3, 0x09210e95, 0x0919fc15,
+               0xf13ff12f, 0xfc6e0aab, 0x110d0000,
+       }, {    // 7.2 MHz
+               0x00000003, 0x00070000, 0xffe6ffc9, 0xffdb0039,
+               0x00af00b8, 0xfff4feb6, 0xfe13ff10, 0x01790388,
+               0x0311ff92, 0xfb48f9ed, 0xfd980453, 0x08e306cd,
+               0xfe88f60a, 0xf482fc40, 0x08080e93, 0x09fdfd0c,
+               0xf19af0ea, 0xfc050a81, 0x110d0000,
+       }, {    // 7.3 MHz
+               0x00000002, 0x00080008, 0xfff0ffc9, 0xffc1000d,
+               0x009800e2, 0x005bff10, 0xfe00fe74, 0x00b50345,
+               0x03b000bc, 0xfc18f9a1, 0xfc4802f9, 0x089807dc,
+               0x0022f6f0, 0xf407fada, 0x06da0e74, 0x0ad3fe06,
+               0xf1fef0ab, 0xfb9c0a55, 0x110d0000,
+       }, {    // 7.4 MHz
+               0x00000001, 0x0008000e, 0xfffdffd0, 0xffafffdf,
+               0x006e00f2, 0x00b8ff82, 0xfe1bfdf8, 0xffe302c8,
+               0x041301dc, 0xfd1af99e, 0xfb1e0183, 0x080908b5,
+               0x01bcf801, 0xf3bdf985, 0x059a0e38, 0x0b99ff03,
+               0xf26cf071, 0xfb330a2a, 0x110d0000,
+       }, {    // 7.5 MHz
+               0xffff0000, 0x00070011, 0x000affdf, 0xffa9ffb5,
+               0x003700e6, 0x01010000, 0xfe62fda8, 0xff140219,
+               0x043502e1, 0xfe42f9e6, 0xfa270000, 0x073a0953,
+               0x034cf939, 0xf3a4f845, 0x044c0de1, 0x0c4f0000,
+               0xf2e2f03c, 0xfacc09fe, 0x110d0000,
+       }, {    // 7.6 MHz
+               0xffffffff, 0x00040012, 0x0016fff3, 0xffafff95,
+               0xfff900c0, 0x0130007e, 0xfecefd89, 0xfe560146,
+               0x041303bc, 0xff81fa76, 0xf96cfe7d, 0x063209b1,
+               0x04c9fa93, 0xf3bdf71e, 0x02f30d6e, 0x0cf200fd,
+               0xf361f00e, 0xfa6509d1, 0x110d0000,
+       }, {    // 7.7 MHz
+               0xfffffffe, 0x00010010, 0x001e0008, 0xffc1ff84,
+               0xffbc0084, 0x013e00f0, 0xff56fd9f, 0xfdb8005c,
+               0x03b00460, 0x00c7fb45, 0xf8f4fd07, 0x04fa09ce,
+               0x062afc07, 0xf407f614, 0x01920ce0, 0x0d8301fa,
+               0xf3e8efe5, 0xfa0009a4, 0x110d0000,
+       }, {    // 7.8 MHz
+               0x0000fffd, 0xfffd000b, 0x0022001d, 0xffdbff82,
+               0xff870039, 0x012a014a, 0xffedfde7, 0xfd47ff6b,
+               0x031104c6, 0x0202fc4c, 0xf8c6fbad, 0x039909a7,
+               0x0767fd8e, 0xf482f52b, 0x002d0c39, 0x0e0002f4,
+               0xf477efc2, 0xf99b0977, 0x110d0000,
+       }, {    // 7.9 MHz
+               0x0000fffd, 0xfffa0004, 0x0020002d, 0xfffbff91,
+               0xff61ffe8, 0x00f70184, 0x0086fe5c, 0xfd0bfe85,
+               0x024104e5, 0x0323fd7d, 0xf8e2fa79, 0x021d093f,
+               0x0879ff22, 0xf52bf465, 0xfec70b79, 0x0e6803eb,
+               0xf50defa5, 0xf937094a, 0x110d0000,
+       }, {    // 8.0 MHz
+               0x0000fffe, 0xfff8fffd, 0x00190036, 0x001bffaf,
+               0xff4fff99, 0x00aa0198, 0x0112fef3, 0xfd09fdb9,
+               0x014d04be, 0x041bfecc, 0xf947f978, 0x00900897,
+               0x095a00b9, 0xf600f3c5, 0xfd650aa3, 0x0ebc04de,
+               0xf5aaef8e, 0xf8d5091c, 0x110d0000,
+       }, {    // 8.1 MHz
+               0x0000ffff, 0xfff7fff6, 0x000e0038, 0x0037ffd7,
+               0xff52ff56, 0x004b0184, 0x0186ffa1, 0xfd40fd16,
+               0x00440452, 0x04de0029, 0xf9f2f8b2, 0xfefe07b5,
+               0x0a05024d, 0xf6fef34d, 0xfc0a09b8, 0x0efa05cd,
+               0xf64eef7d, 0xf87308ed, 0x110d0000,
+       }, {    // 8.2 MHz
+               0x00010000, 0xfff8fff0, 0x00000031, 0x004c0005,
+               0xff6aff27, 0xffe4014a, 0x01d70057, 0xfdacfca6,
+               0xff3603a7, 0x05610184, 0xfadbf82e, 0xfd74069f,
+               0x0a7503d6, 0xf81ff2ff, 0xfab808b9, 0x0f2306b5,
+               0xf6f9ef72, 0xf81308bf, 0x110d0000,
+       }, {    // 8.3 MHz
+               0x00010001, 0xfffbffee, 0xfff30022, 0x00560032,
+               0xff95ff10, 0xff8000f0, 0x01fe0106, 0xfe46fc71,
+               0xfe3502c7, 0x059e02ce, 0xfbf9f7f2, 0xfbff055b,
+               0x0aa9054c, 0xf961f2db, 0xf97507aa, 0x0f350797,
+               0xf7a9ef6d, 0xf7b40890, 0x110d0000,
+       }, {    // 8.4 MHz
+               0x00010002, 0xfffeffee, 0xffe8000f, 0x00540058,
+               0xffcdff14, 0xff29007e, 0x01f6019e, 0xff01fc7c,
+               0xfd5101bf, 0x059203f6, 0xfd41f7fe, 0xfaa903f3,
+               0x0a9e06a9, 0xfabdf2e2, 0xf842068b, 0x0f320871,
+               0xf85eef6e, 0xf7560860, 0x110d0000,
+       }, {    // 8.5 MHz
+               0x00000003, 0x0002fff2, 0xffe1fff9, 0x00460073,
+               0x000bff34, 0xfee90000, 0x01c10215, 0xffd0fcc5,
+               0xfc99009d, 0x053d04f1, 0xfea5f853, 0xf97d0270,
+               0x0a5607e4, 0xfc2ef314, 0xf723055f, 0x0f180943,
+               0xf919ef75, 0xf6fa0830, 0x110d0000,
+       }, {    // 8.6 MHz
+               0x00000003, 0x0005fff8, 0xffdeffe4, 0x002f007f,
+               0x0048ff6b, 0xfec7ff82, 0x0163025f, 0x00a2fd47,
+               0xfc17ff73, 0x04a405b2, 0x0017f8ed, 0xf88500dc,
+               0x09d208f9, 0xfdaff370, 0xf61c0429, 0x0ee80a0b,
+               0xf9d8ef82, 0xf6a00800, 0x110d0000,
+       }, {    // 8.7 MHz
+               0x00000003, 0x0007ffff, 0xffe1ffd4, 0x0010007a,
+               0x007cffb2, 0xfec6ff10, 0x00e60277, 0x0168fdf9,
+               0xfbd3fe50, 0x03ce0631, 0x0188f9c8, 0xf7c7ff43,
+               0x091509e3, 0xff39f3f6, 0xf52d02ea, 0x0ea30ac9,
+               0xfa9bef95, 0xf64607d0, 0x110d0000,
+       }, {    // 8.8 MHz
+               0x00000002, 0x00090007, 0xffe9ffca, 0xfff00065,
+               0x00a10003, 0xfee6feb6, 0x0053025b, 0x0213fed0,
+               0xfbd3fd46, 0x02c70668, 0x02eafadb, 0xf74bfdae,
+               0x08230a9c, 0x00c7f4a3, 0xf45b01a6, 0x0e480b7c,
+               0xfb61efae, 0xf5ef079f, 0x110d0000,
+       }, {    // 8.9 MHz
+               0xffff0000, 0x0008000d, 0xfff5ffc8, 0xffd10043,
+               0x00b20053, 0xff24fe7c, 0xffb9020c, 0x0295ffbb,
+               0xfc17fc64, 0x019b0654, 0x042dfc1c, 0xf714fc2a,
+               0x07020b21, 0x0251f575, 0xf3a7005e, 0x0dd80c24,
+               0xfc2aefcd, 0xf599076e, 0x110d0000,
+       }, {    // 9.0 MHz
+               0xffffffff, 0x00060011, 0x0002ffcf, 0xffba0018,
+               0x00ad009a, 0xff79fe68, 0xff260192, 0x02e500ab,
+               0xfc99fbb6, 0x005b05f7, 0x0545fd81, 0xf723fabf,
+               0x05b80b70, 0x03d2f669, 0xf313ff15, 0x0d550cbf,
+               0xfcf6eff2, 0xf544073d, 0x110d0000,
+       }, {    // 9.1 MHz
+               0xfffffffe, 0x00030012, 0x000fffdd, 0xffacffea,
+               0x009300cf, 0xffdcfe7c, 0xfea600f7, 0x02fd0190,
+               0xfd51fb46, 0xff150554, 0x0627fefd, 0xf778f978,
+               0x044d0b87, 0x0543f77d, 0xf2a0fdcf, 0x0cbe0d4e,
+               0xfdc4f01d, 0xf4f2070b, 0x110d0000,
+       }, {    // 9.2 MHz
+               0x0000fffd, 0x00000010, 0x001afff0, 0xffaaffbf,
+               0x006700ed, 0x0043feb6, 0xfe460047, 0x02db0258,
+               0xfe35fb1b, 0xfddc0473, 0x06c90082, 0xf811f85e,
+               0x02c90b66, 0x069ff8ad, 0xf250fc8d, 0x0c140dcf,
+               0xfe93f04d, 0xf4a106d9, 0x110d0000,
+       }, {    // 9.3 MHz
+               0x0000fffd, 0xfffc000c, 0x00200006, 0xffb4ff9c,
+               0x002f00ef, 0x00a4ff10, 0xfe0dff92, 0x028102f7,
+               0xff36fb37, 0xfcbf035e, 0x07260202, 0xf8e8f778,
+               0x01340b0d, 0x07e1f9f4, 0xf223fb51, 0x0b590e42,
+               0xff64f083, 0xf45206a7, 0x110d0000,
+       }, {    // 9.4 MHz
+               0x0000fffd, 0xfff90005, 0x0022001a, 0xffc9ff86,
+               0xfff000d7, 0x00f2ff82, 0xfe01fee5, 0x01f60362,
+               0x0044fb99, 0xfbcc0222, 0x07380370, 0xf9f7f6cc,
+               0xff990a7e, 0x0902fb50, 0xf21afa1f, 0x0a8d0ea6,
+               0x0034f0bf, 0xf4050675, 0x110d0000,
+       }, {    // 9.5 MHz
+               0x0000fffe, 0xfff8fffe, 0x001e002b, 0xffe5ff81,
+               0xffb400a5, 0x01280000, 0xfe24fe50, 0x01460390,
+               0x014dfc3a, 0xfb1000ce, 0x070104bf, 0xfb37f65f,
+               0xfe0009bc, 0x0a00fcbb, 0xf235f8f8, 0x09b20efc,
+               0x0105f101, 0xf3ba0642, 0x110d0000,
+       }, {    // 9.6 MHz
+               0x0001ffff, 0xfff8fff7, 0x00150036, 0x0005ff8c,
+               0xff810061, 0x013d007e, 0xfe71fddf, 0x007c0380,
+               0x0241fd13, 0xfa94ff70, 0x068005e2, 0xfc9bf633,
+               0xfc7308ca, 0x0ad5fe30, 0xf274f7e0, 0x08c90f43,
+               0x01d4f147, 0xf371060f, 0x110d0000,
+       }, {    // 9.7 MHz
+               0x00010001, 0xfff9fff1, 0x00090038, 0x0025ffa7,
+               0xff5e0012, 0x013200f0, 0xfee3fd9b, 0xffaa0331,
+               0x0311fe15, 0xfa60fe18, 0x05bd06d1, 0xfe1bf64a,
+               0xfafa07ae, 0x0b7effab, 0xf2d5f6d7, 0x07d30f7a,
+               0x02a3f194, 0xf32905dc, 0x110d0000,
+       }, {    // 9.8 MHz
+               0x00010002, 0xfffcffee, 0xfffb0032, 0x003fffcd,
+               0xff4effc1, 0x0106014a, 0xff6efd8a, 0xfedd02aa,
+               0x03b0ff34, 0xfa74fcd7, 0x04bf0781, 0xffaaf6a3,
+               0xf99e066b, 0x0bf90128, 0xf359f5e1, 0x06d20fa2,
+               0x0370f1e5, 0xf2e405a8, 0x110d0000,
+       }, {    // 9.9 MHz
+               0x00000003, 0xffffffee, 0xffef0024, 0x0051fffa,
+               0xff54ff77, 0x00be0184, 0x0006fdad, 0xfe2701f3,
+               0x0413005e, 0xfad1fbba, 0x039007ee, 0x013bf73d,
+               0xf868050a, 0x0c4302a1, 0xf3fdf4fe, 0x05c70fba,
+               0x043bf23c, 0xf2a10575, 0x110d0000,
+       }, {    // 10.0 MHz
+               0x00000003, 0x0003fff1, 0xffe50011, 0x00570027,
+               0xff70ff3c, 0x00620198, 0x009efe01, 0xfd95011a,
+               0x04350183, 0xfb71fad0, 0x023c0812, 0x02c3f811,
+               0xf75e0390, 0x0c5c0411, 0xf4c1f432, 0x04b30fc1,
+               0x0503f297, 0xf2610541, 0x110d0000,
+       }, {    // 10.1 MHz
+               0x00000003, 0x0006fff7, 0xffdffffc, 0x00510050,
+               0xff9dff18, 0xfffc0184, 0x0128fe80, 0xfd32002e,
+               0x04130292, 0xfc4dfa21, 0x00d107ee, 0x0435f91c,
+               0xf6850205, 0x0c430573, 0xf5a1f37d, 0x03990fba,
+               0x05c7f2f8, 0xf222050d, 0x110d0000,
+       }, {    // 10.2 MHz
+               0x00000002, 0x0008fffe, 0xffdfffe7, 0x003f006e,
+               0xffd6ff0f, 0xff96014a, 0x0197ff1f, 0xfd05ff3e,
+               0x03b0037c, 0xfd59f9b7, 0xff5d0781, 0x0585fa56,
+               0xf5e4006f, 0x0bf906c4, 0xf69df2e0, 0x02790fa2,
+               0x0688f35d, 0xf1e604d8, 0x110d0000,
+       }, {    // 10.3 MHz
+               0xffff0001, 0x00090005, 0xffe4ffd6, 0x0025007e,
+               0x0014ff20, 0xff3c00f0, 0x01e1ffd0, 0xfd12fe5c,
+               0x03110433, 0xfe88f996, 0xfdf106d1, 0x06aafbb7,
+               0xf57efed8, 0x0b7e07ff, 0xf7b0f25e, 0x01560f7a,
+               0x0745f3c7, 0xf1ac04a4, 0x110d0000,
+       }, {    // 10.4 MHz
+               0xffffffff, 0x0008000c, 0xffedffcb, 0x0005007d,
+               0x0050ff4c, 0xfef6007e, 0x01ff0086, 0xfd58fd97,
+               0x024104ad, 0xffcaf9c0, 0xfc9905e2, 0x079afd35,
+               0xf555fd46, 0x0ad50920, 0xf8d9f1f6, 0x00310f43,
+               0x07fdf435, 0xf174046f, 0x110d0000,
+       }, {    // 10.5 MHz
+               0xfffffffe, 0x00050011, 0xfffaffc8, 0xffe5006b,
+               0x0082ff8c, 0xfecc0000, 0x01f00130, 0xfdd2fcfc,
+               0x014d04e3, 0x010efa32, 0xfb6404bf, 0x084efec5,
+               0xf569fbc2, 0x0a000a23, 0xfa15f1ab, 0xff0b0efc,
+               0x08b0f4a7, 0xf13f043a, 0x110d0000,
+       }, {    // 10.6 MHz
+               0x0000fffd, 0x00020012, 0x0007ffcd, 0xffc9004c,
+               0x00a4ffd9, 0xfec3ff82, 0x01b401c1, 0xfe76fc97,
+               0x004404d2, 0x0245fae8, 0xfa5f0370, 0x08c1005f,
+               0xf5bcfa52, 0x09020b04, 0xfb60f17b, 0xfde70ea6,
+               0x095df51e, 0xf10c0405, 0x110d0000,
+       }, {    // 10.7 MHz
+               0x0000fffd, 0xffff0011, 0x0014ffdb, 0xffb40023,
+               0x00b2002a, 0xfedbff10, 0x0150022d, 0xff38fc6f,
+               0xff36047b, 0x035efbda, 0xf9940202, 0x08ee01f5,
+               0xf649f8fe, 0x07e10bc2, 0xfcb6f169, 0xfcc60e42,
+               0x0a04f599, 0xf0db03d0, 0x110d0000,
+       }, {    // 10.8 MHz
+               0x0000fffd, 0xfffb000d, 0x001dffed, 0xffaafff5,
+               0x00aa0077, 0xff13feb6, 0x00ce026b, 0x000afc85,
+               0xfe3503e3, 0x044cfcfb, 0xf90c0082, 0x08d5037f,
+               0xf710f7cc, 0x069f0c59, 0xfe16f173, 0xfbaa0dcf,
+               0x0aa5f617, 0xf0ad039b, 0x110d0000,
+       }, {    // 10.9 MHz
+               0x0000fffe, 0xfff90006, 0x00210003, 0xffacffc8,
+               0x008e00b6, 0xff63fe7c, 0x003a0275, 0x00dafcda,
+               0xfd510313, 0x0501fe40, 0xf8cbfefd, 0x087604f0,
+               0xf80af6c2, 0x05430cc8, 0xff7af19a, 0xfa940d4e,
+               0x0b3ff699, 0xf0810365, 0x110d0000,
+       }, {    // 11.0 MHz
+               0x0001ffff, 0xfff8ffff, 0x00210018, 0xffbaffa3,
+               0x006000e1, 0xffc4fe68, 0xffa0024b, 0x019afd66,
+               0xfc990216, 0x0575ff99, 0xf8d4fd81, 0x07d40640,
+               0xf932f5e6, 0x03d20d0d, 0x00dff1de, 0xf9860cbf,
+               0x0bd1f71e, 0xf058032f, 0x110d0000,
+       }, {    // 11.1 MHz
+               0x00010000, 0xfff8fff8, 0x001b0029, 0xffd1ff8a,
+               0x002600f2, 0x002cfe7c, 0xff0f01f0, 0x023bfe20,
+               0xfc1700fa, 0x05a200f7, 0xf927fc1c, 0x06f40765,
+               0xfa82f53b, 0x02510d27, 0x0243f23d, 0xf8810c24,
+               0x0c5cf7a7, 0xf03102fa, 0x110d0000,
+       }, {    // 11.2 MHz
+               0x00010002, 0xfffafff2, 0x00110035, 0xfff0ff81,
+               0xffe700e7, 0x008ffeb6, 0xfe94016d, 0x02b0fefb,
+               0xfbd3ffd1, 0x05850249, 0xf9c1fadb, 0x05de0858,
+               0xfbf2f4c4, 0x00c70d17, 0x03a0f2b8, 0xf7870b7c,
+               0x0cdff833, 0xf00d02c4, 0x110d0000,
+       }, {    // 11.3 MHz
+               0x00000003, 0xfffdffee, 0x00040038, 0x0010ff88,
+               0xffac00c2, 0x00e2ff10, 0xfe3900cb, 0x02f1ffe9,
+               0xfbd3feaa, 0x05210381, 0xfa9cf9c8, 0x04990912,
+               0xfd7af484, 0xff390cdb, 0x04f4f34d, 0xf69a0ac9,
+               0x0d5af8c1, 0xefec028e, 0x110d0000,
+       }, {    // 11.4 MHz
+               0x00000003, 0x0000ffee, 0xfff60033, 0x002fff9f,
+               0xff7b0087, 0x011eff82, 0xfe080018, 0x02f900d8,
+               0xfc17fd96, 0x04790490, 0xfbadf8ed, 0x032f098e,
+               0xff10f47d, 0xfdaf0c75, 0x063cf3fc, 0xf5ba0a0b,
+               0x0dccf952, 0xefcd0258, 0x110d0000,
+       }, {    // 11.5 MHz
+               0x00000003, 0x0004fff1, 0xffea0026, 0x0046ffc3,
+               0xff5a003c, 0x013b0000, 0xfe04ff63, 0x02c801b8,
+               0xfc99fca6, 0x0397056a, 0xfcecf853, 0x01ad09c9,
+               0x00acf4ad, 0xfc2e0be7, 0x0773f4c2, 0xf4e90943,
+               0x0e35f9e6, 0xefb10221, 0x110d0000,
+       }, {    // 11.6 MHz
+               0x00000002, 0x0007fff6, 0xffe20014, 0x0054ffee,
+               0xff4effeb, 0x0137007e, 0xfe2efebb, 0x0260027a,
+               0xfd51fbe6, 0x02870605, 0xfe4af7fe, 0x001d09c1,
+               0x0243f515, 0xfabd0b32, 0x0897f59e, 0xf4280871,
+               0x0e95fa7c, 0xef9701eb, 0x110d0000,
+       }, {    // 11.7 MHz
+               0xffff0001, 0x0008fffd, 0xffdeffff, 0x0056001d,
+               0xff57ff9c, 0x011300f0, 0xfe82fe2e, 0x01ca0310,
+               0xfe35fb62, 0x0155065a, 0xffbaf7f2, 0xfe8c0977,
+               0x03cef5b2, 0xf9610a58, 0x09a5f68f, 0xf3790797,
+               0x0eebfb14, 0xef8001b5, 0x110d0000,
+       }, {    // 11.8 MHz
+               0xffff0000, 0x00080004, 0xffe0ffe9, 0x004c0047,
+               0xff75ff58, 0x00d1014a, 0xfef9fdc8, 0x0111036f,
+               0xff36fb21, 0x00120665, 0x012df82e, 0xfd0708ec,
+               0x0542f682, 0xf81f095c, 0x0a9af792, 0xf2db06b5,
+               0x0f38fbad, 0xef6c017e, 0x110d0000,
+       }, {    // 11.9 MHz
+               0xffffffff, 0x0007000b, 0xffe7ffd8, 0x00370068,
+               0xffa4ff28, 0x00790184, 0xff87fd91, 0x00430392,
+               0x0044fb26, 0xfece0626, 0x0294f8b2, 0xfb990825,
+               0x0698f77f, 0xf6fe0842, 0x0b73f8a7, 0xf25105cd,
+               0x0f7bfc48, 0xef5a0148, 0x110d0000,
+       }, {    // 12.0 MHz
+               0x0000fffe, 0x00050010, 0xfff2ffcc, 0x001b007b,
+               0xffdfff10, 0x00140198, 0x0020fd8e, 0xff710375,
+               0x014dfb73, 0xfd9a059f, 0x03e0f978, 0xfa4e0726,
+               0x07c8f8a7, 0xf600070c, 0x0c2ff9c9, 0xf1db04de,
+               0x0fb4fce5, 0xef4b0111, 0x110d0000,
+       }, {    // 12.1 MHz
+               0x0000fffd, 0x00010012, 0xffffffc8, 0xfffb007e,
+               0x001dff14, 0xffad0184, 0x00b7fdbe, 0xfea9031b,
+               0x0241fc01, 0xfc8504d6, 0x0504fa79, 0xf93005f6,
+               0x08caf9f2, 0xf52b05c0, 0x0ccbfaf9, 0xf17903eb,
+               0x0fe3fd83, 0xef3f00db, 0x110d0000,
+       }, {    // 12.2 MHz
+               0x0000fffd, 0xfffe0011, 0x000cffcc, 0xffdb0071,
+               0x0058ff32, 0xff4f014a, 0x013cfe1f, 0xfdfb028a,
+               0x0311fcc9, 0xfb9d03d6, 0x05f4fbad, 0xf848049d,
+               0x0999fb5b, 0xf4820461, 0x0d46fc32, 0xf12d02f4,
+               0x1007fe21, 0xef3600a4, 0x110d0000,
+       }, {    // 12.3 MHz
+               0x0000fffe, 0xfffa000e, 0x0017ffd9, 0xffc10055,
+               0x0088ff68, 0xff0400f0, 0x01a6fea7, 0xfd7501cc,
+               0x03b0fdc0, 0xfaef02a8, 0x06a7fd07, 0xf79d0326,
+               0x0a31fcda, 0xf40702f3, 0x0d9ffd72, 0xf0f601fa,
+               0x1021fec0, 0xef2f006d, 0x110d0000,
+       }, {    // 12.4 MHz
+               0x0001ffff, 0xfff80007, 0x001fffeb, 0xffaf002d,
+               0x00a8ffb0, 0xfed3007e, 0x01e9ff4c, 0xfd2000ee,
+               0x0413fed8, 0xfa82015c, 0x0715fe7d, 0xf7340198,
+               0x0a8dfe69, 0xf3bd017c, 0x0dd5feb8, 0xf0d500fd,
+               0x1031ff60, 0xef2b0037, 0x110d0000,
+       }, {    // 12.5 MHz
+               0x00010000, 0xfff70000, 0x00220000, 0xffa90000,
+               0x00b30000, 0xfec20000, 0x02000000, 0xfd030000,
+               0x04350000, 0xfa5e0000, 0x073b0000, 0xf7110000,
+               0x0aac0000, 0xf3a40000, 0x0de70000, 0xf0c90000,
+               0x10360000, 0xef290000, 0x110d0000,
+       }, {    // 12.6 MHz
+               0x00010001, 0xfff8fff9, 0x001f0015, 0xffafffd3,
+               0x00a80050, 0xfed3ff82, 0x01e900b4, 0xfd20ff12,
+               0x04130128, 0xfa82fea4, 0x07150183, 0xf734fe68,
+               0x0a8d0197, 0xf3bdfe84, 0x0dd50148, 0xf0d5ff03,
+               0x103100a0, 0xef2bffc9, 0x110d0000,
+       }, {    // 12.7 MHz
+               0x00000002, 0xfffafff2, 0x00170027, 0xffc1ffab,
+               0x00880098, 0xff04ff10, 0x01a60159, 0xfd75fe34,
+               0x03b00240, 0xfaeffd58, 0x06a702f9, 0xf79dfcda,
+               0x0a310326, 0xf407fd0d, 0x0d9f028e, 0xf0f6fe06,
+               0x10210140, 0xef2fff93, 0x110d0000,
+       }, {    // 12.8 MHz
+               0x00000003, 0xfffeffef, 0x000c0034, 0xffdbff8f,
+               0x005800ce, 0xff4ffeb6, 0x013c01e1, 0xfdfbfd76,
+               0x03110337, 0xfb9dfc2a, 0x05f40453, 0xf848fb63,
+               0x099904a5, 0xf482fb9f, 0x0d4603ce, 0xf12dfd0c,
+               0x100701df, 0xef36ff5c, 0x110d0000,
+       }, {    // 12.9 MHz
+               0x00000003, 0x0001ffee, 0xffff0038, 0xfffbff82,
+               0x001d00ec, 0xffadfe7c, 0x00b70242, 0xfea9fce5,
+               0x024103ff, 0xfc85fb2a, 0x05040587, 0xf930fa0a,
+               0x08ca060e, 0xf52bfa40, 0x0ccb0507, 0xf179fc15,
+               0x0fe3027d, 0xef3fff25, 0x110d0000,
+       }, {    // 13.0 MHz
+               0x00000002, 0x0005fff0, 0xfff20034, 0x001bff85,
+               0xffdf00f0, 0x0014fe68, 0x00200272, 0xff71fc8b,
+               0x014d048d, 0xfd9afa61, 0x03e00688, 0xfa4ef8da,
+               0x07c80759, 0xf600f8f4, 0x0c2f0637, 0xf1dbfb22,
+               0x0fb4031b, 0xef4bfeef, 0x110d0000,
+       }, {    // 13.1 MHz
+               0xffff0001, 0x0007fff5, 0xffe70028, 0x0037ff98,
+               0xffa400d8, 0x0079fe7c, 0xff87026f, 0x0043fc6e,
+               0x004404da, 0xfecef9da, 0x0294074e, 0xfb99f7db,
+               0x06980881, 0xf6fef7be, 0x0b730759, 0xf251fa33,
+               0x0f7b03b8, 0xef5afeb8, 0x110d0000,
+       }, {    // 13.2 MHz
+               0xffff0000, 0x0008fffc, 0xffe00017, 0x004cffb9,
+               0xff7500a8, 0x00d1feb6, 0xfef90238, 0x0111fc91,
+               0xff3604df, 0x0012f99b, 0x012d07d2, 0xfd07f714,
+               0x0542097e, 0xf81ff6a4, 0x0a9a086e, 0xf2dbf94b,
+               0x0f380453, 0xef6cfe82, 0x110d0000,
+       }, {    // 13.3 MHz
+               0xffffffff, 0x00080003, 0xffde0001, 0x0056ffe3,
+               0xff570064, 0x0113ff10, 0xfe8201d2, 0x01cafcf0,
+               0xfe35049e, 0x0155f9a6, 0xffba080e, 0xfe8cf689,
+               0x03ce0a4e, 0xf961f5a8, 0x09a50971, 0xf379f869,
+               0x0eeb04ec, 0xef80fe4b, 0x110d0000,
+       }, {    // 13.4 MHz
+               0x0000fffe, 0x0007000a, 0xffe2ffec, 0x00540012,
+               0xff4e0015, 0x0137ff82, 0xfe2e0145, 0x0260fd86,
+               0xfd51041a, 0x0287f9fb, 0xfe4a0802, 0x001df63f,
+               0x02430aeb, 0xfabdf4ce, 0x08970a62, 0xf428f78f,
+               0x0e950584, 0xef97fe15, 0x110d0000,
+       }, {    // 13.5 MHz
+               0x0000fffd, 0x0004000f, 0xffeaffda, 0x0046003d,
+               0xff5affc4, 0x013b0000, 0xfe04009d, 0x02c8fe48,
+               0xfc99035a, 0x0397fa96, 0xfcec07ad, 0x01adf637,
+               0x00ac0b53, 0xfc2ef419, 0x07730b3e, 0xf4e9f6bd,
+               0x0e35061a, 0xefb1fddf, 0x110d0000,
+       }, {    // 13.6 MHz
+               0x0000fffd, 0x00000012, 0xfff6ffcd, 0x002f0061,
+               0xff7bff79, 0x011e007e, 0xfe08ffe8, 0x02f9ff28,
+               0xfc17026a, 0x0479fb70, 0xfbad0713, 0x032ff672,
+               0xff100b83, 0xfdaff38b, 0x063c0c04, 0xf5baf5f5,
+               0x0dcc06ae, 0xefcdfda8, 0x110d0000,
+       }, {    // 13.7 MHz
+               0x0000fffd, 0xfffd0012, 0x0004ffc8, 0x00100078,
+               0xffacff3e, 0x00e200f0, 0xfe39ff35, 0x02f10017,
+               0xfbd30156, 0x0521fc7f, 0xfa9c0638, 0x0499f6ee,
+               0xfd7a0b7c, 0xff39f325, 0x04f40cb3, 0xf69af537,
+               0x0d5a073f, 0xefecfd72, 0x110d0000,
+       }, {    // 13.8 MHz
+               0x0001fffe, 0xfffa000e, 0x0011ffcb, 0xfff0007f,
+               0xffe7ff19, 0x008f014a, 0xfe94fe93, 0x02b00105,
+               0xfbd3002f, 0x0585fdb7, 0xf9c10525, 0x05def7a8,
+               0xfbf20b3c, 0x00c7f2e9, 0x03a00d48, 0xf787f484,
+               0x0cdf07cd, 0xf00dfd3c, 0x110d0000,
+       }, {    // 13.9 MHz
+               0x00010000, 0xfff80008, 0x001bffd7, 0xffd10076,
+               0x0026ff0e, 0x002c0184, 0xff0ffe10, 0x023b01e0,
+               0xfc17ff06, 0x05a2ff09, 0xf92703e4, 0x06f4f89b,
+               0xfa820ac5, 0x0251f2d9, 0x02430dc3, 0xf881f3dc,
+               0x0c5c0859, 0xf031fd06, 0x110d0000,
+       }, {    // 14.0 MHz
+               0x00010001, 0xfff80001, 0x0021ffe8, 0xffba005d,
+               0x0060ff1f, 0xffc40198, 0xffa0fdb5, 0x019a029a,
+               0xfc99fdea, 0x05750067, 0xf8d4027f, 0x07d4f9c0,
+               0xf9320a1a, 0x03d2f2f3, 0x00df0e22, 0xf986f341,
+               0x0bd108e2, 0xf058fcd1, 0x110d0000,
+       }, {    // 14.1 MHz
+               0x00000002, 0xfff9fffa, 0x0021fffd, 0xffac0038,
+               0x008eff4a, 0xff630184, 0x003afd8b, 0x00da0326,
+               0xfd51fced, 0x050101c0, 0xf8cb0103, 0x0876fb10,
+               0xf80a093e, 0x0543f338, 0xff7a0e66, 0xfa94f2b2,
+               0x0b3f0967, 0xf081fc9b, 0x110d0000,
+       }, {    // 14.2 MHz
+               0x00000003, 0xfffbfff3, 0x001d0013, 0xffaa000b,
+               0x00aaff89, 0xff13014a, 0x00cefd95, 0x000a037b,
+               0xfe35fc1d, 0x044c0305, 0xf90cff7e, 0x08d5fc81,
+               0xf7100834, 0x069ff3a7, 0xfe160e8d, 0xfbaaf231,
+               0x0aa509e9, 0xf0adfc65, 0x110d0000,
+       }, {    // 14.3 MHz
+               0x00000003, 0xffffffef, 0x00140025, 0xffb4ffdd,
+               0x00b2ffd6, 0xfedb00f0, 0x0150fdd3, 0xff380391,
+               0xff36fb85, 0x035e0426, 0xf994fdfe, 0x08eefe0b,
+               0xf6490702, 0x07e1f43e, 0xfcb60e97, 0xfcc6f1be,
+               0x0a040a67, 0xf0dbfc30, 0x110d0000,
+       }, {    // 14.4 MHz
+               0x00000003, 0x0002ffee, 0x00070033, 0xffc9ffb4,
+               0x00a40027, 0xfec3007e, 0x01b4fe3f, 0xfe760369,
+               0x0044fb2e, 0x02450518, 0xfa5ffc90, 0x08c1ffa1,
+               0xf5bc05ae, 0x0902f4fc, 0xfb600e85, 0xfde7f15a,
+               0x095d0ae2, 0xf10cfbfb, 0x110d0000,
+       }, {    // 14.5 MHz
+               0xffff0002, 0x0005ffef, 0xfffa0038, 0xffe5ff95,
+               0x00820074, 0xfecc0000, 0x01f0fed0, 0xfdd20304,
+               0x014dfb1d, 0x010e05ce, 0xfb64fb41, 0x084e013b,
+               0xf569043e, 0x0a00f5dd, 0xfa150e55, 0xff0bf104,
+               0x08b00b59, 0xf13ffbc6, 0x110d0000,
+       }, {    // 14.6 MHz
+               0xffff0001, 0x0008fff4, 0xffed0035, 0x0005ff83,
+               0x005000b4, 0xfef6ff82, 0x01ffff7a, 0xfd580269,
+               0x0241fb53, 0xffca0640, 0xfc99fa1e, 0x079a02cb,
+               0xf55502ba, 0x0ad5f6e0, 0xf8d90e0a, 0x0031f0bd,
+               0x07fd0bcb, 0xf174fb91, 0x110d0000,
+       }, {    // 14.7 MHz
+               0xffffffff, 0x0009fffb, 0xffe4002a, 0x0025ff82,
+               0x001400e0, 0xff3cff10, 0x01e10030, 0xfd1201a4,
+               0x0311fbcd, 0xfe88066a, 0xfdf1f92f, 0x06aa0449,
+               0xf57e0128, 0x0b7ef801, 0xf7b00da2, 0x0156f086,
+               0x07450c39, 0xf1acfb5c, 0x110d0000,
+       }, {    // 14.8 MHz
+               0x0000fffe, 0x00080002, 0xffdf0019, 0x003fff92,
+               0xffd600f1, 0xff96feb6, 0x019700e1, 0xfd0500c2,
+               0x03b0fc84, 0xfd590649, 0xff5df87f, 0x058505aa,
+               0xf5e4ff91, 0x0bf9f93c, 0xf69d0d20, 0x0279f05e,
+               0x06880ca3, 0xf1e6fb28, 0x110d0000,
+       }, {    // 14.9 MHz
+               0x0000fffd, 0x00060009, 0xffdf0004, 0x0051ffb0,
+               0xff9d00e8, 0xfffcfe7c, 0x01280180, 0xfd32ffd2,
+               0x0413fd6e, 0xfc4d05df, 0x00d1f812, 0x043506e4,
+               0xf685fdfb, 0x0c43fa8d, 0xf5a10c83, 0x0399f046,
+               0x05c70d08, 0xf222faf3, 0x110d0000,
+       }, {    // 15.0 MHz
+               0x0000fffd, 0x0003000f, 0xffe5ffef, 0x0057ffd9,
+               0xff7000c4, 0x0062fe68, 0x009e01ff, 0xfd95fee6,
+               0x0435fe7d, 0xfb710530, 0x023cf7ee, 0x02c307ef,
+               0xf75efc70, 0x0c5cfbef, 0xf4c10bce, 0x04b3f03f,
+               0x05030d69, 0xf261fabf, 0x110d0000,
+       }, {    // 15.1 MHz
+               0x0000fffd, 0xffff0012, 0xffefffdc, 0x00510006,
+               0xff540089, 0x00befe7c, 0x00060253, 0xfe27fe0d,
+               0x0413ffa2, 0xfad10446, 0x0390f812, 0x013b08c3,
+               0xf868faf6, 0x0c43fd5f, 0xf3fd0b02, 0x05c7f046,
+               0x043b0dc4, 0xf2a1fa8b, 0x110d0000,
+       }, {    // 15.2 MHz
+               0x0001fffe, 0xfffc0012, 0xfffbffce, 0x003f0033,
+               0xff4e003f, 0x0106feb6, 0xff6e0276, 0xfeddfd56,
+               0x03b000cc, 0xfa740329, 0x04bff87f, 0xffaa095d,
+               0xf99ef995, 0x0bf9fed8, 0xf3590a1f, 0x06d2f05e,
+               0x03700e1b, 0xf2e4fa58, 0x110d0000,
+       }, {    // 15.3 MHz
+               0x0001ffff, 0xfff9000f, 0x0009ffc8, 0x00250059,
+               0xff5effee, 0x0132ff10, 0xfee30265, 0xffaafccf,
+               0x031101eb, 0xfa6001e8, 0x05bdf92f, 0xfe1b09b6,
+               0xfafaf852, 0x0b7e0055, 0xf2d50929, 0x07d3f086,
+               0x02a30e6c, 0xf329fa24, 0x110d0000,
+       }, {    // 15.4 MHz
+               0x00010001, 0xfff80009, 0x0015ffca, 0x00050074,
+               0xff81ff9f, 0x013dff82, 0xfe710221, 0x007cfc80,
+               0x024102ed, 0xfa940090, 0x0680fa1e, 0xfc9b09cd,
+               0xfc73f736, 0x0ad501d0, 0xf2740820, 0x08c9f0bd,
+               0x01d40eb9, 0xf371f9f1, 0x110d0000,
+       }, {    // 15.5 MHz
+               0x00000002, 0xfff80002, 0x001effd5, 0xffe5007f,
+               0xffb4ff5b, 0x01280000, 0xfe2401b0, 0x0146fc70,
+               0x014d03c6, 0xfb10ff32, 0x0701fb41, 0xfb3709a1,
+               0xfe00f644, 0x0a000345, 0xf2350708, 0x09b2f104,
+               0x01050eff, 0xf3baf9be, 0x110d0000,
+       }, {    // 15.6 MHz
+               0x00000003, 0xfff9fffb, 0x0022ffe6, 0xffc9007a,
+               0xfff0ff29, 0x00f2007e, 0xfe01011b, 0x01f6fc9e,
+               0x00440467, 0xfbccfdde, 0x0738fc90, 0xf9f70934,
+               0xff99f582, 0x090204b0, 0xf21a05e1, 0x0a8df15a,
+               0x00340f41, 0xf405f98b, 0x110d0000,
+       }, {    // 15.7 MHz
+               0x00000003, 0xfffcfff4, 0x0020fffa, 0xffb40064,
+               0x002fff11, 0x00a400f0, 0xfe0d006e, 0x0281fd09,
+               0xff3604c9, 0xfcbffca2, 0x0726fdfe, 0xf8e80888,
+               0x0134f4f3, 0x07e1060c, 0xf22304af, 0x0b59f1be,
+               0xff640f7d, 0xf452f959, 0x110d0000,
+       }, {    // 15.8 MHz
+               0x00000003, 0x0000fff0, 0x001a0010, 0xffaa0041,
+               0x0067ff13, 0x0043014a, 0xfe46ffb9, 0x02dbfda8,
+               0xfe3504e5, 0xfddcfb8d, 0x06c9ff7e, 0xf81107a2,
+               0x02c9f49a, 0x069f0753, 0xf2500373, 0x0c14f231,
+               0xfe930fb3, 0xf4a1f927, 0x110d0000,
+       }, {    // 15.9 MHz
+               0xffff0002, 0x0003ffee, 0x000f0023, 0xffac0016,
+               0x0093ff31, 0xffdc0184, 0xfea6ff09, 0x02fdfe70,
+               0xfd5104ba, 0xff15faac, 0x06270103, 0xf7780688,
+               0x044df479, 0x05430883, 0xf2a00231, 0x0cbef2b2,
+               0xfdc40fe3, 0xf4f2f8f5, 0x110d0000,
+       }, {    // 16.0 MHz
+               0xffff0001, 0x0006ffef, 0x00020031, 0xffbaffe8,
+               0x00adff66, 0xff790198, 0xff26fe6e, 0x02e5ff55,
+               0xfc99044a, 0x005bfa09, 0x0545027f, 0xf7230541,
+               0x05b8f490, 0x03d20997, 0xf31300eb, 0x0d55f341,
+               0xfcf6100e, 0xf544f8c3, 0x110d0000,
+       }
+};
+
 static void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
 {
        u64 pll_freq;
        u32 pll_freq_word;
+       const u32 *coeffs;
 
        v4l_dbg(1, cx25840_debug, client, "%s(%d)\n", __func__, ifHz);
 
@@ -2763,2889 +3554,26 @@ static void cx23885_dif_setup(struct i2c_client *client, u32 ifHz)
 
        v4l_dbg(1, cx25840_debug, client, "%s(%d) again\n", __func__, ifHz);
 
-       switch (ifHz) {
-       case 3000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00080012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0024);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x001bfff8);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffb4ff50);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed8fe68);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe24fe34);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfebaffc7);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d031f);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x04f0065d);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x07010688);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x04c901d6);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe00f9d3);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f342);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf235f337);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf64efb22);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0105070f);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x0c460fce);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 3100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00070012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00220032);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00370026);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xfff0ff91);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff0efe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01fdcc);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe0afedb);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440224);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0434060c);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0738074e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x06090361);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xff99fb39);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef3b6);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21af2a5);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf573fa33);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0034067d);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x0bfb0fb9);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 3200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0004000e);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00200038);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x004c004f);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x002fffdf);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff5cfeb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0dfd92);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd7ffe03);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36010a);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x03410575);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x072607d2);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x071804d5);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0134fcb7);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff451);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf223f22e);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4a7f94b);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xff6405e8);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x0bae0fa4);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 3300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00000008);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001a0036);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0056006d);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00670030);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xffbdff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe46fd8d);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd25fd4f);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35ffe0);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0224049f);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c9080e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x07ef0627);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c9fe45);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f513);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf250f1d2);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3ecf869);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe930552);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x0b5f0f8f);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 3400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd0001);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x000f002c);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0054007d);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0093007c);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0024ff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea6fdbb);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd03fcca);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51feb9);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x00eb0392);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x06270802);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08880750);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x044dffdb);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf5f8);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a0f193);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf342f78f);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc404b9);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x0b0e0f78);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 3500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff9);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0002001b);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0046007d);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00ad00ba);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00870000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff26fe1a);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd1bfc7e);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fda4);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xffa5025c);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x054507ad);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08dd0847);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b80172);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef6ff);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf313f170);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2abf6bd);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6041f);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x0abc0f61);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 3600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff3);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff50006);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x002f006c);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00b200e3);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00dc007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xffb9fea0);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd6bfc71);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fcb1);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe65010b);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x042d0713);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08ec0906);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x07020302);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff823);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3a7f16a);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf228f5f5);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfc2a0384);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x0a670f4a);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 3700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7ffef);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe9fff1);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0010004d);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00a100f2);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x011a00f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0053ff44);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdedfca2);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fbef);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd39ffae);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x02ea0638);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08b50987);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x08230483);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f960);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf45bf180);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1b8f537);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfb6102e7);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x0a110f32);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 3800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9ffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffdd);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xfff00024);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x007c00e5);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x013a014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x00e6fff8);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe98fd0f);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fb67);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc32fe54);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x01880525);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x083909c7);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x091505ee);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7fab3);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf52df1b4);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf15df484);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfa9b0249);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x09ba0f19);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 3900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbfff0);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffcf);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1fff6);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x004800be);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x01390184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x016300ac);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xff5efdb1);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fb23);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb5cfd0d);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x001703e4);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x077b09c4);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x09d2073c);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251fc18);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf61cf203);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf118f3dc);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf9d801aa);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x09600eff);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 4000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffefff4);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffc8);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffca);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x000b0082);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x01170198);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01c10152);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0030fe7b);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fb24);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfac3fbe9);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfea5027f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x0683097f);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a560867);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2fd89);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf723f26f);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0e8f341);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf919010a);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x09060ee5);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 4100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0002fffb);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe8ffca);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffa4);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffcd0036);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d70184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f601dc);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x00ffff60);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb6d);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa6efaf5);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd410103);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x055708f9);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a9e0969);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543ff02);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf842f2f5);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0cef2b2);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf85e006b);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x08aa0ecb);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 4200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00050003);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff3ffd3);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaff8b);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff95ffe5);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0080014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fe023f);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x01ba0050);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fbf8);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa62fa3b);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbf9ff7e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x04010836);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aa90a3d);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f007f);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf975f395);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0cbf231);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf7a9ffcb);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x084c0eaf);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 4300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000a);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0000ffe4);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ff81);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff6aff96);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x001c00f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01d70271);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0254013b);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fcbd);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa9ff9c5);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfadbfdfe);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x028c073b);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a750adf);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e101fa);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfab8f44e);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0ddf1be);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf6f9ff2b);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x07ed0e94);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 4400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0009000f);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x000efff8);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ff87);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff52ff54);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xffb5007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01860270);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c00210);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fdb2);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb22f997);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9f2fc90);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x0102060f);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a050b4c);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902036e);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfc0af51e);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf106f15a);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf64efe8b);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x078d0e77);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 4500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00080012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0019000e);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff9e);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff4fff25);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff560000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0112023b);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f702c0);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfec8);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbe5f9b3);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf947fb41);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xff7004b9);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x095a0b81);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a0004d8);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfd65f603);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf144f104);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf5aafdec);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x072b0e5a);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 4600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00060012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00200022);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ffc1);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff61ff10);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff09ff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x008601d7);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f50340);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fff0);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcddfa19);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8e2fa1e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfde30343);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x08790b7f);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50631);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfec7f6fc);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf198f0bd);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf50dfd4e);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x06c90e3d);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 4700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0003000f);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00220030);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ffed);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff87ff15);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed6ff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xffed014c);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b90386);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110119);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfdfefac4);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8c6f92f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc6701b7);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x07670b44);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0776);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x002df807);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf200f086);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf477fcb1);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x06650e1e);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 4800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0009);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0038);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x003f001b);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffbcff36);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec2feb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff5600a5);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0248038d);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00232);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xff39fbab);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8f4f87f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb060020);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x062a0ad2);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf908a3);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0192f922);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf27df05e);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf3e8fc14);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x06000e00);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 4900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc0002);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00160037);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00510046);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xfff9ff6d);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed0fe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfecefff0);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x01aa0356);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413032b);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x007ffcc5);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf96cf812);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9cefe87);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x04c90a2c);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c4309b4);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x02f3fa4a);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf30ef046);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf361fb7a);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x059b0de0);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 5000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffa);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x000a002d);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00570067);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0037ffb5);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfefffe68);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe62ff3d);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x00ec02e3);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x043503f6);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x01befe05);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa27f7ee);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8c6fcf8);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x034c0954);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5c0aa4);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x044cfb7e);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3b1f03f);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf2e2fae1);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x05340dc0);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 5100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff4);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfffd001e);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0051007b);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x006e0006);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff48fe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe1bfe9a);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x001d023e);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130488);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x02e6ff5b);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb1ef812);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7f7fb7f);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x01bc084e);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c430b72);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x059afcba);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf467f046);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf26cfa4a);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x04cd0da0);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 5200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8ffef);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff00009);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x003f007f);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00980056);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xffa5feb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe00fe15);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xff4b0170);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b004d7);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x03e800b9);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc48f87f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf768fa23);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0022071f);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf90c1b);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x06dafdfd);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf52df05e);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf1fef9b5);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x04640d7f);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 5300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9ffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe6fff3);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00250072);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00af009c);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x000cff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe13fdb8);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe870089);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x031104e1);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x04b8020f);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd98f92f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf71df8f0);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe8805ce);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0c9c);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0808ff44);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf603f086);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf19af922);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x03fb0d5e);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 5400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcffef);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffe0);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00050056);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00b000d1);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0071ff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe53fd8c);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfddfff99);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104a3);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x054a034d);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xff01fa1e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf717f7ed);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfcf50461);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50cf4);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0921008d);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf6e7f0bd);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf13ff891);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x03920d3b);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 5500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffffff3);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffd1);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5002f);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x009c00ed);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00cb0000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfebafd94);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd61feb0);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d0422);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x05970464);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0074fb41);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf759f721);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfb7502de);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000d21);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a2201d4);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf7d9f104);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0edf804);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x03280d19);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 5600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0003fffa);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe3ffc9);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffc90002);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x007500ef);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x010e007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff3dfdcf);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd16fddd);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440365);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x059b0548);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x01e3fc90);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7dff691);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa0f014d);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x09020d23);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b0a0318);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf8d7f15a);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0a5f779);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x02bd0cf6);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 5700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00060001);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffecffc9);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ffd4);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x004000d5);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x013600f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xffd3fe39);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd04fd31);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xff360277);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x055605ef);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x033efdfe);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8a5f642);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf8cbffb6);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e10cfb);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0bd50456);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf9dff1be);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf067f6f2);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x02520cd2);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 5800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00080009);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff8ffd2);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaffac);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x000200a3);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x013c014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x006dfec9);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd2bfcb7);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe350165);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x04cb0651);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0477ff7e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9a5f635);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7b1fe20);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0ca8);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c81058b);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfaf0f231);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf033f66d);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x01e60cae);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 5900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0009000e);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0005ffe1);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffacff90);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffc5005f);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x01210184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x00fcff72);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd8afc77);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51003f);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x04020669);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x05830103);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfad7f66b);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6c8fc93);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430c2b);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d0d06b5);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfc08f2b2);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf00af5ec);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x017b0c89);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 6000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00070012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0012fff5);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaff82);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff8e000f);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e80198);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01750028);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe18fc75);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99ff15);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x03050636);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0656027f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc32f6e2);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf614fb17);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20b87);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d7707d2);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfd26f341);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xefeaf56f);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x010f0c64);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 6100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00050012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001c000b);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1ff84);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff66ffbe);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00960184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01cd00da);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfeccfcb2);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fdf9);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x01e005bc);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x06e703e4);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfdabf798);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf599f9b3);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x02510abd);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dbf08df);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfe48f3dc);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xefd5f4f6);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x00a20c3e);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 6200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0002000f);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0021001f);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0ff97);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff50ff74);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0034014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fa0179);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xff97fd2a);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fcfa);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x00a304fe);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x07310525);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xff37f886);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55cf86e);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c709d0);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de209db);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xff6df484);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xefcbf481);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0x00360c18);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 6300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffe000a);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0021002f);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0010ffb8);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff50ff3b);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xffcc00f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fa01fa);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0069fdd4);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fc26);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xff5d0407);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x07310638);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x00c9f9a8);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55cf74e);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xff3908c3);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de20ac3);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0093f537);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xefcbf410);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xffca0bf2);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 6400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffb0003);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001c0037);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x002fffe2);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff66ff17);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff6a007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01cd0251);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0134fea5);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fb8b);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe2002e0);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x06e70713);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x0255faf5);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf599f658);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaf0799);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dbf0b96);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x01b8f5f5);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xefd5f3a3);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xff5e0bca);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 6500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffb);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00120037);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00460010);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff8eff0f);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff180000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01750276);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x01e8ff8d);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fb31);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcfb0198);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x065607ad);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x03cefc64);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf614f592);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2e0656);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d770c52);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x02daf6bd);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xefeaf33b);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfef10ba3);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 6600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7fff5);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0005002f);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0054003c);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffc5ff22);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedfff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x00fc0267);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0276007e);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb1c);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbfe003e);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x05830802);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x0529fdec);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6c8f4fe);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabd04ff);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d0d0cf6);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x03f8f78f);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf00af2d7);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfe850b7b);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 6700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff0);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff80020);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00560060);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0002ff4e);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec4ff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x006d0225);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02d50166);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb4e);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb35fee1);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0477080e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x065bff82);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7b1f4a0);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf9610397);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c810d80);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0510f869);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf033f278);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfe1a0b52);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 6800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffaffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffec000c);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0078);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0040ff8e);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecafeb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xffd301b6);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fc0235);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fbc5);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaaafd90);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x033e07d2);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x075b011b);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf8cbf47a);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81f0224);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0bd50def);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0621f94b);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf067f21e);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfdae0b29);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 6900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffdffef);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe3fff6);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0037007f);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0075ffdc);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef2fe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff3d0122);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02ea02dd);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fc79);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa65fc5d);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x01e3074e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x082102ad);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa0ff48c);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fe00a9);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b0a0e43);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0729fa33);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0a5f1c9);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfd430b00);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 7000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0001fff3);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffe2);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x001b0076);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x009c002d);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff35fe68);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfeba0076);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x029f0352);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfd60);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa69fb53);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x00740688);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08a7042d);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfb75f4d6);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600ff2d);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a220e7a);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0827fb22);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf0edf17a);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfcd80ad6);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 7100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0004fff9);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffd2);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xfffb005e);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00b0007a);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff8ffe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe53ffc1);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0221038c);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fe6e);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfab6fa80);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xff010587);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08e90590);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfcf5f556);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bfdb3);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x09210e95);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0919fc15);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf13ff12f);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfc6e0aab);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 7200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00070000);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe6ffc9);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffdb0039);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00af00b8);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfff4feb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe13ff10);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x01790388);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311ff92);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb48f9ed);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd980453);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08e306cd);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe88f60a);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482fc40);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x08080e93);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x09fdfd0c);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf19af0ea);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfc050a81);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 7300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00080008);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff0ffc9);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1000d);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x009800e2);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x005bff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe00fe74);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x00b50345);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b000bc);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc18f9a1);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc4802f9);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x089807dc);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0022f6f0);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407fada);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x06da0e74);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ad3fe06);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf1fef0ab);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfb9c0a55);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 7400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000e);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfffdffd0);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffafffdf);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x006e00f2);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00b8ff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe1bfdf8);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xffe302c8);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x041301dc);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd1af99e);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb1e0183);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x080908b5);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x01bcf801);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdf985);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x059a0e38);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b99ff03);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf26cf071);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfb330a2a);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 7500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00070011);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x000affdf);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffa9ffb5);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x003700e6);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x01010000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe62fda8);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xff140219);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x043502e1);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe42f9e6);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa270000);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x073a0953);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x034cf939);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3a4f845);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x044c0de1);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c4f0000);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf2e2f03c);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfacc09fe);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 7600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00040012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0016fff3);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffafff95);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xfff900c0);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0130007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfecefd89);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe560146);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x041303bc);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xff81fa76);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf96cfe7d);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x063209b1);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x04c9fa93);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdf71e);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x02f30d6e);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cf200fd);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf361f00e);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfa6509d1);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 7700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00010010);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001e0008);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1ff84);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffbc0084);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x013e00f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff56fd9f);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdb8005c);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00460);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x00c7fb45);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8f4fd07);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x04fa09ce);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x062afc07);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407f614);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x01920ce0);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d8301fa);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf3e8efe5);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xfa0009a4);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 7800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd000b);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0022001d);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffdbff82);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff870039);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x012a014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xffedfde7);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd47ff6b);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x031104c6);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0202fc4c);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8c6fbad);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x039909a7);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0767fd8e);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482f52b);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x002d0c39);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e0002f4);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf477efc2);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf99b0977);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 7900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa0004);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0020002d);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xfffbff91);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff61ffe8);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f70184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0086fe5c);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd0bfe85);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104e5);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0323fd7d);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8e2fa79);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x021d093f);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0879ff22);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bf465);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfec70b79);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e6803eb);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf50defa5);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf937094a);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 8000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fffd);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00190036);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x001bffaf);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff4fff99);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00aa0198);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0112fef3);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd09fdb9);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d04be);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x041bfecc);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf947f978);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x00900897);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x095a00b9);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f3c5);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfd650aa3);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ebc04de);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf5aaef8e);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf8d5091c);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 8100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000ffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff7fff6);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x000e0038);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0037ffd7);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff52ff56);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x004b0184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0186ffa1);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd40fd16);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440452);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x04de0029);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9f2f8b2);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfefe07b5);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a05024d);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef34d);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfc0a09b8);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0efa05cd);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf64eef7d);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf87308ed);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 8200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff0);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00000031);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0005);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff6aff27);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xffe4014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01d70057);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdacfca6);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3603a7);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x05610184);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfadbf82e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd74069f);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a7503d6);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff2ff);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfab808b9);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f2306b5);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf6f9ef72);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf81308bf);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 8300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff30022);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00560032);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff95ff10);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff8000f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01fe0106);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe46fc71);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3502c7);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x059e02ce);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbf9f7f2);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfbff055b);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aa9054c);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f2db);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf97507aa);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f350797);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf7a9ef6d);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf7b40890);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 8400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffeffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe8000f);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00540058);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffcdff14);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff29007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f6019e);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xff01fc7c);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd5101bf);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x059203f6);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfd41f7fe);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfaa903f3);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a9e06a9);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf2e2);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf842068b);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f320871);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf85eef6e);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf7560860);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 8500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0002fff2);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1fff9);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00460073);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x000bff34);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfee90000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01c10215);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xffd0fcc5);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99009d);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x053d04f1);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfea5f853);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf97d0270);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a5607e4);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef314);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf723055f);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0f180943);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf919ef75);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf6fa0830);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 8600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0005fff8);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffe4);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x002f007f);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0048ff6b);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec7ff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0163025f);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x00a2fd47);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17ff73);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x04a405b2);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0017f8ed);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf88500dc);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x09d208f9);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff370);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf61c0429);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ee80a0b);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xf9d8ef82);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf6a00800);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 8700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0007ffff);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe1ffd4);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0010007a);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x007cffb2);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec6ff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x00e60277);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0168fdf9);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fe50);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x03ce0631);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0188f9c8);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7c7ff43);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x091509e3);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f3f6);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf52d02ea);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0ea30ac9);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfa9bef95);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf64607d0);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 8800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00090007);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe9ffca);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xfff00065);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00a10003);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfee6feb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0053025b);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0213fed0);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3fd46);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x02c70668);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x02eafadb);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf74bfdae);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x08230a9c);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7f4a3);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf45b01a6);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0e480b7c);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfb61efae);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf5ef079f);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 8900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000d);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff5ffc8);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffd10043);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00b20053);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff24fe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xffb9020c);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0295ffbb);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fc64);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x019b0654);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x042dfc1c);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf714fc2a);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x07020b21);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251f575);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3a7005e);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0dd80c24);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfc2aefcd);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf599076e);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 9000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00060011);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0002ffcf);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffba0018);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00ad009a);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff79fe68);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff260192);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02e500ab);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fbb6);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x005b05f7);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0545fd81);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf723fabf);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b80b70);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2f669);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf313ff15);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d550cbf);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6eff2);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf544073d);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 9100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00030012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x000fffdd);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffea);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x009300cf);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xffdcfe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea600f7);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fd0190);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fb46);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xff150554);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0627fefd);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf778f978);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x044d0b87);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543f77d);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a0fdcf);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cbe0d4e);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc4f01d);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4f2070b);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 9200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00000010);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001afff0);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffaaffbf);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x006700ed);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0043feb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe460047);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02db0258);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb1b);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfddc0473);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c90082);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf811f85e);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c90b66);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x069ff8ad);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf250fc8d);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c140dcf);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe93f04d);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4a106d9);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 9300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc000c);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00200006);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ff9c);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x002f00ef);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00a4ff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0dff92);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x028102f7);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb37);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcbf035e);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x07260202);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8e8f778);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x01340b0d);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1f9f4);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf223fb51);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b590e42);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xff64f083);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf45206a7);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 9400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff90005);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0022001a);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ff86);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xfff000d7);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f2ff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01fee5);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x01f60362);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb99);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbcc0222);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x07380370);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9f7f6cc);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xff990a7e);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902fb50);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21afa1f);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0a8d0ea6);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0034f0bf);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4050675);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 9500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fffe);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001e002b);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff81);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffb400a5);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x01280000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe24fe50);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x01460390);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfc3a);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb1000ce);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x070104bf);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb37f65f);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe0009bc);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a00fcbb);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf235f8f8);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x09b20efc);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0105f101);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf3ba0642);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 9600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff7);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00150036);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ff8c);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff810061);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x013d007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe71fddf);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x007c0380);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fd13);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa94ff70);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x068005e2);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc9bf633);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfc7308ca);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad5fe30);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf274f7e0);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x08c90f43);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x01d4f147);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf371060f);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 9700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fff1);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00090038);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ffa7);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff5e0012);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x013200f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfee3fd9b);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xffaa0331);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fe15);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa60fe18);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x05bd06d1);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe1bf64a);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfafa07ae);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7effab);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2d5f6d7);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x07d30f7a);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x02a3f194);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf32905dc);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 9800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfffb0032);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x003fffcd);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff4effc1);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0106014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff6efd8a);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfedd02aa);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0ff34);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa74fcd7);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x04bf0781);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xffaaf6a3);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf99e066b);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf90128);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf359f5e1);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x06d20fa2);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0370f1e5);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2e405a8);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 9900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xffffffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffef0024);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0051fffa);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff54ff77);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00be0184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0006fdad);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe2701f3);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413005e);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfad1fbba);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x039007ee);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x013bf73d);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf868050a);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c4302a1);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3fdf4fe);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x05c70fba);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x043bf23c);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2a10575);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 10000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0003fff1);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe50011);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00570027);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff70ff3c);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00620198);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x009efe01);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd95011a);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x04350183);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb71fad0);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x023c0812);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x02c3f811);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf75e0390);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5c0411);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf4c1f432);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x04b30fc1);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0503f297);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2610541);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 10100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0006fff7);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffdffffc);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00510050);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff9dff18);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfffc0184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0128fe80);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd32002e);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130292);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc4dfa21);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x00d107ee);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x0435f91c);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6850205);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c430573);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf5a1f37d);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x03990fba);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x05c7f2f8);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf222050d);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 10200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffe);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffdfffe7);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x003f006e);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffd6ff0f);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff96014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0197ff1f);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd05ff3e);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0037c);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd59f9b7);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xff5d0781);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x0585fa56);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5e4006f);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf906c4);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf69df2e0);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x02790fa2);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0688f35d);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1e604d8);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 10300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00090005);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe4ffd6);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0025007e);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0014ff20);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff3c00f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e1ffd0);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd12fe5c);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110433);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe88f996);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfdf106d1);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x06aafbb7);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf57efed8);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e07ff);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf7b0f25e);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x01560f7a);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0745f3c7);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1ac04a4);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 10400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0008000c);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffedffcb);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0005007d);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0050ff4c);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef6007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01ff0086);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd58fd97);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x024104ad);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xffcaf9c0);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc9905e2);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x079afd35);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf555fd46);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad50920);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf8d9f1f6);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x00310f43);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x07fdf435);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf174046f);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 10500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xfffffffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00050011);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfffaffc8);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5006b);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0082ff8c);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecc0000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f00130);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdd2fcfc);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d04e3);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x010efa32);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb6404bf);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x084efec5);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf569fbc2);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000a23);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfa15f1ab);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xff0b0efc);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x08b0f4a7);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf13f043a);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 10600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00020012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0007ffcd);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9004c);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00a4ffd9);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec3ff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01b401c1);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe76fc97);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x004404d2);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0245fae8);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa5f0370);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08c1005f);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5bcfa52);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x09020b04);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfb60f17b);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfde70ea6);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x095df51e);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf10c0405);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 10700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0011);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0014ffdb);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffb40023);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00b2002a);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedbff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0150022d);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xff38fc6f);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36047b);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x035efbda);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9940202);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08ee01f5);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf649f8fe);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e10bc2);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfcb6f169);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfcc60e42);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0a04f599);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0db03d0);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 10800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffb000d);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001dffed);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffaafff5);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00aa0077);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff13feb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x00ce026b);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x000afc85);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3503e3);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x044cfcfb);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf90c0082);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08d5037f);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf710f7cc);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0c59);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfe16f173);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfbaa0dcf);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0aa5f617);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0ad039b);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 10900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff90006);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00210003);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffacffc8);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x008e00b6);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff63fe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x003a0275);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x00dafcda);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd510313);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0501fe40);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8cbfefd);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x087604f0);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf80af6c2);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430cc8);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xff7af19a);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfa940d4e);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0b3ff699);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0810365);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 11000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8ffff);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00210018);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffa3);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x006000e1);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xffc4fe68);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xffa0024b);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x019afd66);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc990216);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0575ff99);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8d4fd81);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x07d40640);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf932f5e6);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20d0d);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x00dff1de);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf9860cbf);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0bd1f71e);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf058032f);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 11100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff8);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001b0029);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffd1ff8a);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x002600f2);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x002cfe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff0f01f0);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x023bfe20);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc1700fa);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x05a200f7);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf927fc1c);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x06f40765);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa82f53b);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x02510d27);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0243f23d);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf8810c24);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0c5cf7a7);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf03102fa);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 11200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff2);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00110035);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0ff81);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffe700e7);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x008ffeb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe94016d);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b0fefb);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3ffd1);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x05850249);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9c1fadb);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x05de0858);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfbf2f4c4);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c70d17);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x03a0f2b8);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf7870b7c);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0cdff833);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf00d02c4);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 11300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffdffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00040038);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0010ff88);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffac00c2);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e2ff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe3900cb);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f1ffe9);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3feaa);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x05210381);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa9cf9c8);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x04990912);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfd7af484);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xff390cdb);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x04f4f34d);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf69a0ac9);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0d5af8c1);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xefec028e);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 11400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0000ffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff60033);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x002fff9f);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff7b0087);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x011eff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe080018);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f900d8);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17fd96);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x04790490);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbadf8ed);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x032f098e);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xff10f47d);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaf0c75);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x063cf3fc);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf5ba0a0b);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0dccf952);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xefcd0258);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 11500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0004fff1);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffea0026);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0046ffc3);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff5a003c);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x013b0000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe04ff63);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c801b8);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fca6);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0397056a);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfcecf853);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x01ad09c9);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x00acf4ad);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2e0be7);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0773f4c2);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4e90943);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e35f9e6);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xefb10221);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 11600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0007fff6);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe20014);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0054ffee);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff4effeb);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0137007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2efebb);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0260027a);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fbe6);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x02870605);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfe4af7fe);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x001d09c1);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0243f515);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabd0b32);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0897f59e);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4280871);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e95fa7c);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef9701eb);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 11700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffd);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffdeffff);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0056001d);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff57ff9c);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x011300f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe82fe2e);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x01ca0310);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fb62);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0155065a);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xffbaf7f2);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe8c0977);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x03cef5b2);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf9610a58);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x09a5f68f);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf3790797);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0eebfb14);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef8001b5);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 11800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00080004);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe0ffe9);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x004c0047);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff75ff58);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d1014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfef9fdc8);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0111036f);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb21);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x00120665);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x012df82e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd0708ec);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0542f682);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81f095c);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a9af792);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2db06b5);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f38fbad);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef6c017e);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 11900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0007000b);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe7ffd8);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00370068);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffa4ff28);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00790184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff87fd91);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x00430392);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb26);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfece0626);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0294f8b2);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb990825);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0698f77f);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fe0842);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b73f8a7);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf25105cd);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f7bfc48);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef5a0148);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 12000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00050010);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff2ffcc);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x001b007b);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffdfff10);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00140198);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0020fd8e);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xff710375);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfb73);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd9a059f);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x03e0f978);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfa4e0726);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x07c8f8a7);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600070c);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c2ff9c9);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1db04de);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fb4fce5);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef4b0111);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 12100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00010012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffffffc8);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xfffb007e);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x001dff14);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xffad0184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x00b7fdbe);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfea9031b);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fc01);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc8504d6);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0504fa79);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf93005f6);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x08caf9f2);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52b05c0);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0ccbfaf9);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf17903eb);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fe3fd83);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3f00db);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 12200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffe0011);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x000cffcc);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffdb0071);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0058ff32);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff4f014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x013cfe1f);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdfb028a);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fcc9);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb9d03d6);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x05f4fbad);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf848049d);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0999fb5b);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf4820461);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d46fc32);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf12d02f4);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x1007fe21);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3600a4);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 12300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa000e);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0017ffd9);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffc10055);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0088ff68);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff0400f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01a6fea7);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd7501cc);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0fdc0);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaef02a8);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x06a7fd07);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf79d0326);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a31fcda);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf40702f3);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d9ffd72);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0f601fa);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x1021fec0);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2f006d);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 12400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80007);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001fffeb);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffaf002d);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00a8ffb0);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed3007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e9ff4c);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd2000ee);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413fed8);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa82015c);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0715fe7d);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7340198);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a8dfe69);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bd017c);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dd5feb8);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0d500fd);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x1031ff60);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2b0037);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 12500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff70000);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00220000);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffa90000);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00b30000);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec20000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x02000000);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd030000);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x04350000);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa5e0000);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x073b0000);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7110000);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0aac0000);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3a40000);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0de70000);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0c90000);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x10360000);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef290000);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 12600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff8fff9);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001f0015);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffafffd3);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00a80050);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfed3ff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e900b4);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd20ff12);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x04130128);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa82fea4);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x07150183);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf734fe68);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a8d0197);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf3bdfe84);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0dd50148);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0d5ff03);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x103100a0);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2bffc9);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 12700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffafff2);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00170027);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffc1ffab);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00880098);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff04ff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01a60159);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd75fe34);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b00240);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfaeffd58);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x06a702f9);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf79dfcda);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0a310326);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf407fd0d);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d9f028e);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf0f6fe06);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x10210140);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef2fff93);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 12800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffeffef);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x000c0034);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffdbff8f);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x005800ce);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff4ffeb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x013c01e1);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdfbfd76);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03110337);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb9dfc2a);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x05f40453);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf848fb63);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x099904a5);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf482fb9f);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0d4603ce);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf12dfd0c);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x100701df);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef36ff5c);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 12900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0001ffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffff0038);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xfffbff82);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x001d00ec);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xffadfe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x00b70242);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfea9fce5);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x024103ff);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc85fb2a);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x05040587);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf930fa0a);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x08ca060e);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf52bfa40);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0ccb0507);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf179fc15);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fe3027d);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef3fff25);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 13000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0005fff0);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff20034);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x001bff85);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffdf00f0);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0014fe68);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x00200272);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xff71fc8b);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d048d);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd9afa61);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x03e00688);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfa4ef8da);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x07c80759);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf600f8f4);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0c2f0637);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf1dbfb22);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0fb4031b);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef4bfeef);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 13100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0007fff5);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe70028);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0037ff98);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffa400d8);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0079fe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff87026f);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0043fc6e);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x004404da);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfecef9da);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0294074e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb99f7db);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x06980881);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf6fef7be);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0b730759);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf251fa33);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f7b03b8);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef5afeb8);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 13200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fffc);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe00017);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x004cffb9);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff7500a8);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00d1feb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfef90238);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0111fc91);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3604df);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0012f99b);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x012d07d2);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfd07f714);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0542097e);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf81ff6a4);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x0a9a086e);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf2dbf94b);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0f380453);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef6cfe82);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 13300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00080003);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffde0001);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0056ffe3);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff570064);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0113ff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe8201d2);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x01cafcf0);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35049e);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0155f9a6);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xffba080e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe8cf689);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x03ce0a4e);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xf961f5a8);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x09a50971);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf379f869);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0eeb04ec);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef80fe4b);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 13400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0007000a);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe2ffec);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00540012);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff4e0015);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0137ff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2e0145);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0260fd86);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51041a);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0287f9fb);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfe4a0802);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x001df63f);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x02430aeb);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfabdf4ce);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x08970a62);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf428f78f);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e950584);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xef97fe15);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 13500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0004000f);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffeaffda);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0046003d);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff5affc4);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x013b0000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe04009d);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02c8fe48);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99035a);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0397fa96);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfcec07ad);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x01adf637);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x00ac0b53);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfc2ef419);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x07730b3e);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf4e9f6bd);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0e35061a);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xefb1fddf);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 13600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00000012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfff6ffcd);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x002f0061);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff7bff79);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x011e007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe08ffe8);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f9ff28);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17026a);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0479fb70);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfbad0713);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x032ff672);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xff100b83);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xfdaff38b);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x063c0c04);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf5baf5f5);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0dcc06ae);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xefcdfda8);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 13700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffd0012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0004ffc8);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00100078);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffacff3e);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00e200f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe39ff35);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02f10017);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd30156);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0521fc7f);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa9c0638);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x0499f6ee);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfd7a0b7c);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0xff39f325);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x04f40cb3);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf69af537);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0d5a073f);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xefecfd72);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 13800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0001fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffa000e);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0011ffcb);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xfff0007f);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffe7ff19);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x008f014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe94fe93);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02b00105);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfbd3002f);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x0585fdb7);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf9c10525);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x05def7a8);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfbf20b3c);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x00c7f2e9);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x03a00d48);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf787f484);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0cdf07cd);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf00dfd3c);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 13900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010000);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80008);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001bffd7);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffd10076);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0026ff0e);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x002c0184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff0ffe10);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x023b01e0);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc17ff06);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x05a2ff09);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf92703e4);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x06f4f89b);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfa820ac5);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0251f2d9);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x02430dc3);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf881f3dc);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0c5c0859);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf031fd06);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 14000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80001);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0021ffe8);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffba005d);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0060ff1f);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xffc40198);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xffa0fdb5);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x019a029a);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99fdea);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x05750067);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8d4027f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x07d4f9c0);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf9320a1a);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d2f2f3);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0x00df0e22);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xf986f341);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0bd108e2);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf058fcd1);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 14100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffa);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0021fffd);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffac0038);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x008eff4a);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff630184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x003afd8b);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x00da0326);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd51fced);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x050101c0);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf8cb0103);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x0876fb10);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf80a093e);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0543f338);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xff7a0e66);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfa94f2b2);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0b3f0967);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf081fc9b);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 14200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffbfff3);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001d0013);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffaa000b);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00aaff89);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff13014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x00cefd95);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x000a037b);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe35fc1d);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x044c0305);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf90cff7e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08d5fc81);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf7100834);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x069ff3a7);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfe160e8d);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfbaaf231);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0aa509e9);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0adfc65);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 14300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xffffffef);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00140025);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffb4ffdd);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00b2ffd6);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfedb00f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x0150fdd3);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xff380391);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xff36fb85);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x035e0426);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xf994fdfe);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08eefe0b);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf6490702);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1f43e);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfcb60e97);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfcc6f1be);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x0a040a67);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf0dbfc30);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 14400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0002ffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00070033);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9ffb4);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00a40027);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfec3007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01b4fe3f);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe760369);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0044fb2e);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x02450518);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfa5ffc90);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x08c1ffa1);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5bc05ae);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0902f4fc);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfb600e85);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xfde7f15a);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x095d0ae2);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf10cfbfb);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 14500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0005ffef);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfffa0038);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5ff95);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00820074);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfecc0000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01f0fed0);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfdd20304);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x014dfb1d);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x010e05ce);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfb64fb41);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x084e013b);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf569043e);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a00f5dd);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xfa150e55);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0xff0bf104);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x08b00b59);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf13ffbc6);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 14600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0008fff4);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffed0035);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0005ff83);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x005000b4);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfef6ff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01ffff7a);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd580269);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0241fb53);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xffca0640);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfc99fa1e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x079a02cb);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf55502ba);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad5f6e0);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf8d90e0a);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0031f0bd);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x07fd0bcb);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf174fb91);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 14700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffffffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0009fffb);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe4002a);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0025ff82);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x001400e0);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff3cff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01e10030);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd1201a4);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0311fbcd);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfe88066a);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xfdf1f92f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x06aa0449);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf57e0128);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7ef801);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf7b00da2);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0156f086);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x07450c39);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1acfb5c);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 14800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00080002);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffdf0019);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x003fff92);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffd600f1);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff96feb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x019700e1);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd0500c2);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b0fc84);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfd590649);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0xff5df87f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x058505aa);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf5e4ff91);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf9f93c);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf69d0d20);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0279f05e);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x06880ca3);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf1e6fb28);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 14900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x00060009);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffdf0004);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0051ffb0);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff9d00e8);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xfffcfe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x01280180);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd32ffd2);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413fd6e);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfc4d05df);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x00d1f812);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x043506e4);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf685fdfb);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c43fa8d);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf5a10c83);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0399f046);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x05c70d08);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf222faf3);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 15000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0003000f);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffe5ffef);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x0057ffd9);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff7000c4);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0062fe68);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x009e01ff);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfd95fee6);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0435fe7d);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb710530);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x023cf7ee);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x02c307ef);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf75efc70);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c5cfbef);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf4c10bce);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x04b3f03f);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x05030d69);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf261fabf);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 15100000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0000fffd);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xffff0012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xffefffdc);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00510006);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff540089);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00befe7c);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0x00060253);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfe27fe0d);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x0413ffa2);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfad10446);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0390f812);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0x013b08c3);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf868faf6);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0c43fd5f);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3fd0b02);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x05c7f046);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x043b0dc4);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2a1fa8b);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 15200000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0001fffe);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffc0012);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0xfffbffce);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x003f0033);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff4e003f);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0106feb6);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff6e0276);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xfeddfd56);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x03b000cc);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa740329);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x04bff87f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xffaa095d);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xf99ef995);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0bf9fed8);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf3590a1f);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x06d2f05e);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x03700e1b);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf2e4fa58);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 15300000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x0001ffff);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9000f);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0009ffc8);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00250059);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff5effee);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0132ff10);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfee30265);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0xffaafccf);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x031101eb);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa6001e8);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x05bdf92f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfe1b09b6);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfafaf852);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0b7e0055);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2d50929);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x07d3f086);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x02a30e6c);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf329fa24);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 15400000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00010001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80009);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0015ffca);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0x00050074);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xff81ff9f);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x013dff82);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe710221);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x007cfc80);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x024102ed);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfa940090);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0680fa1e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfc9b09cd);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfc73f736);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0ad501d0);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2740820);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x08c9f0bd);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x01d40eb9);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf371f9f1);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 15500000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff80002);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001effd5);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffe5007f);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xffb4ff5b);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x01280000);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe2401b0);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0146fc70);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x014d03c6);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfb10ff32);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0701fb41);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xfb3709a1);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xfe00f644);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x0a000345);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2350708);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x09b2f104);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x01050eff);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf3baf9be);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 15600000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfff9fffb);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0022ffe6);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffc9007a);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0xfff0ff29);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00f2007e);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe01011b);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x01f6fc9e);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0x00440467);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfbccfdde);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0738fc90);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf9f70934);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0xff99f582);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x090204b0);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf21a05e1);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0a8df15a);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0x00340f41);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf405f98b);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 15700000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0xfffcfff4);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x0020fffa);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffb40064);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x002fff11);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x00a400f0);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe0d006e);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x0281fd09);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xff3604c9);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfcbffca2);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0726fdfe);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf8e80888);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x0134f4f3);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x07e1060c);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf22304af);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0b59f1be);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xff640f7d);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf452f959);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 15800000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0x00000003);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0000fff0);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x001a0010);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffaa0041);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0067ff13);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0x0043014a);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfe46ffb9);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02dbfda8);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfe3504e5);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xfddcfb8d);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x06c9ff7e);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf81107a2);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x02c9f49a);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x069f0753);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2500373);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0c14f231);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfe930fb3);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4a1f927);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 15900000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0002);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0003ffee);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x000f0023);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffac0016);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x0093ff31);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xffdc0184);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xfea6ff09);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02fdfe70);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfd5104ba);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0xff15faac);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x06270103);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7780688);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x044df479);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x05430883);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf2a00231);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0cbef2b2);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfdc40fe3);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf4f2f8f5);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-
-       case 16000000:
-               cx25840_write4(client, DIF_BPF_COEFF01, 0xffff0001);
-               cx25840_write4(client, DIF_BPF_COEFF23, 0x0006ffef);
-               cx25840_write4(client, DIF_BPF_COEFF45, 0x00020031);
-               cx25840_write4(client, DIF_BPF_COEFF67, 0xffbaffe8);
-               cx25840_write4(client, DIF_BPF_COEFF89, 0x00adff66);
-               cx25840_write4(client, DIF_BPF_COEFF1011, 0xff790198);
-               cx25840_write4(client, DIF_BPF_COEFF1213, 0xff26fe6e);
-               cx25840_write4(client, DIF_BPF_COEFF1415, 0x02e5ff55);
-               cx25840_write4(client, DIF_BPF_COEFF1617, 0xfc99044a);
-               cx25840_write4(client, DIF_BPF_COEFF1819, 0x005bfa09);
-               cx25840_write4(client, DIF_BPF_COEFF2021, 0x0545027f);
-               cx25840_write4(client, DIF_BPF_COEFF2223, 0xf7230541);
-               cx25840_write4(client, DIF_BPF_COEFF2425, 0x05b8f490);
-               cx25840_write4(client, DIF_BPF_COEFF2627, 0x03d20997);
-               cx25840_write4(client, DIF_BPF_COEFF2829, 0xf31300eb);
-               cx25840_write4(client, DIF_BPF_COEFF3031, 0x0d55f341);
-               cx25840_write4(client, DIF_BPF_COEFF3233, 0xfcf6100e);
-               cx25840_write4(client, DIF_BPF_COEFF3435, 0xf544f8c3);
-               cx25840_write4(client, DIF_BPF_COEFF36, 0x110d0000);
-               break;
-       }
+       coeffs = ifhz_coeffs[(ifHz - 3000000) / 100000];
+       cx25840_write4(client, DIF_BPF_COEFF01, coeffs[0]);
+       cx25840_write4(client, DIF_BPF_COEFF23, coeffs[1]);
+       cx25840_write4(client, DIF_BPF_COEFF45, coeffs[2]);
+       cx25840_write4(client, DIF_BPF_COEFF67, coeffs[3]);
+       cx25840_write4(client, DIF_BPF_COEFF89, coeffs[4]);
+       cx25840_write4(client, DIF_BPF_COEFF1011, coeffs[5]);
+       cx25840_write4(client, DIF_BPF_COEFF1213, coeffs[6]);
+       cx25840_write4(client, DIF_BPF_COEFF1415, coeffs[7]);
+       cx25840_write4(client, DIF_BPF_COEFF1617, coeffs[8]);
+       cx25840_write4(client, DIF_BPF_COEFF1819, coeffs[9]);
+       cx25840_write4(client, DIF_BPF_COEFF2021, coeffs[10]);
+       cx25840_write4(client, DIF_BPF_COEFF2223, coeffs[11]);
+       cx25840_write4(client, DIF_BPF_COEFF2425, coeffs[12]);
+       cx25840_write4(client, DIF_BPF_COEFF2627, coeffs[13]);
+       cx25840_write4(client, DIF_BPF_COEFF2829, coeffs[14]);
+       cx25840_write4(client, DIF_BPF_COEFF3031, coeffs[15]);
+       cx25840_write4(client, DIF_BPF_COEFF3233, coeffs[16]);
+       cx25840_write4(client, DIF_BPF_COEFF3435, coeffs[17]);
+       cx25840_write4(client, DIF_BPF_COEFF36, coeffs[18]);
 }
 
 static void cx23888_std_setup(struct i2c_client *client)
index 4bfa3b3cf619b36e0f3eb7bef9813fe8082cecb9..8e9ebed09f6498ef5c2db403191f5a01bc5646df 100644 (file)
@@ -362,8 +362,6 @@ static int ub913_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
        if (ret)
                return ret;
 
-       memset(fd, 0, sizeof(*fd));
-
        fd->type = V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL;
 
        state = v4l2_subdev_lock_and_get_active_state(sd);
index dc394e22a42c4383516e276e40ae4dcc877a7cd2..644022312833df8ac487a809a95eb7116aae5688 100644 (file)
@@ -499,8 +499,6 @@ static int ub953_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
        if (ret)
                return ret;
 
-       memset(fd, 0, sizeof(*fd));
-
        fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
 
        state = v4l2_subdev_lock_and_get_active_state(sd);
index 8ba5750f5a2319c97d37cffd42b5ccf86880d252..b8f3e5ca03efba91274493a35db1312a7e32ca99 100644 (file)
@@ -2786,8 +2786,6 @@ static int ub960_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
        if (!ub960_pad_is_source(priv, pad))
                return -EINVAL;
 
-       memset(fd, 0, sizeof(*fd));
-
        fd->type = V4L2_MBUS_FRAME_DESC_TYPE_CSI2;
 
        state = v4l2_subdev_lock_and_get_active_state(&priv->sd);
index fd56ba13873915b5f28f3242b8fa640e7c4e521d..f6ea9b7b970051c8b05e98339cc4a4edb4a9b803 100644 (file)
@@ -477,6 +477,50 @@ static const struct hi556_reg mode_1296x972_regs[] = {
        {0x0958, 0xbb80},
 };
 
+static const struct hi556_reg mode_1296x722_regs[] = {
+       {0x0a00, 0x0000},
+       {0x0b0a, 0x8259},
+       {0x0f30, 0x5b15},
+       {0x0f32, 0x7167},
+       {0x004a, 0x0100},
+       {0x004c, 0x0000},
+       {0x004e, 0x0100},
+       {0x000c, 0x0122},
+       {0x0008, 0x0b00},
+       {0x005a, 0x0404},
+       {0x0012, 0x000c},
+       {0x0018, 0x0a33},
+       {0x0022, 0x0008},
+       {0x0028, 0x0017},
+       {0x0024, 0x0022},
+       {0x002a, 0x002b},
+       {0x0026, 0x012a},
+       {0x002c, 0x06cf},
+       {0x002e, 0x3311},
+       {0x0030, 0x3311},
+       {0x0032, 0x3311},
+       {0x0006, 0x0814},
+       {0x0a22, 0x0000},
+       {0x0a12, 0x0510},
+       {0x0a14, 0x02d2},
+       {0x003e, 0x0000},
+       {0x0074, 0x0812},
+       {0x0070, 0x0409},
+       {0x0804, 0x0308},
+       {0x0806, 0x0100},
+       {0x0a04, 0x016a},
+       {0x090c, 0x09c0},
+       {0x090e, 0x0010},
+       {0x0902, 0x4319},
+       {0x0914, 0xc106},
+       {0x0916, 0x040e},
+       {0x0918, 0x0304},
+       {0x091a, 0x0708},
+       {0x091c, 0x0e06},
+       {0x091e, 0x0300},
+       {0x0958, 0xbb80},
+};
+
 static const char * const hi556_test_pattern_menu[] = {
        "Disabled",
        "Solid Colour",
@@ -556,7 +600,25 @@ static const struct hi556_mode supported_modes[] = {
                        .regs = mode_1296x972_regs,
                },
                .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
-       }
+       },
+       {
+               .width = 1296,
+               .height = 722,
+               .crop = {
+                       .left = HI556_PIXEL_ARRAY_LEFT,
+                       .top = 250,
+                       .width = HI556_PIXEL_ARRAY_WIDTH,
+                       .height = 1444
+               },
+               .fll_def = HI556_FLL_30FPS,
+               .fll_min = HI556_FLL_30FPS_MIN,
+               .llp = 0x0b00,
+               .reg_list = {
+                       .num_of_regs = ARRAY_SIZE(mode_1296x722_regs),
+                       .regs = mode_1296x722_regs,
+               },
+               .link_freq_index = HI556_LINK_FREQ_437MHZ_INDEX,
+       },
 };
 
 struct hi556 {
@@ -577,9 +639,6 @@ struct hi556 {
        /* To serialize asynchronus callbacks */
        struct mutex mutex;
 
-       /* Streaming on/off */
-       bool streaming;
-
        /* True if the device has been identified */
        bool identified;
 };
@@ -976,9 +1035,6 @@ static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        int ret = 0;
 
-       if (hi556->streaming == enable)
-               return 0;
-
        mutex_lock(&hi556->mutex);
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -998,47 +1054,8 @@ static int hi556_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       hi556->streaming = enable;
-       mutex_unlock(&hi556->mutex);
-
-       return ret;
-}
-
-static int __maybe_unused hi556_suspend(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct hi556 *hi556 = to_hi556(sd);
-
-       mutex_lock(&hi556->mutex);
-       if (hi556->streaming)
-               hi556_stop_streaming(hi556);
-
-       mutex_unlock(&hi556->mutex);
-
-       return 0;
-}
-
-static int __maybe_unused hi556_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct hi556 *hi556 = to_hi556(sd);
-       int ret;
-
-       mutex_lock(&hi556->mutex);
-       if (hi556->streaming) {
-               ret = hi556_start_streaming(hi556);
-               if (ret)
-                       goto error;
-       }
-
        mutex_unlock(&hi556->mutex);
 
-       return 0;
-
-error:
-       hi556_stop_streaming(hi556);
-       hi556->streaming = 0;
-       mutex_unlock(&hi556->mutex);
        return ret;
 }
 
@@ -1331,10 +1348,6 @@ probe_error_v4l2_ctrl_handler_free:
        return ret;
 }
 
-static const struct dev_pm_ops hi556_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(hi556_suspend, hi556_resume)
-};
-
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id hi556_acpi_ids[] = {
        {"INT3537"},
@@ -1347,7 +1360,6 @@ MODULE_DEVICE_TABLE(acpi, hi556_acpi_ids);
 static struct i2c_driver hi556_i2c_driver = {
        .driver = {
                .name = "hi556",
-               .pm = &hi556_pm_ops,
                .acpi_match_table = ACPI_PTR(hi556_acpi_ids),
        },
        .probe = hi556_probe,
index fa0038749a3b98dca98861a37cf5c8c995d6684e..825fc8dc48f50bde0214092bb1172a668686d1f5 100644 (file)
@@ -1607,17 +1607,12 @@ static int hi846_set_stream(struct v4l2_subdev *sd, int enable)
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        int ret = 0;
 
-       if (hi846->streaming == enable)
-               return 0;
-
        mutex_lock(&hi846->mutex);
 
        if (enable) {
-               ret = pm_runtime_get_sync(&client->dev);
-               if (ret < 0) {
-                       pm_runtime_put_noidle(&client->dev);
+               ret = pm_runtime_resume_and_get(&client->dev);
+               if (ret)
                        goto out;
-               }
 
                ret = hi846_start_streaming(hi846);
        }
@@ -1680,9 +1675,6 @@ static int __maybe_unused hi846_suspend(struct device *dev)
        struct v4l2_subdev *sd = i2c_get_clientdata(client);
        struct hi846 *hi846 = to_hi846(sd);
 
-       if (hi846->streaming)
-               hi846_stop_streaming(hi846);
-
        return hi846_power_off(hi846);
 }
 
@@ -1691,26 +1683,8 @@ static int __maybe_unused hi846_resume(struct device *dev)
        struct i2c_client *client = to_i2c_client(dev);
        struct v4l2_subdev *sd = i2c_get_clientdata(client);
        struct hi846 *hi846 = to_hi846(sd);
-       int ret;
-
-       ret = hi846_power_on(hi846);
-       if (ret)
-               return ret;
 
-       if (hi846->streaming) {
-               ret = hi846_start_streaming(hi846);
-               if (ret) {
-                       dev_err(dev, "%s: start streaming failed: %d\n",
-                               __func__, ret);
-                       goto error;
-               }
-       }
-
-       return 0;
-
-error:
-       hi846_power_off(hi846);
-       return ret;
+       return hi846_power_on(hi846);
 }
 
 static int hi846_set_format(struct v4l2_subdev *sd,
@@ -2173,8 +2147,6 @@ static void hi846_remove(struct i2c_client *client)
 }
 
 static const struct dev_pm_ops hi846_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                               pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(hi846_suspend, hi846_resume, NULL)
 };
 
index 32547d7a2659fad021fd629a4e4fafc8a7d8c3f1..4075c389804c1af2f79428f95b530b4427f43f73 100644 (file)
@@ -2184,9 +2184,6 @@ struct hi847 {
 
        /* To serialize asynchronus callbacks */
        struct mutex mutex;
-
-       /* Streaming on/off */
-       bool streaming;
 };
 
 static u64 to_pixel_rate(u32 f_index)
@@ -2618,14 +2615,10 @@ static int hi847_set_stream(struct v4l2_subdev *sd, int enable)
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        int ret = 0;
 
-       if (hi847->streaming == enable)
-               return 0;
-
        mutex_lock(&hi847->mutex);
        if (enable) {
-               ret = pm_runtime_get_sync(&client->dev);
-               if (ret < 0) {
-                       pm_runtime_put_noidle(&client->dev);
+               ret = pm_runtime_resume_and_get(&client->dev);
+               if (ret) {
                        mutex_unlock(&hi847->mutex);
                        return ret;
                }
@@ -2641,49 +2634,8 @@ static int hi847_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       hi847->streaming = enable;
-       mutex_unlock(&hi847->mutex);
-
-       return ret;
-}
-
-static int __maybe_unused hi847_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct hi847 *hi847 = to_hi847(sd);
-
-       mutex_lock(&hi847->mutex);
-       if (hi847->streaming)
-               hi847_stop_streaming(hi847);
-
-       mutex_unlock(&hi847->mutex);
-
-       return 0;
-}
-
-static int __maybe_unused hi847_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct hi847 *hi847 = to_hi847(sd);
-       int ret;
-
-       mutex_lock(&hi847->mutex);
-       if (hi847->streaming) {
-               ret = hi847_start_streaming(hi847);
-               if (ret)
-                       goto error;
-       }
-
        mutex_unlock(&hi847->mutex);
 
-       return 0;
-
-error:
-       hi847_stop_streaming(hi847);
-       hi847->streaming = 0;
-       mutex_unlock(&hi847->mutex);
        return ret;
 }
 
@@ -2980,10 +2932,6 @@ probe_error_v4l2_ctrl_handler_free:
        return ret;
 }
 
-static const struct dev_pm_ops hi847_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(hi847_suspend, hi847_resume)
-};
-
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id hi847_acpi_ids[] = {
        {"HYV0847"},
@@ -2996,7 +2944,6 @@ MODULE_DEVICE_TABLE(acpi, hi847_acpi_ids);
 static struct i2c_driver hi847_i2c_driver = {
        .driver = {
                .name = "hi847",
-               .pm = &hi847_pm_ops,
                .acpi_match_table = ACPI_PTR(hi847_acpi_ids),
        },
        .probe = hi847_probe,
index ee5a28675388440c974e7f717cc219a9bd23c117..a9b0aea1ae3b8427a2be29ec9978d83711683474 100644 (file)
@@ -290,9 +290,6 @@ struct imx208 {
         */
        struct mutex imx208_mx;
 
-       /* Streaming on/off */
-       bool streaming;
-
        /* OTP data */
        bool otp_read;
        char otp_data[IMX208_OTP_SIZE];
@@ -714,15 +711,13 @@ static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
        int ret = 0;
 
        mutex_lock(&imx208->imx208_mx);
-       if (imx208->streaming == enable) {
-               mutex_unlock(&imx208->imx208_mx);
-               return 0;
-       }
 
        if (enable) {
-               ret = pm_runtime_get_sync(&client->dev);
-               if (ret < 0)
-                       goto err_rpm_put;
+               ret = pm_runtime_resume_and_get(&client->dev);
+               if (ret) {
+                       mutex_unlock(&imx208->imx208_mx);
+                       return ret;
+               }
 
                /*
                 * Apply default & customized values
@@ -736,7 +731,6 @@ static int imx208_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       imx208->streaming = enable;
        mutex_unlock(&imx208->imx208_mx);
 
        /* vflip and hflip cannot change during streaming */
@@ -752,40 +746,6 @@ err_rpm_put:
        return ret;
 }
 
-static int __maybe_unused imx208_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct imx208 *imx208 = to_imx208(sd);
-
-       if (imx208->streaming)
-               imx208_stop_streaming(imx208);
-
-       return 0;
-}
-
-static int __maybe_unused imx208_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct imx208 *imx208 = to_imx208(sd);
-       int ret;
-
-       if (imx208->streaming) {
-               ret = imx208_start_streaming(imx208);
-               if (ret)
-                       goto error;
-       }
-
-       return 0;
-
-error:
-       imx208_stop_streaming(imx208);
-       imx208->streaming = 0;
-
-       return ret;
-}
-
 /* Verify chip ID */
 static const struct v4l2_subdev_video_ops imx208_video_ops = {
        .s_stream = imx208_set_stream,
@@ -819,11 +779,9 @@ static int imx208_read_otp(struct imx208 *imx208)
        if (imx208->otp_read)
                goto out_unlock;
 
-       ret = pm_runtime_get_sync(&client->dev);
-       if (ret < 0) {
-               pm_runtime_put_noidle(&client->dev);
+       ret = pm_runtime_resume_and_get(&client->dev);
+       if (ret)
                goto out_unlock;
-       }
 
        ret = imx208_identify_module(imx208);
        if (ret)
@@ -1081,10 +1039,6 @@ static void imx208_remove(struct i2c_client *client)
        mutex_destroy(&imx208->imx208_mx);
 }
 
-static const struct dev_pm_ops imx208_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(imx208_suspend, imx208_resume)
-};
-
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id imx208_acpi_ids[] = {
        { "INT3478" },
@@ -1097,7 +1051,6 @@ MODULE_DEVICE_TABLE(acpi, imx208_acpi_ids);
 static struct i2c_driver imx208_i2c_driver = {
        .driver = {
                .name = "imx208",
-               .pm = &imx208_pm_ops,
                .acpi_match_table = ACPI_PTR(imx208_acpi_ids),
        },
        .probe = imx208_probe,
index 2f9c8582f9401a2564688f88ef45f526c1a2a587..4f77ea02cc27d800628426205660b22af9d57830 100644 (file)
@@ -58,8 +58,6 @@ struct imx214 {
         * and start streaming.
         */
        struct mutex mutex;
-
-       bool streaming;
 };
 
 struct reg_8 {
@@ -775,9 +773,6 @@ static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
        struct imx214 *imx214 = to_imx214(subdev);
        int ret;
 
-       if (imx214->streaming == enable)
-               return 0;
-
        if (enable) {
                ret = pm_runtime_resume_and_get(imx214->dev);
                if (ret < 0)
@@ -793,7 +788,6 @@ static int imx214_s_stream(struct v4l2_subdev *subdev, int enable)
                pm_runtime_put(imx214->dev);
        }
 
-       imx214->streaming = enable;
        return 0;
 
 err_rpm_put:
@@ -909,39 +903,6 @@ done:
        return ret;
 }
 
-static int __maybe_unused imx214_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct imx214 *imx214 = to_imx214(sd);
-
-       if (imx214->streaming)
-               imx214_stop_streaming(imx214);
-
-       return 0;
-}
-
-static int __maybe_unused imx214_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct imx214 *imx214 = to_imx214(sd);
-       int ret;
-
-       if (imx214->streaming) {
-               ret = imx214_start_streaming(imx214);
-               if (ret)
-                       goto error;
-       }
-
-       return 0;
-
-error:
-       imx214_stop_streaming(imx214);
-       imx214->streaming = 0;
-       return ret;
-}
-
 static int imx214_probe(struct i2c_client *client)
 {
        struct device *dev = &client->dev;
@@ -1102,7 +1063,6 @@ static const struct of_device_id imx214_of_match[] = {
 MODULE_DEVICE_TABLE(of, imx214_of_match);
 
 static const struct dev_pm_ops imx214_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(imx214_suspend, imx214_resume)
        SET_RUNTIME_PM_OPS(imx214_power_off, imx214_power_on, NULL)
 };
 
index ec53abe2e84e53ef2e24a9760f10f94836340f67..8436880dcf7a8aa78ba240a5df8ee30cd7dc8c68 100644 (file)
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
+#include <linux/minmax.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
+
+#include <media/v4l2-cci.h>
 #include <media/v4l2-ctrls.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-event.h>
 #include <media/v4l2-fwnode.h>
 #include <media/v4l2-mediabus.h>
-#include <asm/unaligned.h>
-
-#define IMX219_REG_VALUE_08BIT         1
-#define IMX219_REG_VALUE_16BIT         2
-
-#define IMX219_REG_MODE_SELECT         0x0100
-#define IMX219_MODE_STANDBY            0x00
-#define IMX219_MODE_STREAMING          0x01
 
 /* Chip ID */
-#define IMX219_REG_CHIP_ID             0x0000
+#define IMX219_REG_CHIP_ID             CCI_REG16(0x0000)
 #define IMX219_CHIP_ID                 0x0219
 
-/* External clock frequency is 24.0M */
-#define IMX219_XCLK_FREQ               24000000
-
-/* Pixel rate is fixed for all the modes */
-#define IMX219_PIXEL_RATE              182400000
-#define IMX219_PIXEL_RATE_4LANE                280800000
-
-#define IMX219_DEFAULT_LINK_FREQ       456000000
-#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
+#define IMX219_REG_MODE_SELECT         CCI_REG8(0x0100)
+#define IMX219_MODE_STANDBY            0x00
+#define IMX219_MODE_STREAMING          0x01
 
-#define IMX219_REG_CSI_LANE_MODE       0x0114
+#define IMX219_REG_CSI_LANE_MODE       CCI_REG8(0x0114)
 #define IMX219_CSI_2_LANE_MODE         0x01
 #define IMX219_CSI_4_LANE_MODE         0x03
 
-/* V_TIMING internal */
-#define IMX219_REG_VTS                 0x0160
-#define IMX219_VTS_15FPS               0x0dc6
-#define IMX219_VTS_30FPS_1080P         0x06e3
-#define IMX219_VTS_30FPS_BINNED                0x06e3
-#define IMX219_VTS_30FPS_640x480       0x06e3
-#define IMX219_VTS_MAX                 0xffff
-
-#define IMX219_VBLANK_MIN              4
-
-/*Frame Length Line*/
-#define IMX219_FLL_MIN                 0x08a6
-#define IMX219_FLL_MAX                 0xffff
-#define IMX219_FLL_STEP                        1
-#define IMX219_FLL_DEFAULT             0x0c98
-
-/* HBLANK control - read only */
-#define IMX219_PPL_DEFAULT             3448
+#define IMX219_REG_DPHY_CTRL           CCI_REG8(0x0128)
+#define IMX219_DPHY_CTRL_TIMING_AUTO   0
+#define IMX219_DPHY_CTRL_TIMING_MANUAL 1
 
-/* Exposure control */
-#define IMX219_REG_EXPOSURE            0x015a
-#define IMX219_EXPOSURE_MIN            4
-#define IMX219_EXPOSURE_STEP           1
-#define IMX219_EXPOSURE_DEFAULT                0x640
-#define IMX219_EXPOSURE_MAX            65535
+#define IMX219_REG_EXCK_FREQ           CCI_REG16(0x012a)
+#define IMX219_EXCK_FREQ(n)            ((n) * 256)             /* n expressed in MHz */
 
 /* Analog gain control */
-#define IMX219_REG_ANALOG_GAIN         0x0157
+#define IMX219_REG_ANALOG_GAIN         CCI_REG8(0x0157)
 #define IMX219_ANA_GAIN_MIN            0
 #define IMX219_ANA_GAIN_MAX            232
 #define IMX219_ANA_GAIN_STEP           1
 #define IMX219_ANA_GAIN_DEFAULT                0x0
 
 /* Digital gain control */
-#define IMX219_REG_DIGITAL_GAIN                0x0158
+#define IMX219_REG_DIGITAL_GAIN                CCI_REG16(0x0158)
 #define IMX219_DGTL_GAIN_MIN           0x0100
 #define IMX219_DGTL_GAIN_MAX           0x0fff
 #define IMX219_DGTL_GAIN_DEFAULT       0x0100
 #define IMX219_DGTL_GAIN_STEP          1
 
-#define IMX219_REG_ORIENTATION         0x0172
+/* Exposure control */
+#define IMX219_REG_EXPOSURE            CCI_REG16(0x015a)
+#define IMX219_EXPOSURE_MIN            4
+#define IMX219_EXPOSURE_STEP           1
+#define IMX219_EXPOSURE_DEFAULT                0x640
+#define IMX219_EXPOSURE_MAX            65535
+
+/* V_TIMING internal */
+#define IMX219_REG_VTS                 CCI_REG16(0x0160)
+#define IMX219_VTS_MAX                 0xffff
+
+#define IMX219_VBLANK_MIN              4
+
+/* HBLANK control - read only */
+#define IMX219_PPL_DEFAULT             3448
+
+#define IMX219_REG_LINE_LENGTH_A       CCI_REG16(0x0162)
+#define IMX219_REG_X_ADD_STA_A         CCI_REG16(0x0164)
+#define IMX219_REG_X_ADD_END_A         CCI_REG16(0x0166)
+#define IMX219_REG_Y_ADD_STA_A         CCI_REG16(0x0168)
+#define IMX219_REG_Y_ADD_END_A         CCI_REG16(0x016a)
+#define IMX219_REG_X_OUTPUT_SIZE       CCI_REG16(0x016c)
+#define IMX219_REG_Y_OUTPUT_SIZE       CCI_REG16(0x016e)
+#define IMX219_REG_X_ODD_INC_A         CCI_REG8(0x0170)
+#define IMX219_REG_Y_ODD_INC_A         CCI_REG8(0x0171)
+#define IMX219_REG_ORIENTATION         CCI_REG8(0x0172)
 
 /* Binning  Mode */
-#define IMX219_REG_BINNING_MODE                0x0174
-#define IMX219_BINNING_NONE            0x0000
-#define IMX219_BINNING_2X2             0x0101
-#define IMX219_BINNING_2X2_ANALOG      0x0303
+#define IMX219_REG_BINNING_MODE_H      CCI_REG8(0x0174)
+#define IMX219_REG_BINNING_MODE_V      CCI_REG8(0x0175)
+#define IMX219_BINNING_NONE            0x00
+#define IMX219_BINNING_X2              0x01
+#define IMX219_BINNING_X2_ANALOG       0x03
+
+#define IMX219_REG_CSI_DATA_FORMAT_A   CCI_REG16(0x018c)
+
+/* PLL Settings */
+#define IMX219_REG_VTPXCK_DIV          CCI_REG8(0x0301)
+#define IMX219_REG_VTSYCK_DIV          CCI_REG8(0x0303)
+#define IMX219_REG_PREPLLCK_VT_DIV     CCI_REG8(0x0304)
+#define IMX219_REG_PREPLLCK_OP_DIV     CCI_REG8(0x0305)
+#define IMX219_REG_PLL_VT_MPY          CCI_REG16(0x0306)
+#define IMX219_REG_OPPXCK_DIV          CCI_REG8(0x0309)
+#define IMX219_REG_OPSYCK_DIV          CCI_REG8(0x030b)
+#define IMX219_REG_PLL_OP_MPY          CCI_REG16(0x030c)
 
 /* Test Pattern Control */
-#define IMX219_REG_TEST_PATTERN                0x0600
+#define IMX219_REG_TEST_PATTERN                CCI_REG16(0x0600)
 #define IMX219_TEST_PATTERN_DISABLE    0
 #define IMX219_TEST_PATTERN_SOLID_COLOR        1
 #define IMX219_TEST_PATTERN_COLOR_BARS 2
 #define IMX219_TEST_PATTERN_PN9                4
 
 /* Test pattern colour components */
-#define IMX219_REG_TESTP_RED           0x0602
-#define IMX219_REG_TESTP_GREENR                0x0604
-#define IMX219_REG_TESTP_BLUE          0x0606
-#define IMX219_REG_TESTP_GREENB                0x0608
+#define IMX219_REG_TESTP_RED           CCI_REG16(0x0602)
+#define IMX219_REG_TESTP_GREENR                CCI_REG16(0x0604)
+#define IMX219_REG_TESTP_BLUE          CCI_REG16(0x0606)
+#define IMX219_REG_TESTP_GREENB                CCI_REG16(0x0608)
 #define IMX219_TESTP_COLOUR_MIN                0
 #define IMX219_TESTP_COLOUR_MAX                0x03ff
 #define IMX219_TESTP_COLOUR_STEP       1
-#define IMX219_TESTP_RED_DEFAULT       IMX219_TESTP_COLOUR_MAX
-#define IMX219_TESTP_GREENR_DEFAULT    0
-#define IMX219_TESTP_BLUE_DEFAULT      0
-#define IMX219_TESTP_GREENB_DEFAULT    0
+
+#define IMX219_REG_TP_WINDOW_WIDTH     CCI_REG16(0x0624)
+#define IMX219_REG_TP_WINDOW_HEIGHT    CCI_REG16(0x0626)
+
+/* External clock frequency is 24.0M */
+#define IMX219_XCLK_FREQ               24000000
+
+/* Pixel rate is fixed for all the modes */
+#define IMX219_PIXEL_RATE              182400000
+#define IMX219_PIXEL_RATE_4LANE                280800000
+
+#define IMX219_DEFAULT_LINK_FREQ       456000000
+#define IMX219_DEFAULT_LINK_FREQ_4LANE 363000000
 
 /* IMX219 native and active pixel array size. */
 #define IMX219_NATIVE_WIDTH            3296U
 #define IMX219_PIXEL_ARRAY_WIDTH       3280U
 #define IMX219_PIXEL_ARRAY_HEIGHT      2464U
 
-struct imx219_reg {
-       u16 address;
-       u8 val;
-};
-
-struct imx219_reg_list {
-       unsigned int num_of_regs;
-       const struct imx219_reg *regs;
-};
-
 /* Mode : resolution and related config&values */
 struct imx219_mode {
        /* Frame width */
@@ -147,159 +154,52 @@ struct imx219_mode {
        /* Frame height */
        unsigned int height;
 
-       /* Analog crop rectangle. */
-       struct v4l2_rect crop;
-
        /* V-timing */
        unsigned int vts_def;
-
-       /* Default register values */
-       struct imx219_reg_list reg_list;
-
-       /* 2x2 binning is used */
-       bool binning;
 };
 
-static const struct imx219_reg imx219_common_regs[] = {
-       {0x0100, 0x00}, /* Mode Select */
+static const struct cci_reg_sequence imx219_common_regs[] = {
+       { IMX219_REG_MODE_SELECT, 0x00 },       /* Mode Select */
 
        /* To Access Addresses 3000-5fff, send the following commands */
-       {0x30eb, 0x0c},
-       {0x30eb, 0x05},
-       {0x300a, 0xff},
-       {0x300b, 0xff},
-       {0x30eb, 0x05},
-       {0x30eb, 0x09},
+       { CCI_REG8(0x30eb), 0x0c },
+       { CCI_REG8(0x30eb), 0x05 },
+       { CCI_REG8(0x300a), 0xff },
+       { CCI_REG8(0x300b), 0xff },
+       { CCI_REG8(0x30eb), 0x05 },
+       { CCI_REG8(0x30eb), 0x09 },
 
        /* PLL Clock Table */
-       {0x0301, 0x05}, /* VTPXCK_DIV */
-       {0x0303, 0x01}, /* VTSYSCK_DIV */
-       {0x0304, 0x03}, /* PREPLLCK_VT_DIV 0x03 = AUTO set */
-       {0x0305, 0x03}, /* PREPLLCK_OP_DIV 0x03 = AUTO set */
-       {0x0306, 0x00}, /* PLL_VT_MPY */
-       {0x0307, 0x39},
-       {0x030b, 0x01}, /* OP_SYS_CLK_DIV */
-       {0x030c, 0x00}, /* PLL_OP_MPY */
-       {0x030d, 0x72},
+       { IMX219_REG_VTPXCK_DIV, 5 },
+       { IMX219_REG_VTSYCK_DIV, 1 },
+       { IMX219_REG_PREPLLCK_VT_DIV, 3 },      /* 0x03 = AUTO set */
+       { IMX219_REG_PREPLLCK_OP_DIV, 3 },      /* 0x03 = AUTO set */
+       { IMX219_REG_PLL_VT_MPY, 57 },
+       { IMX219_REG_OPSYCK_DIV, 1 },
+       { IMX219_REG_PLL_OP_MPY, 114 },
 
        /* Undocumented registers */
-       {0x455e, 0x00},
-       {0x471e, 0x4b},
-       {0x4767, 0x0f},
-       {0x4750, 0x14},
-       {0x4540, 0x00},
-       {0x47b4, 0x14},
-       {0x4713, 0x30},
-       {0x478b, 0x10},
-       {0x478f, 0x10},
-       {0x4793, 0x10},
-       {0x4797, 0x0e},
-       {0x479b, 0x0e},
+       { CCI_REG8(0x455e), 0x00 },
+       { CCI_REG8(0x471e), 0x4b },
+       { CCI_REG8(0x4767), 0x0f },
+       { CCI_REG8(0x4750), 0x14 },
+       { CCI_REG8(0x4540), 0x00 },
+       { CCI_REG8(0x47b4), 0x14 },
+       { CCI_REG8(0x4713), 0x30 },
+       { CCI_REG8(0x478b), 0x10 },
+       { CCI_REG8(0x478f), 0x10 },
+       { CCI_REG8(0x4793), 0x10 },
+       { CCI_REG8(0x4797), 0x0e },
+       { CCI_REG8(0x479b), 0x0e },
 
        /* Frame Bank Register Group "A" */
-       {0x0162, 0x0d}, /* Line_Length_A */
-       {0x0163, 0x78},
-       {0x0170, 0x01}, /* X_ODD_INC_A */
-       {0x0171, 0x01}, /* Y_ODD_INC_A */
+       { IMX219_REG_LINE_LENGTH_A, 3448 },
+       { IMX219_REG_X_ODD_INC_A, 1 },
+       { IMX219_REG_Y_ODD_INC_A, 1 },
 
        /* Output setup registers */
-       {0x0114, 0x01}, /* CSI 2-Lane Mode */
-       {0x0128, 0x00}, /* DPHY Auto Mode */
-       {0x012a, 0x18}, /* EXCK_Freq */
-       {0x012b, 0x00},
-};
-
-/*
- * Register sets lifted off the i2C interface from the Raspberry Pi firmware
- * driver.
- * 3280x2464 = mode 2, 1920x1080 = mode 1, 1640x1232 = mode 4, 640x480 = mode 7.
- */
-static const struct imx219_reg mode_3280x2464_regs[] = {
-       {0x0164, 0x00},
-       {0x0165, 0x00},
-       {0x0166, 0x0c},
-       {0x0167, 0xcf},
-       {0x0168, 0x00},
-       {0x0169, 0x00},
-       {0x016a, 0x09},
-       {0x016b, 0x9f},
-       {0x016c, 0x0c},
-       {0x016d, 0xd0},
-       {0x016e, 0x09},
-       {0x016f, 0xa0},
-       {0x0624, 0x0c},
-       {0x0625, 0xd0},
-       {0x0626, 0x09},
-       {0x0627, 0xa0},
-};
-
-static const struct imx219_reg mode_1920_1080_regs[] = {
-       {0x0164, 0x02},
-       {0x0165, 0xa8},
-       {0x0166, 0x0a},
-       {0x0167, 0x27},
-       {0x0168, 0x02},
-       {0x0169, 0xb4},
-       {0x016a, 0x06},
-       {0x016b, 0xeb},
-       {0x016c, 0x07},
-       {0x016d, 0x80},
-       {0x016e, 0x04},
-       {0x016f, 0x38},
-       {0x0624, 0x07},
-       {0x0625, 0x80},
-       {0x0626, 0x04},
-       {0x0627, 0x38},
-};
-
-static const struct imx219_reg mode_1640_1232_regs[] = {
-       {0x0164, 0x00},
-       {0x0165, 0x00},
-       {0x0166, 0x0c},
-       {0x0167, 0xcf},
-       {0x0168, 0x00},
-       {0x0169, 0x00},
-       {0x016a, 0x09},
-       {0x016b, 0x9f},
-       {0x016c, 0x06},
-       {0x016d, 0x68},
-       {0x016e, 0x04},
-       {0x016f, 0xd0},
-       {0x0624, 0x06},
-       {0x0625, 0x68},
-       {0x0626, 0x04},
-       {0x0627, 0xd0},
-};
-
-static const struct imx219_reg mode_640_480_regs[] = {
-       {0x0164, 0x03},
-       {0x0165, 0xe8},
-       {0x0166, 0x08},
-       {0x0167, 0xe7},
-       {0x0168, 0x02},
-       {0x0169, 0xf0},
-       {0x016a, 0x06},
-       {0x016b, 0xaf},
-       {0x016c, 0x02},
-       {0x016d, 0x80},
-       {0x016e, 0x01},
-       {0x016f, 0xe0},
-       {0x0624, 0x06},
-       {0x0625, 0x68},
-       {0x0626, 0x04},
-       {0x0627, 0xd0},
-};
-
-static const struct imx219_reg raw8_framefmt_regs[] = {
-       {0x018c, 0x08},
-       {0x018d, 0x08},
-       {0x0309, 0x08},
-};
-
-static const struct imx219_reg raw10_framefmt_regs[] = {
-       {0x018c, 0x0a},
-       {0x018d, 0x0a},
-       {0x0309, 0x0a},
+       { IMX219_REG_DPHY_CTRL, IMX219_DPHY_CTRL_TIMING_AUTO },
+       { IMX219_REG_EXCK_FREQ, IMX219_EXCK_FREQ(IMX219_XCLK_FREQ / 1000000) },
 };
 
 static const s64 imx219_link_freq_menu[] = {
@@ -390,69 +290,25 @@ static const struct imx219_mode supported_modes[] = {
                /* 8MPix 15fps mode */
                .width = 3280,
                .height = 2464,
-               .crop = {
-                       .left = IMX219_PIXEL_ARRAY_LEFT,
-                       .top = IMX219_PIXEL_ARRAY_TOP,
-                       .width = 3280,
-                       .height = 2464
-               },
-               .vts_def = IMX219_VTS_15FPS,
-               .reg_list = {
-                       .num_of_regs = ARRAY_SIZE(mode_3280x2464_regs),
-                       .regs = mode_3280x2464_regs,
-               },
-               .binning = false,
+               .vts_def = 3526,
        },
        {
                /* 1080P 30fps cropped */
                .width = 1920,
                .height = 1080,
-               .crop = {
-                       .left = 688,
-                       .top = 700,
-                       .width = 1920,
-                       .height = 1080
-               },
-               .vts_def = IMX219_VTS_30FPS_1080P,
-               .reg_list = {
-                       .num_of_regs = ARRAY_SIZE(mode_1920_1080_regs),
-                       .regs = mode_1920_1080_regs,
-               },
-               .binning = false,
+               .vts_def = 1763,
        },
        {
                /* 2x2 binned 30fps mode */
                .width = 1640,
                .height = 1232,
-               .crop = {
-                       .left = IMX219_PIXEL_ARRAY_LEFT,
-                       .top = IMX219_PIXEL_ARRAY_TOP,
-                       .width = 3280,
-                       .height = 2464
-               },
-               .vts_def = IMX219_VTS_30FPS_BINNED,
-               .reg_list = {
-                       .num_of_regs = ARRAY_SIZE(mode_1640_1232_regs),
-                       .regs = mode_1640_1232_regs,
-               },
-               .binning = true,
+               .vts_def = 1763,
        },
        {
                /* 640x480 30fps mode */
                .width = 640,
                .height = 480,
-               .crop = {
-                       .left = 1008,
-                       .top = 760,
-                       .width = 1280,
-                       .height = 960
-               },
-               .vts_def = IMX219_VTS_30FPS_640x480,
-               .reg_list = {
-                       .num_of_regs = ARRAY_SIZE(mode_640_480_regs),
-                       .regs = mode_640_480_regs,
-               },
-               .binning = true,
+               .vts_def = 1763,
        },
 };
 
@@ -460,6 +316,7 @@ struct imx219 {
        struct v4l2_subdev sd;
        struct media_pad pad;
 
+       struct regmap *regmap;
        struct clk *xclk; /* system clock to IMX219 */
        u32 xclk_freq;
 
@@ -476,12 +333,6 @@ struct imx219 {
        struct v4l2_ctrl *vblank;
        struct v4l2_ctrl *hblank;
 
-       /* Current mode */
-       const struct imx219_mode *mode;
-
-       /* Streaming on/off */
-       bool streaming;
-
        /* Two or Four lanes */
        u8 lanes;
 };
@@ -491,78 +342,6 @@ static inline struct imx219 *to_imx219(struct v4l2_subdev *_sd)
        return container_of(_sd, struct imx219, sd);
 }
 
-/* Read registers up to 2 at a time */
-static int imx219_read_reg(struct imx219 *imx219, u16 reg, u32 len, u32 *val)
-{
-       struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-       struct i2c_msg msgs[2];
-       u8 addr_buf[2] = { reg >> 8, reg & 0xff };
-       u8 data_buf[4] = { 0, };
-       int ret;
-
-       if (len > 4)
-               return -EINVAL;
-
-       /* Write register address */
-       msgs[0].addr = client->addr;
-       msgs[0].flags = 0;
-       msgs[0].len = ARRAY_SIZE(addr_buf);
-       msgs[0].buf = addr_buf;
-
-       /* Read data from register */
-       msgs[1].addr = client->addr;
-       msgs[1].flags = I2C_M_RD;
-       msgs[1].len = len;
-       msgs[1].buf = &data_buf[4 - len];
-
-       ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-       if (ret != ARRAY_SIZE(msgs))
-               return -EIO;
-
-       *val = get_unaligned_be32(data_buf);
-
-       return 0;
-}
-
-/* Write registers up to 2 at a time */
-static int imx219_write_reg(struct imx219 *imx219, u16 reg, u32 len, u32 val)
-{
-       struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-       u8 buf[6];
-
-       if (len > 4)
-               return -EINVAL;
-
-       put_unaligned_be16(reg, buf);
-       put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
-       if (i2c_master_send(client, buf, len + 2) != len + 2)
-               return -EIO;
-
-       return 0;
-}
-
-/* Write a list of registers */
-static int imx219_write_regs(struct imx219 *imx219,
-                            const struct imx219_reg *regs, u32 len)
-{
-       struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-       unsigned int i;
-       int ret;
-
-       for (i = 0; i < len; i++) {
-               ret = imx219_write_reg(imx219, regs[i].address, 1, regs[i].val);
-               if (ret) {
-                       dev_err_ratelimited(&client->dev,
-                                           "Failed to write reg 0x%4.4x. error = %d\n",
-                                           regs[i].address, ret);
-
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
 /* Get bayer order based on flip setting. */
 static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
 {
@@ -581,18 +360,27 @@ static u32 imx219_get_format_code(struct imx219 *imx219, u32 code)
        return imx219_mbus_formats[i];
 }
 
+/* -----------------------------------------------------------------------------
+ * Controls
+ */
+
 static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
 {
        struct imx219 *imx219 =
                container_of(ctrl->handler, struct imx219, ctrl_handler);
        struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-       int ret;
+       const struct v4l2_mbus_framefmt *format;
+       struct v4l2_subdev_state *state;
+       int ret = 0;
+
+       state = v4l2_subdev_get_locked_active_state(&imx219->sd);
+       format = v4l2_subdev_get_pad_format(&imx219->sd, state, 0);
 
        if (ctrl->id == V4L2_CID_VBLANK) {
                int exposure_max, exposure_def;
 
                /* Update max exposure while meeting expected vblanking */
-               exposure_max = imx219->mode->height + ctrl->val - 4;
+               exposure_max = format->height + ctrl->val - 4;
                exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
                        exposure_max : IMX219_EXPOSURE_DEFAULT;
                __v4l2_ctrl_modify_range(imx219->exposure,
@@ -610,48 +398,45 @@ static int imx219_set_ctrl(struct v4l2_ctrl *ctrl)
 
        switch (ctrl->id) {
        case V4L2_CID_ANALOGUE_GAIN:
-               ret = imx219_write_reg(imx219, IMX219_REG_ANALOG_GAIN,
-                                      IMX219_REG_VALUE_08BIT, ctrl->val);
+               cci_write(imx219->regmap, IMX219_REG_ANALOG_GAIN,
+                         ctrl->val, &ret);
                break;
        case V4L2_CID_EXPOSURE:
-               ret = imx219_write_reg(imx219, IMX219_REG_EXPOSURE,
-                                      IMX219_REG_VALUE_16BIT, ctrl->val);
+               cci_write(imx219->regmap, IMX219_REG_EXPOSURE,
+                         ctrl->val, &ret);
                break;
        case V4L2_CID_DIGITAL_GAIN:
-               ret = imx219_write_reg(imx219, IMX219_REG_DIGITAL_GAIN,
-                                      IMX219_REG_VALUE_16BIT, ctrl->val);
+               cci_write(imx219->regmap, IMX219_REG_DIGITAL_GAIN,
+                         ctrl->val, &ret);
                break;
        case V4L2_CID_TEST_PATTERN:
-               ret = imx219_write_reg(imx219, IMX219_REG_TEST_PATTERN,
-                                      IMX219_REG_VALUE_16BIT,
-                                      imx219_test_pattern_val[ctrl->val]);
+               cci_write(imx219->regmap, IMX219_REG_TEST_PATTERN,
+                         imx219_test_pattern_val[ctrl->val], &ret);
                break;
        case V4L2_CID_HFLIP:
        case V4L2_CID_VFLIP:
-               ret = imx219_write_reg(imx219, IMX219_REG_ORIENTATION, 1,
-                                      imx219->hflip->val |
-                                      imx219->vflip->val << 1);
+               cci_write(imx219->regmap, IMX219_REG_ORIENTATION,
+                         imx219->hflip->val | imx219->vflip->val << 1, &ret);
                break;
        case V4L2_CID_VBLANK:
-               ret = imx219_write_reg(imx219, IMX219_REG_VTS,
-                                      IMX219_REG_VALUE_16BIT,
-                                      imx219->mode->height + ctrl->val);
+               cci_write(imx219->regmap, IMX219_REG_VTS,
+                         format->height + ctrl->val, &ret);
                break;
        case V4L2_CID_TEST_PATTERN_RED:
-               ret = imx219_write_reg(imx219, IMX219_REG_TESTP_RED,
-                                      IMX219_REG_VALUE_16BIT, ctrl->val);
+               cci_write(imx219->regmap, IMX219_REG_TESTP_RED,
+                         ctrl->val, &ret);
                break;
        case V4L2_CID_TEST_PATTERN_GREENR:
-               ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENR,
-                                      IMX219_REG_VALUE_16BIT, ctrl->val);
+               cci_write(imx219->regmap, IMX219_REG_TESTP_GREENR,
+                         ctrl->val, &ret);
                break;
        case V4L2_CID_TEST_PATTERN_BLUE:
-               ret = imx219_write_reg(imx219, IMX219_REG_TESTP_BLUE,
-                                      IMX219_REG_VALUE_16BIT, ctrl->val);
+               cci_write(imx219->regmap, IMX219_REG_TESTP_BLUE,
+                         ctrl->val, &ret);
                break;
        case V4L2_CID_TEST_PATTERN_GREENB:
-               ret = imx219_write_reg(imx219, IMX219_REG_TESTP_GREENB,
-                                      IMX219_REG_VALUE_16BIT, ctrl->val);
+               cci_write(imx219->regmap, IMX219_REG_TESTP_GREENB,
+                         ctrl->val, &ret);
                break;
        default:
                dev_info(&client->dev,
@@ -670,226 +455,224 @@ static const struct v4l2_ctrl_ops imx219_ctrl_ops = {
        .s_ctrl = imx219_set_ctrl,
 };
 
-static void imx219_update_pad_format(struct imx219 *imx219,
-                                    const struct imx219_mode *mode,
-                                    struct v4l2_mbus_framefmt *fmt, u32 code)
+static unsigned long imx219_get_pixel_rate(struct imx219 *imx219)
 {
-       /* Bayer order varies with flips */
-       fmt->code = imx219_get_format_code(imx219, code);
-       fmt->width = mode->width;
-       fmt->height = mode->height;
-       fmt->field = V4L2_FIELD_NONE;
-       fmt->colorspace = V4L2_COLORSPACE_RAW;
-       fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
-       fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+       return (imx219->lanes == 2) ? IMX219_PIXEL_RATE : IMX219_PIXEL_RATE_4LANE;
 }
 
-static int imx219_init_cfg(struct v4l2_subdev *sd,
-                          struct v4l2_subdev_state *state)
+/* Initialize control handlers */
+static int imx219_init_controls(struct imx219 *imx219)
 {
-       struct imx219 *imx219 = to_imx219(sd);
-       struct v4l2_mbus_framefmt *format;
-       struct v4l2_rect *crop;
+       struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+       const struct imx219_mode *mode = &supported_modes[0];
+       struct v4l2_ctrl_handler *ctrl_hdlr;
+       struct v4l2_fwnode_device_properties props;
+       int exposure_max, exposure_def, hblank;
+       int i, ret;
 
-       /* Initialize the format. */
-       format = v4l2_subdev_get_pad_format(sd, state, 0);
-       imx219_update_pad_format(imx219, &supported_modes[0], format,
-                                MEDIA_BUS_FMT_SRGGB10_1X10);
+       ctrl_hdlr = &imx219->ctrl_handler;
+       ret = v4l2_ctrl_handler_init(ctrl_hdlr, 12);
+       if (ret)
+               return ret;
 
-       /* Initialize the crop rectangle. */
-       crop = v4l2_subdev_get_pad_crop(sd, state, 0);
-       crop->top = IMX219_PIXEL_ARRAY_TOP;
-       crop->left = IMX219_PIXEL_ARRAY_LEFT;
-       crop->width = IMX219_PIXEL_ARRAY_WIDTH;
-       crop->height = IMX219_PIXEL_ARRAY_HEIGHT;
+       /* By default, PIXEL_RATE is read only */
+       imx219->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+                                              V4L2_CID_PIXEL_RATE,
+                                              imx219_get_pixel_rate(imx219),
+                                              imx219_get_pixel_rate(imx219), 1,
+                                              imx219_get_pixel_rate(imx219));
 
-       return 0;
-}
+       imx219->link_freq =
+               v4l2_ctrl_new_int_menu(ctrl_hdlr, &imx219_ctrl_ops,
+                                      V4L2_CID_LINK_FREQ,
+                                      ARRAY_SIZE(imx219_link_freq_menu) - 1, 0,
+                                      (imx219->lanes == 2) ? imx219_link_freq_menu :
+                                      imx219_link_freq_4lane_menu);
+       if (imx219->link_freq)
+               imx219->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
 
-static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
-                                struct v4l2_subdev_state *sd_state,
-                                struct v4l2_subdev_mbus_code_enum *code)
-{
-       struct imx219 *imx219 = to_imx219(sd);
+       /* Initial vblank/hblank/exposure parameters based on current mode */
+       imx219->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+                                          V4L2_CID_VBLANK, IMX219_VBLANK_MIN,
+                                          IMX219_VTS_MAX - mode->height, 1,
+                                          mode->vts_def - mode->height);
+       hblank = IMX219_PPL_DEFAULT - mode->width;
+       imx219->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+                                          V4L2_CID_HBLANK, hblank, hblank,
+                                          1, hblank);
+       if (imx219->hblank)
+               imx219->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+       exposure_max = mode->vts_def - 4;
+       exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
+               exposure_max : IMX219_EXPOSURE_DEFAULT;
+       imx219->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+                                            V4L2_CID_EXPOSURE,
+                                            IMX219_EXPOSURE_MIN, exposure_max,
+                                            IMX219_EXPOSURE_STEP,
+                                            exposure_def);
 
-       if (code->index >= (ARRAY_SIZE(imx219_mbus_formats) / 4))
-               return -EINVAL;
+       v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+                         IMX219_ANA_GAIN_MIN, IMX219_ANA_GAIN_MAX,
+                         IMX219_ANA_GAIN_STEP, IMX219_ANA_GAIN_DEFAULT);
 
-       code->code = imx219_get_format_code(imx219, imx219_mbus_formats[code->index * 4]);
+       v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+                         IMX219_DGTL_GAIN_MIN, IMX219_DGTL_GAIN_MAX,
+                         IMX219_DGTL_GAIN_STEP, IMX219_DGTL_GAIN_DEFAULT);
 
-       return 0;
-}
+       imx219->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+                                         V4L2_CID_HFLIP, 0, 1, 1, 0);
+       if (imx219->hflip)
+               imx219->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
 
-static int imx219_enum_frame_size(struct v4l2_subdev *sd,
-                                 struct v4l2_subdev_state *sd_state,
-                                 struct v4l2_subdev_frame_size_enum *fse)
-{
-       struct imx219 *imx219 = to_imx219(sd);
-       u32 code;
+       imx219->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+                                         V4L2_CID_VFLIP, 0, 1, 1, 0);
+       if (imx219->vflip)
+               imx219->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
 
-       if (fse->index >= ARRAY_SIZE(supported_modes))
-               return -EINVAL;
+       v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx219_ctrl_ops,
+                                    V4L2_CID_TEST_PATTERN,
+                                    ARRAY_SIZE(imx219_test_pattern_menu) - 1,
+                                    0, 0, imx219_test_pattern_menu);
+       for (i = 0; i < 4; i++) {
+               /*
+                * The assumption is that
+                * V4L2_CID_TEST_PATTERN_GREENR == V4L2_CID_TEST_PATTERN_RED + 1
+                * V4L2_CID_TEST_PATTERN_BLUE   == V4L2_CID_TEST_PATTERN_RED + 2
+                * V4L2_CID_TEST_PATTERN_GREENB == V4L2_CID_TEST_PATTERN_RED + 3
+                */
+               v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
+                                 V4L2_CID_TEST_PATTERN_RED + i,
+                                 IMX219_TESTP_COLOUR_MIN,
+                                 IMX219_TESTP_COLOUR_MAX,
+                                 IMX219_TESTP_COLOUR_STEP,
+                                 IMX219_TESTP_COLOUR_MAX);
+               /* The "Solid color" pattern is white by default */
+       }
 
-       code = imx219_get_format_code(imx219, fse->code);
-       if (fse->code != code)
-               return -EINVAL;
+       if (ctrl_hdlr->error) {
+               ret = ctrl_hdlr->error;
+               dev_err(&client->dev, "%s control init failed (%d)\n",
+                       __func__, ret);
+               goto error;
+       }
 
-       fse->min_width = supported_modes[fse->index].width;
-       fse->max_width = fse->min_width;
-       fse->min_height = supported_modes[fse->index].height;
-       fse->max_height = fse->min_height;
+       ret = v4l2_fwnode_device_parse(&client->dev, &props);
+       if (ret)
+               goto error;
 
-       return 0;
-}
-
-static int imx219_set_pad_format(struct v4l2_subdev *sd,
-                                struct v4l2_subdev_state *sd_state,
-                                struct v4l2_subdev_format *fmt)
-{
-       struct imx219 *imx219 = to_imx219(sd);
-       const struct imx219_mode *mode;
-       int exposure_max, exposure_def, hblank;
-       struct v4l2_mbus_framefmt *format;
-       struct v4l2_rect *crop;
-
-       mode = v4l2_find_nearest_size(supported_modes,
-                                     ARRAY_SIZE(supported_modes),
-                                     width, height,
-                                     fmt->format.width, fmt->format.height);
-
-       imx219_update_pad_format(imx219, mode, &fmt->format, fmt->format.code);
+       ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx219_ctrl_ops,
+                                             &props);
+       if (ret)
+               goto error;
 
-       format = v4l2_subdev_get_pad_format(sd, sd_state, 0);
-       crop = v4l2_subdev_get_pad_crop(sd, sd_state, 0);
+       imx219->sd.ctrl_handler = ctrl_hdlr;
 
-       *format = fmt->format;
-       *crop = mode->crop;
+       return 0;
 
-       if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
-               imx219->mode = mode;
-               /* Update limits and set FPS to default */
-               __v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN,
-                                        IMX219_VTS_MAX - mode->height, 1,
-                                        mode->vts_def - mode->height);
-               __v4l2_ctrl_s_ctrl(imx219->vblank,
-                                  mode->vts_def - mode->height);
-               /* Update max exposure while meeting expected vblanking */
-               exposure_max = mode->vts_def - 4;
-               exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
-                       exposure_max : IMX219_EXPOSURE_DEFAULT;
-               __v4l2_ctrl_modify_range(imx219->exposure,
-                                        imx219->exposure->minimum,
-                                        exposure_max, imx219->exposure->step,
-                                        exposure_def);
-               /*
-                * Currently PPL is fixed to IMX219_PPL_DEFAULT, so hblank
-                * depends on mode->width only, and is not changeble in any
-                * way other than changing the mode.
-                */
-               hblank = IMX219_PPL_DEFAULT - mode->width;
-               __v4l2_ctrl_modify_range(imx219->hblank, hblank, hblank, 1,
-                                        hblank);
-       }
+error:
+       v4l2_ctrl_handler_free(ctrl_hdlr);
 
-       return 0;
+       return ret;
 }
 
-static int imx219_set_framefmt(struct imx219 *imx219,
-                              const struct v4l2_mbus_framefmt *format)
+static void imx219_free_controls(struct imx219 *imx219)
 {
-       switch (format->code) {
-       case MEDIA_BUS_FMT_SRGGB8_1X8:
-       case MEDIA_BUS_FMT_SGRBG8_1X8:
-       case MEDIA_BUS_FMT_SGBRG8_1X8:
-       case MEDIA_BUS_FMT_SBGGR8_1X8:
-               return imx219_write_regs(imx219, raw8_framefmt_regs,
-                                       ARRAY_SIZE(raw8_framefmt_regs));
-
-       case MEDIA_BUS_FMT_SRGGB10_1X10:
-       case MEDIA_BUS_FMT_SGRBG10_1X10:
-       case MEDIA_BUS_FMT_SGBRG10_1X10:
-       case MEDIA_BUS_FMT_SBGGR10_1X10:
-               return imx219_write_regs(imx219, raw10_framefmt_regs,
-                                       ARRAY_SIZE(raw10_framefmt_regs));
-       }
-
-       return -EINVAL;
+       v4l2_ctrl_handler_free(imx219->sd.ctrl_handler);
 }
 
-static int imx219_set_binning(struct imx219 *imx219,
-                             const struct v4l2_mbus_framefmt *format)
+/* -----------------------------------------------------------------------------
+ * Subdev operations
+ */
+
+static int imx219_set_framefmt(struct imx219 *imx219,
+                              struct v4l2_subdev_state *state)
 {
-       if (!imx219->mode->binning) {
-               return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
-                                       IMX219_REG_VALUE_16BIT,
-                                       IMX219_BINNING_NONE);
-       }
+       const struct v4l2_mbus_framefmt *format;
+       const struct v4l2_rect *crop;
+       unsigned int bpp;
+       u64 bin_h, bin_v;
+       int ret = 0;
+
+       format = v4l2_subdev_get_pad_format(&imx219->sd, state, 0);
+       crop = v4l2_subdev_get_pad_crop(&imx219->sd, state, 0);
 
        switch (format->code) {
        case MEDIA_BUS_FMT_SRGGB8_1X8:
        case MEDIA_BUS_FMT_SGRBG8_1X8:
        case MEDIA_BUS_FMT_SGBRG8_1X8:
        case MEDIA_BUS_FMT_SBGGR8_1X8:
-               return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
-                                       IMX219_REG_VALUE_16BIT,
-                                       IMX219_BINNING_2X2_ANALOG);
+               bpp = 8;
+               break;
 
        case MEDIA_BUS_FMT_SRGGB10_1X10:
        case MEDIA_BUS_FMT_SGRBG10_1X10:
        case MEDIA_BUS_FMT_SGBRG10_1X10:
        case MEDIA_BUS_FMT_SBGGR10_1X10:
-               return imx219_write_reg(imx219, IMX219_REG_BINNING_MODE,
-                                       IMX219_REG_VALUE_16BIT,
-                                       IMX219_BINNING_2X2);
+       default:
+               bpp = 10;
+               break;
        }
 
-       return -EINVAL;
-}
+       cci_write(imx219->regmap, IMX219_REG_X_ADD_STA_A,
+                 crop->left - IMX219_PIXEL_ARRAY_LEFT, &ret);
+       cci_write(imx219->regmap, IMX219_REG_X_ADD_END_A,
+                 crop->left - IMX219_PIXEL_ARRAY_LEFT + crop->width - 1, &ret);
+       cci_write(imx219->regmap, IMX219_REG_Y_ADD_STA_A,
+                 crop->top - IMX219_PIXEL_ARRAY_TOP, &ret);
+       cci_write(imx219->regmap, IMX219_REG_Y_ADD_END_A,
+                 crop->top - IMX219_PIXEL_ARRAY_TOP + crop->height - 1, &ret);
+
+       switch (crop->width / format->width) {
+       case 1:
+       default:
+               bin_h = IMX219_BINNING_NONE;
+               break;
+       case 2:
+               bin_h = bpp == 8 ? IMX219_BINNING_X2_ANALOG : IMX219_BINNING_X2;
+               break;
+       }
 
-static int imx219_get_selection(struct v4l2_subdev *sd,
-                               struct v4l2_subdev_state *sd_state,
-                               struct v4l2_subdev_selection *sel)
-{
-       switch (sel->target) {
-       case V4L2_SEL_TGT_CROP: {
-               sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state, 0);
-               return 0;
+       switch (crop->height / format->height) {
+       case 1:
+       default:
+               bin_v = IMX219_BINNING_NONE;
+               break;
+       case 2:
+               bin_v = bpp == 8 ? IMX219_BINNING_X2_ANALOG : IMX219_BINNING_X2;
+               break;
        }
 
-       case V4L2_SEL_TGT_NATIVE_SIZE:
-               sel->r.top = 0;
-               sel->r.left = 0;
-               sel->r.width = IMX219_NATIVE_WIDTH;
-               sel->r.height = IMX219_NATIVE_HEIGHT;
+       cci_write(imx219->regmap, IMX219_REG_BINNING_MODE_H, bin_h, &ret);
+       cci_write(imx219->regmap, IMX219_REG_BINNING_MODE_V, bin_v, &ret);
 
-               return 0;
+       cci_write(imx219->regmap, IMX219_REG_X_OUTPUT_SIZE,
+                 format->width, &ret);
+       cci_write(imx219->regmap, IMX219_REG_Y_OUTPUT_SIZE,
+                 format->height, &ret);
 
-       case V4L2_SEL_TGT_CROP_DEFAULT:
-       case V4L2_SEL_TGT_CROP_BOUNDS:
-               sel->r.top = IMX219_PIXEL_ARRAY_TOP;
-               sel->r.left = IMX219_PIXEL_ARRAY_LEFT;
-               sel->r.width = IMX219_PIXEL_ARRAY_WIDTH;
-               sel->r.height = IMX219_PIXEL_ARRAY_HEIGHT;
+       cci_write(imx219->regmap, IMX219_REG_TP_WINDOW_WIDTH,
+                 format->width, &ret);
+       cci_write(imx219->regmap, IMX219_REG_TP_WINDOW_HEIGHT,
+                 format->height, &ret);
 
-               return 0;
-       }
+       cci_write(imx219->regmap, IMX219_REG_CSI_DATA_FORMAT_A,
+                 (bpp << 8) | bpp, &ret);
+       cci_write(imx219->regmap, IMX219_REG_OPPXCK_DIV, bpp, &ret);
 
-       return -EINVAL;
+       return ret;
 }
 
 static int imx219_configure_lanes(struct imx219 *imx219)
 {
-       return imx219_write_reg(imx219, IMX219_REG_CSI_LANE_MODE,
-                               IMX219_REG_VALUE_08BIT, (imx219->lanes == 2) ?
-                               IMX219_CSI_2_LANE_MODE : IMX219_CSI_4_LANE_MODE);
+       return cci_write(imx219->regmap, IMX219_REG_CSI_LANE_MODE,
+                        imx219->lanes == 2 ? IMX219_CSI_2_LANE_MODE :
+                        IMX219_CSI_4_LANE_MODE, NULL);
 };
 
 static int imx219_start_streaming(struct imx219 *imx219,
                                  struct v4l2_subdev_state *state)
 {
        struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-       const struct v4l2_mbus_framefmt *format;
-       const struct imx219_reg_list *reg_list;
        int ret;
 
        ret = pm_runtime_resume_and_get(&client->dev);
@@ -897,7 +680,8 @@ static int imx219_start_streaming(struct imx219 *imx219,
                return ret;
 
        /* Send all registers that are common to all modes */
-       ret = imx219_write_regs(imx219, imx219_common_regs, ARRAY_SIZE(imx219_common_regs));
+       ret = cci_multi_reg_write(imx219->regmap, imx219_common_regs,
+                                 ARRAY_SIZE(imx219_common_regs), NULL);
        if (ret) {
                dev_err(&client->dev, "%s failed to send mfg header\n", __func__);
                goto err_rpm_put;
@@ -910,37 +694,22 @@ static int imx219_start_streaming(struct imx219 *imx219,
                goto err_rpm_put;
        }
 
-       /* Apply default values of current mode */
-       reg_list = &imx219->mode->reg_list;
-       ret = imx219_write_regs(imx219, reg_list->regs, reg_list->num_of_regs);
-       if (ret) {
-               dev_err(&client->dev, "%s failed to set mode\n", __func__);
-               goto err_rpm_put;
-       }
-
-       format = v4l2_subdev_get_pad_format(&imx219->sd, state, 0);
-       ret = imx219_set_framefmt(imx219, format);
+       /* Apply format and crop settings. */
+       ret = imx219_set_framefmt(imx219, state);
        if (ret) {
                dev_err(&client->dev, "%s failed to set frame format: %d\n",
                        __func__, ret);
                goto err_rpm_put;
        }
 
-       ret = imx219_set_binning(imx219, format);
-       if (ret) {
-               dev_err(&client->dev, "%s failed to set binning: %d\n",
-                       __func__, ret);
-               goto err_rpm_put;
-       }
-
        /* Apply customized values from user */
        ret =  __v4l2_ctrl_handler_setup(imx219->sd.ctrl_handler);
        if (ret)
                goto err_rpm_put;
 
        /* set stream on register */
-       ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-                              IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
+       ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
+                       IMX219_MODE_STREAMING, NULL);
        if (ret)
                goto err_rpm_put;
 
@@ -961,8 +730,8 @@ static void imx219_stop_streaming(struct imx219 *imx219)
        int ret;
 
        /* set stream off register */
-       ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-                              IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
+       ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
+                       IMX219_MODE_STANDBY, NULL);
        if (ret)
                dev_err(&client->dev, "%s failed to set stream\n", __func__);
 
@@ -980,142 +749,176 @@ static int imx219_set_stream(struct v4l2_subdev *sd, int enable)
 
        state = v4l2_subdev_lock_and_get_active_state(sd);
 
-       if (imx219->streaming == enable)
-               goto unlock;
-
-       if (enable) {
-               /*
-                * Apply default & customized values
-                * and then start streaming.
-                */
+       if (enable)
                ret = imx219_start_streaming(imx219, state);
-               if (ret)
-                       goto unlock;
-       } else {
+       else
                imx219_stop_streaming(imx219);
-       }
-
-       imx219->streaming = enable;
 
-unlock:
        v4l2_subdev_unlock_state(state);
        return ret;
 }
 
-/* Power/clock management functions */
-static int imx219_power_on(struct device *dev)
+static void imx219_update_pad_format(struct imx219 *imx219,
+                                    const struct imx219_mode *mode,
+                                    struct v4l2_mbus_framefmt *fmt, u32 code)
 {
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct imx219 *imx219 = to_imx219(sd);
-       int ret;
+       /* Bayer order varies with flips */
+       fmt->code = imx219_get_format_code(imx219, code);
+       fmt->width = mode->width;
+       fmt->height = mode->height;
+       fmt->field = V4L2_FIELD_NONE;
+       fmt->colorspace = V4L2_COLORSPACE_RAW;
+       fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+       fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+       fmt->xfer_func = V4L2_XFER_FUNC_NONE;
+}
 
-       ret = regulator_bulk_enable(IMX219_NUM_SUPPLIES,
-                                   imx219->supplies);
-       if (ret) {
-               dev_err(dev, "%s: failed to enable regulators\n",
-                       __func__);
-               return ret;
-       }
+static int imx219_enum_mbus_code(struct v4l2_subdev *sd,
+                                struct v4l2_subdev_state *state,
+                                struct v4l2_subdev_mbus_code_enum *code)
+{
+       struct imx219 *imx219 = to_imx219(sd);
 
-       ret = clk_prepare_enable(imx219->xclk);
-       if (ret) {
-               dev_err(dev, "%s: failed to enable clock\n",
-                       __func__);
-               goto reg_off;
-       }
+       if (code->index >= (ARRAY_SIZE(imx219_mbus_formats) / 4))
+               return -EINVAL;
 
-       gpiod_set_value_cansleep(imx219->reset_gpio, 1);
-       usleep_range(IMX219_XCLR_MIN_DELAY_US,
-                    IMX219_XCLR_MIN_DELAY_US + IMX219_XCLR_DELAY_RANGE_US);
+       code->code = imx219_get_format_code(imx219, imx219_mbus_formats[code->index * 4]);
 
        return 0;
-
-reg_off:
-       regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
-
-       return ret;
 }
 
-static int imx219_power_off(struct device *dev)
+static int imx219_enum_frame_size(struct v4l2_subdev *sd,
+                                 struct v4l2_subdev_state *state,
+                                 struct v4l2_subdev_frame_size_enum *fse)
 {
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
        struct imx219 *imx219 = to_imx219(sd);
+       u32 code;
 
-       gpiod_set_value_cansleep(imx219->reset_gpio, 0);
-       regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
-       clk_disable_unprepare(imx219->xclk);
+       if (fse->index >= ARRAY_SIZE(supported_modes))
+               return -EINVAL;
+
+       code = imx219_get_format_code(imx219, fse->code);
+       if (fse->code != code)
+               return -EINVAL;
+
+       fse->min_width = supported_modes[fse->index].width;
+       fse->max_width = fse->min_width;
+       fse->min_height = supported_modes[fse->index].height;
+       fse->max_height = fse->min_height;
 
        return 0;
 }
 
-static int __maybe_unused imx219_suspend(struct device *dev)
+static int imx219_set_pad_format(struct v4l2_subdev *sd,
+                                struct v4l2_subdev_state *state,
+                                struct v4l2_subdev_format *fmt)
 {
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
        struct imx219 *imx219 = to_imx219(sd);
+       const struct imx219_mode *mode;
+       struct v4l2_mbus_framefmt *format;
+       struct v4l2_rect *crop;
+       unsigned int bin_h, bin_v;
 
-       if (imx219->streaming)
-               imx219_stop_streaming(imx219);
+       mode = v4l2_find_nearest_size(supported_modes,
+                                     ARRAY_SIZE(supported_modes),
+                                     width, height,
+                                     fmt->format.width, fmt->format.height);
 
-       return 0;
-}
+       imx219_update_pad_format(imx219, mode, &fmt->format, fmt->format.code);
 
-static int __maybe_unused imx219_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct imx219 *imx219 = to_imx219(sd);
-       struct v4l2_subdev_state *state;
-       int ret;
+       format = v4l2_subdev_get_pad_format(sd, state, 0);
+       *format = fmt->format;
 
-       if (imx219->streaming) {
-               state = v4l2_subdev_lock_and_get_active_state(sd);
-               ret = imx219_start_streaming(imx219, state);
-               v4l2_subdev_unlock_state(state);
-               if (ret)
-                       goto error;
-       }
+       /*
+        * Use binning to maximize the crop rectangle size, and centre it in the
+        * sensor.
+        */
+       bin_h = min(IMX219_PIXEL_ARRAY_WIDTH / format->width, 2U);
+       bin_v = min(IMX219_PIXEL_ARRAY_HEIGHT / format->height, 2U);
 
-       return 0;
+       crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+       crop->width = format->width * bin_h;
+       crop->height = format->height * bin_v;
+       crop->left = (IMX219_NATIVE_WIDTH - crop->width) / 2;
+       crop->top = (IMX219_NATIVE_HEIGHT - crop->height) / 2;
 
-error:
-       imx219_stop_streaming(imx219);
-       imx219->streaming = false;
+       if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+               int exposure_max;
+               int exposure_def;
+               int hblank;
 
-       return ret;
+               /* Update limits and set FPS to default */
+               __v4l2_ctrl_modify_range(imx219->vblank, IMX219_VBLANK_MIN,
+                                        IMX219_VTS_MAX - mode->height, 1,
+                                        mode->vts_def - mode->height);
+               __v4l2_ctrl_s_ctrl(imx219->vblank,
+                                  mode->vts_def - mode->height);
+               /* Update max exposure while meeting expected vblanking */
+               exposure_max = mode->vts_def - 4;
+               exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
+                       exposure_max : IMX219_EXPOSURE_DEFAULT;
+               __v4l2_ctrl_modify_range(imx219->exposure,
+                                        imx219->exposure->minimum,
+                                        exposure_max, imx219->exposure->step,
+                                        exposure_def);
+               /*
+                * Currently PPL is fixed to IMX219_PPL_DEFAULT, so hblank
+        * depends on mode->width only, and is not changeable in any
+                * way other than changing the mode.
+                */
+               hblank = IMX219_PPL_DEFAULT - mode->width;
+               __v4l2_ctrl_modify_range(imx219->hblank, hblank, hblank, 1,
+                                        hblank);
+       }
+
+       return 0;
 }
 
-static int imx219_get_regulators(struct imx219 *imx219)
+static int imx219_get_selection(struct v4l2_subdev *sd,
+                               struct v4l2_subdev_state *state,
+                               struct v4l2_subdev_selection *sel)
 {
-       struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-       unsigned int i;
+       switch (sel->target) {
+       case V4L2_SEL_TGT_CROP: {
+               sel->r = *v4l2_subdev_get_pad_crop(sd, state, 0);
+               return 0;
+       }
 
-       for (i = 0; i < IMX219_NUM_SUPPLIES; i++)
-               imx219->supplies[i].supply = imx219_supply_name[i];
+       case V4L2_SEL_TGT_NATIVE_SIZE:
+               sel->r.top = 0;
+               sel->r.left = 0;
+               sel->r.width = IMX219_NATIVE_WIDTH;
+               sel->r.height = IMX219_NATIVE_HEIGHT;
 
-       return devm_regulator_bulk_get(&client->dev,
-                                      IMX219_NUM_SUPPLIES,
-                                      imx219->supplies);
-}
+               return 0;
 
-/* Verify chip ID */
-static int imx219_identify_module(struct imx219 *imx219)
-{
-       struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-       int ret;
-       u32 val;
+       case V4L2_SEL_TGT_CROP_DEFAULT:
+       case V4L2_SEL_TGT_CROP_BOUNDS:
+               sel->r.top = IMX219_PIXEL_ARRAY_TOP;
+               sel->r.left = IMX219_PIXEL_ARRAY_LEFT;
+               sel->r.width = IMX219_PIXEL_ARRAY_WIDTH;
+               sel->r.height = IMX219_PIXEL_ARRAY_HEIGHT;
 
-       ret = imx219_read_reg(imx219, IMX219_REG_CHIP_ID,
-                             IMX219_REG_VALUE_16BIT, &val);
-       if (ret) {
-               dev_err(&client->dev, "failed to read chip id %x\n",
-                       IMX219_CHIP_ID);
-               return ret;
+               return 0;
        }
 
-       if (val != IMX219_CHIP_ID) {
-               dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
-                       IMX219_CHIP_ID, val);
-               return -EIO;
-       }
+       return -EINVAL;
+}
+
+static int imx219_init_cfg(struct v4l2_subdev *sd,
+                          struct v4l2_subdev_state *state)
+{
+       struct v4l2_subdev_format fmt = {
+               .which = V4L2_SUBDEV_FORMAT_TRY,
+               .pad = 0,
+               .format = {
+                       .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+                       .width = supported_modes[0].width,
+                       .height = supported_modes[0].height,
+               },
+       };
+
+       imx219_set_pad_format(sd, state, &fmt);
 
        return 0;
 }
@@ -1145,129 +948,93 @@ static const struct v4l2_subdev_ops imx219_subdev_ops = {
 };
 
 
-static unsigned long imx219_get_pixel_rate(struct imx219 *imx219)
-{
-       return (imx219->lanes == 2) ? IMX219_PIXEL_RATE : IMX219_PIXEL_RATE_4LANE;
-}
+/* -----------------------------------------------------------------------------
+ * Power management
+ */
 
-/* Initialize control handlers */
-static int imx219_init_controls(struct imx219 *imx219)
+static int imx219_power_on(struct device *dev)
 {
-       struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
-       struct v4l2_ctrl_handler *ctrl_hdlr;
-       unsigned int height = imx219->mode->height;
-       struct v4l2_fwnode_device_properties props;
-       int exposure_max, exposure_def, hblank;
-       int i, ret;
+       struct v4l2_subdev *sd = dev_get_drvdata(dev);
+       struct imx219 *imx219 = to_imx219(sd);
+       int ret;
 
-       ctrl_hdlr = &imx219->ctrl_handler;
-       ret = v4l2_ctrl_handler_init(ctrl_hdlr, 12);
-       if (ret)
+       ret = regulator_bulk_enable(IMX219_NUM_SUPPLIES,
+                                   imx219->supplies);
+       if (ret) {
+               dev_err(dev, "%s: failed to enable regulators\n",
+                       __func__);
                return ret;
+       }
 
-       /* By default, PIXEL_RATE is read only */
-       imx219->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-                                              V4L2_CID_PIXEL_RATE,
-                                              imx219_get_pixel_rate(imx219),
-                                              imx219_get_pixel_rate(imx219), 1,
-                                              imx219_get_pixel_rate(imx219));
+       ret = clk_prepare_enable(imx219->xclk);
+       if (ret) {
+               dev_err(dev, "%s: failed to enable clock\n",
+                       __func__);
+               goto reg_off;
+       }
 
-       imx219->link_freq =
-               v4l2_ctrl_new_int_menu(ctrl_hdlr, &imx219_ctrl_ops,
-                                      V4L2_CID_LINK_FREQ,
-                                      ARRAY_SIZE(imx219_link_freq_menu) - 1, 0,
-                                      (imx219->lanes == 2) ? imx219_link_freq_menu :
-                                      imx219_link_freq_4lane_menu);
-       if (imx219->link_freq)
-               imx219->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+       gpiod_set_value_cansleep(imx219->reset_gpio, 1);
+       usleep_range(IMX219_XCLR_MIN_DELAY_US,
+                    IMX219_XCLR_MIN_DELAY_US + IMX219_XCLR_DELAY_RANGE_US);
 
-       /* Initial vblank/hblank/exposure parameters based on current mode */
-       imx219->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-                                          V4L2_CID_VBLANK, IMX219_VBLANK_MIN,
-                                          IMX219_VTS_MAX - height, 1,
-                                          imx219->mode->vts_def - height);
-       hblank = IMX219_PPL_DEFAULT - imx219->mode->width;
-       imx219->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-                                          V4L2_CID_HBLANK, hblank, hblank,
-                                          1, hblank);
-       if (imx219->hblank)
-               imx219->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
-       exposure_max = imx219->mode->vts_def - 4;
-       exposure_def = (exposure_max < IMX219_EXPOSURE_DEFAULT) ?
-               exposure_max : IMX219_EXPOSURE_DEFAULT;
-       imx219->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-                                            V4L2_CID_EXPOSURE,
-                                            IMX219_EXPOSURE_MIN, exposure_max,
-                                            IMX219_EXPOSURE_STEP,
-                                            exposure_def);
+       return 0;
 
-       v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
-                         IMX219_ANA_GAIN_MIN, IMX219_ANA_GAIN_MAX,
-                         IMX219_ANA_GAIN_STEP, IMX219_ANA_GAIN_DEFAULT);
+reg_off:
+       regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
 
-       v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
-                         IMX219_DGTL_GAIN_MIN, IMX219_DGTL_GAIN_MAX,
-                         IMX219_DGTL_GAIN_STEP, IMX219_DGTL_GAIN_DEFAULT);
+       return ret;
+}
 
-       imx219->hflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-                                         V4L2_CID_HFLIP, 0, 1, 1, 0);
-       if (imx219->hflip)
-               imx219->hflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+static int imx219_power_off(struct device *dev)
+{
+       struct v4l2_subdev *sd = dev_get_drvdata(dev);
+       struct imx219 *imx219 = to_imx219(sd);
 
-       imx219->vflip = v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-                                         V4L2_CID_VFLIP, 0, 1, 1, 0);
-       if (imx219->vflip)
-               imx219->vflip->flags |= V4L2_CTRL_FLAG_MODIFY_LAYOUT;
+       gpiod_set_value_cansleep(imx219->reset_gpio, 0);
+       regulator_bulk_disable(IMX219_NUM_SUPPLIES, imx219->supplies);
+       clk_disable_unprepare(imx219->xclk);
 
-       v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx219_ctrl_ops,
-                                    V4L2_CID_TEST_PATTERN,
-                                    ARRAY_SIZE(imx219_test_pattern_menu) - 1,
-                                    0, 0, imx219_test_pattern_menu);
-       for (i = 0; i < 4; i++) {
-               /*
-                * The assumption is that
-                * V4L2_CID_TEST_PATTERN_GREENR == V4L2_CID_TEST_PATTERN_RED + 1
-                * V4L2_CID_TEST_PATTERN_BLUE   == V4L2_CID_TEST_PATTERN_RED + 2
-                * V4L2_CID_TEST_PATTERN_GREENB == V4L2_CID_TEST_PATTERN_RED + 3
-                */
-               v4l2_ctrl_new_std(ctrl_hdlr, &imx219_ctrl_ops,
-                                 V4L2_CID_TEST_PATTERN_RED + i,
-                                 IMX219_TESTP_COLOUR_MIN,
-                                 IMX219_TESTP_COLOUR_MAX,
-                                 IMX219_TESTP_COLOUR_STEP,
-                                 IMX219_TESTP_COLOUR_MAX);
-               /* The "Solid color" pattern is white by default */
-       }
+       return 0;
+}
 
-       if (ctrl_hdlr->error) {
-               ret = ctrl_hdlr->error;
-               dev_err(&client->dev, "%s control init failed (%d)\n",
-                       __func__, ret);
-               goto error;
-       }
+/* -----------------------------------------------------------------------------
+ * Probe & remove
+ */
 
-       ret = v4l2_fwnode_device_parse(&client->dev, &props);
-       if (ret)
-               goto error;
+static int imx219_get_regulators(struct imx219 *imx219)
+{
+       struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+       unsigned int i;
 
-       ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &imx219_ctrl_ops,
-                                             &props);
-       if (ret)
-               goto error;
+       for (i = 0; i < IMX219_NUM_SUPPLIES; i++)
+               imx219->supplies[i].supply = imx219_supply_name[i];
 
-       imx219->sd.ctrl_handler = ctrl_hdlr;
+       return devm_regulator_bulk_get(&client->dev,
+                                      IMX219_NUM_SUPPLIES,
+                                      imx219->supplies);
+}
 
-       return 0;
+/* Verify chip ID */
+static int imx219_identify_module(struct imx219 *imx219)
+{
+       struct i2c_client *client = v4l2_get_subdevdata(&imx219->sd);
+       int ret;
+       u64 val;
 
-error:
-       v4l2_ctrl_handler_free(ctrl_hdlr);
+       ret = cci_read(imx219->regmap, IMX219_REG_CHIP_ID, &val, NULL);
+       if (ret) {
+               dev_err(&client->dev, "failed to read chip id %x\n",
+                       IMX219_CHIP_ID);
+               return ret;
+       }
 
-       return ret;
-}
+       if (val != IMX219_CHIP_ID) {
+               dev_err(&client->dev, "chip id mismatch: %x!=%llx\n",
+                       IMX219_CHIP_ID, val);
+               return -EIO;
+       }
 
-static void imx219_free_controls(struct imx219 *imx219)
-{
-       v4l2_ctrl_handler_free(imx219->sd.ctrl_handler);
+       return 0;
 }
 
 static int imx219_check_hwcfg(struct device *dev, struct imx219 *imx219)
@@ -1336,6 +1103,13 @@ static int imx219_probe(struct i2c_client *client)
        if (imx219_check_hwcfg(dev, imx219))
                return -EINVAL;
 
+       imx219->regmap = devm_cci_regmap_init_i2c(client, 16);
+       if (IS_ERR(imx219->regmap)) {
+               ret = PTR_ERR(imx219->regmap);
+               dev_err(dev, "failed to initialize CCI: %d\n", ret);
+               return ret;
+       }
+
        /* Get system clock (xclk) */
        imx219->xclk = devm_clk_get(dev, NULL);
        if (IS_ERR(imx219->xclk)) {
@@ -1372,24 +1146,24 @@ static int imx219_probe(struct i2c_client *client)
        if (ret)
                goto error_power_off;
 
-       /* Set default mode to max resolution */
-       imx219->mode = &supported_modes[0];
-
-       /* sensor doesn't enter LP-11 state upon power up until and unless
+       /*
+        * Sensor doesn't enter LP-11 state upon power up until and unless
         * streaming is started, so upon power up switch the modes to:
         * streaming -> standby
         */
-       ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-                              IMX219_REG_VALUE_08BIT, IMX219_MODE_STREAMING);
+       ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
+                       IMX219_MODE_STREAMING, NULL);
        if (ret < 0)
                goto error_power_off;
+
        usleep_range(100, 110);
 
        /* put sensor back to standby mode */
-       ret = imx219_write_reg(imx219, IMX219_REG_MODE_SELECT,
-                              IMX219_REG_VALUE_08BIT, IMX219_MODE_STANDBY);
+       ret = cci_write(imx219->regmap, IMX219_REG_MODE_SELECT,
+                       IMX219_MODE_STANDBY, NULL);
        if (ret < 0)
                goto error_power_off;
+
        usleep_range(100, 110);
 
        ret = imx219_init_controls(imx219);
@@ -1468,7 +1242,6 @@ static const struct of_device_id imx219_dt_ids[] = {
 MODULE_DEVICE_TABLE(of, imx219_dt_ids);
 
 static const struct dev_pm_ops imx219_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(imx219_suspend, imx219_resume)
        SET_RUNTIME_PM_OPS(imx219_power_off, imx219_power_on, NULL)
 };
 
index e196565e846e64c9d22defeee5607c9f61347c0e..b3827f4bc0eb35c489dab8c8b0f728d2edf7ad8d 100644 (file)
@@ -622,9 +622,6 @@ struct imx258 {
         */
        struct mutex mutex;
 
-       /* Streaming on/off */
-       bool streaming;
-
        struct clk *clk;
 };
 
@@ -1035,10 +1032,6 @@ static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
        int ret = 0;
 
        mutex_lock(&imx258->mutex);
-       if (imx258->streaming == enable) {
-               mutex_unlock(&imx258->mutex);
-               return 0;
-       }
 
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -1057,7 +1050,6 @@ static int imx258_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       imx258->streaming = enable;
        mutex_unlock(&imx258->mutex);
 
        return ret;
@@ -1070,37 +1062,6 @@ err_unlock:
        return ret;
 }
 
-static int __maybe_unused imx258_suspend(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct imx258 *imx258 = to_imx258(sd);
-
-       if (imx258->streaming)
-               imx258_stop_streaming(imx258);
-
-       return 0;
-}
-
-static int __maybe_unused imx258_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct imx258 *imx258 = to_imx258(sd);
-       int ret;
-
-       if (imx258->streaming) {
-               ret = imx258_start_streaming(imx258);
-               if (ret)
-                       goto error;
-       }
-
-       return 0;
-
-error:
-       imx258_stop_streaming(imx258);
-       imx258->streaming = 0;
-       return ret;
-}
-
 /* Verify chip ID */
 static int imx258_identify_module(struct imx258 *imx258)
 {
@@ -1369,7 +1330,6 @@ static void imx258_remove(struct i2c_client *client)
 }
 
 static const struct dev_pm_ops imx258_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(imx258_suspend, imx258_resume)
        SET_RUNTIME_PM_OPS(imx258_power_off, imx258_power_on, NULL)
 };
 
index 3b4539b622b43b068ae1b933273e7b656b835a48..94aac9d2732f1ffdc3c8abfcedf98b84f9a8b36a 100644 (file)
@@ -201,8 +201,6 @@ struct imx296 {
        const struct imx296_clk_params *clk_params;
        bool mono;
 
-       bool streaming;
-
        struct v4l2_subdev subdev;
        struct media_pad pad;
 
@@ -321,7 +319,7 @@ static int imx296_s_ctrl(struct v4l2_ctrl *ctrl)
        unsigned int vmax;
        int ret = 0;
 
-       if (!sensor->streaming)
+       if (!pm_runtime_get_if_in_use(sensor->dev))
                return 0;
 
        state = v4l2_subdev_get_locked_active_state(&sensor->subdev);
@@ -376,6 +374,8 @@ static int imx296_s_ctrl(struct v4l2_ctrl *ctrl)
                break;
        }
 
+       pm_runtime_put(sensor->dev);
+
        return ret;
 }
 
@@ -607,8 +607,6 @@ static int imx296_s_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_mark_last_busy(sensor->dev);
                pm_runtime_put_autosuspend(sensor->dev);
 
-               sensor->streaming = false;
-
                goto unlock;
        }
 
@@ -620,13 +618,6 @@ static int imx296_s_stream(struct v4l2_subdev *sd, int enable)
        if (ret < 0)
                goto err_pm;
 
-       /*
-        * Set streaming to true to ensure __v4l2_ctrl_handler_setup() will set
-        * the controls. The flag is reset to false further down if an error
-        * occurs.
-        */
-       sensor->streaming = true;
-
        ret = __v4l2_ctrl_handler_setup(&sensor->ctrls);
        if (ret < 0)
                goto err_pm;
@@ -646,7 +637,6 @@ err_pm:
         * likely has no other chance to recover.
         */
        pm_runtime_put_sync(sensor->dev);
-       sensor->streaming = false;
 
        goto unlock;
 }
index 52ebb096e107502cf3d60c9c10b1c8881457e477..5378f607f34017a616ebc7150e4fa718b2dda83f 100644 (file)
@@ -138,8 +138,6 @@ struct imx319 {
         */
        struct mutex mutex;
 
-       /* Streaming on/off */
-       bool streaming;
        /* True if the device has been identified */
        bool identified;
 };
@@ -2166,10 +2164,6 @@ static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
        int ret = 0;
 
        mutex_lock(&imx319->mutex);
-       if (imx319->streaming == enable) {
-               mutex_unlock(&imx319->mutex);
-               return 0;
-       }
 
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -2188,8 +2182,6 @@ static int imx319_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       imx319->streaming = enable;
-
        /* vflip and hflip cannot change during streaming */
        __v4l2_ctrl_grab(imx319->vflip, enable);
        __v4l2_ctrl_grab(imx319->hflip, enable);
@@ -2206,37 +2198,6 @@ err_unlock:
        return ret;
 }
 
-static int __maybe_unused imx319_suspend(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct imx319 *imx319 = to_imx319(sd);
-
-       if (imx319->streaming)
-               imx319_stop_streaming(imx319);
-
-       return 0;
-}
-
-static int __maybe_unused imx319_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct imx319 *imx319 = to_imx319(sd);
-       int ret;
-
-       if (imx319->streaming) {
-               ret = imx319_start_streaming(imx319);
-               if (ret)
-                       goto error;
-       }
-
-       return 0;
-
-error:
-       imx319_stop_streaming(imx319);
-       imx319->streaming = 0;
-       return ret;
-}
-
 static const struct v4l2_subdev_core_ops imx319_subdev_core_ops = {
        .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
        .unsubscribe_event = v4l2_event_subdev_unsubscribe,
@@ -2542,10 +2503,6 @@ static void imx319_remove(struct i2c_client *client)
        mutex_destroy(&imx319->mutex);
 }
 
-static const struct dev_pm_ops imx319_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(imx319_suspend, imx319_resume)
-};
-
 static const struct acpi_device_id imx319_acpi_ids[] __maybe_unused = {
        { "SONY319A" },
        { /* sentinel */ }
@@ -2555,7 +2512,6 @@ MODULE_DEVICE_TABLE(acpi, imx319_acpi_ids);
 static struct i2c_driver imx319_i2c_driver = {
        .driver = {
                .name = "imx319",
-               .pm = &imx319_pm_ops,
                .acpi_match_table = ACPI_PTR(imx319_acpi_ids),
        },
        .probe = imx319_probe,
index d722c9b7cd31d002349d7189436c57d9a4373eef..1196fe93506bd08593eabb70dfbdbb49514d7a32 100644 (file)
 #define IMX334_REG_MIN         0x00
 #define IMX334_REG_MAX         0xfffff
 
+/* Test Pattern Control */
+#define IMX334_REG_TP          0x329e
+#define IMX334_TP_COLOR_HBARS  0xA
+#define IMX334_TP_COLOR_VBARS  0xB
+
+#define IMX334_TPG_EN_DOUT     0x329c
+#define IMX334_TP_ENABLE       0x1
+#define IMX334_TP_DISABLE      0x0
+
+#define IMX334_TPG_COLORW      0x32a0
+#define IMX334_TPG_COLORW_120P 0x13
+
+#define IMX334_TP_CLK_EN       0x3148
+#define IMX334_TP_CLK_EN_VAL   0x10
+#define IMX334_TP_CLK_DIS_VAL  0x0
+
+#define IMX334_DIG_CLP_MODE    0x3280
+
 /**
  * struct imx334_reg - imx334 sensor register
  * @address: Register address
@@ -120,7 +138,6 @@ struct imx334_mode {
  * @mutex: Mutex for serializing sensor controls
  * @menu_skip_mask: Menu skip mask for link_freq_ctrl
  * @cur_code: current selected format code
- * @streaming: Flag indicating streaming state
  */
 struct imx334 {
        struct device *dev;
@@ -143,7 +160,6 @@ struct imx334 {
        struct mutex mutex;
        unsigned long menu_skip_mask;
        u32 cur_code;
-       bool streaming;
 };
 
 static const s64 link_freq[] = {
@@ -430,6 +446,18 @@ static const struct imx334_reg mode_3840x2160_regs[] = {
        {0x3a29, 0x00},
 };
 
+static const char * const imx334_test_pattern_menu[] = {
+       "Disabled",
+       "Vertical Color Bars",
+       "Horizontal Color Bars",
+};
+
+static const int imx334_test_pattern_val[] = {
+       IMX334_TP_DISABLE,
+       IMX334_TP_COLOR_HBARS,
+       IMX334_TP_COLOR_VBARS,
+};
+
 static const struct imx334_reg raw10_framefmt_regs[] = {
        {0x3050, 0x00},
        {0x319d, 0x00},
@@ -716,6 +744,26 @@ static int imx334_set_ctrl(struct v4l2_ctrl *ctrl)
        case V4L2_CID_HBLANK:
                ret = 0;
                break;
+       case V4L2_CID_TEST_PATTERN:
+               if (ctrl->val) {
+                       imx334_write_reg(imx334, IMX334_TP_CLK_EN, 1,
+                                        IMX334_TP_CLK_EN_VAL);
+                       imx334_write_reg(imx334, IMX334_DIG_CLP_MODE, 1, 0x0);
+                       imx334_write_reg(imx334, IMX334_TPG_COLORW, 1,
+                                        IMX334_TPG_COLORW_120P);
+                       imx334_write_reg(imx334, IMX334_REG_TP, 1,
+                                        imx334_test_pattern_val[ctrl->val]);
+                       imx334_write_reg(imx334, IMX334_TPG_EN_DOUT, 1,
+                                        IMX334_TP_ENABLE);
+               } else {
+                       imx334_write_reg(imx334, IMX334_DIG_CLP_MODE, 1, 0x1);
+                       imx334_write_reg(imx334, IMX334_TP_CLK_EN, 1,
+                                        IMX334_TP_CLK_DIS_VAL);
+                       imx334_write_reg(imx334, IMX334_TPG_EN_DOUT, 1,
+                                        IMX334_TP_DISABLE);
+               }
+               ret = 0;
+               break;
        default:
                dev_err(imx334->dev, "Invalid control %d", ctrl->id);
                ret = -EINVAL;
@@ -1001,11 +1049,6 @@ static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
 
        mutex_lock(&imx334->mutex);
 
-       if (imx334->streaming == enable) {
-               mutex_unlock(&imx334->mutex);
-               return 0;
-       }
-
        if (enable) {
                ret = pm_runtime_resume_and_get(imx334->dev);
                if (ret < 0)
@@ -1019,8 +1062,6 @@ static int imx334_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(imx334->dev);
        }
 
-       imx334->streaming = enable;
-
        mutex_unlock(&imx334->mutex);
 
        return 0;
@@ -1222,7 +1263,7 @@ static int imx334_init_controls(struct imx334 *imx334)
        u32 lpfr;
        int ret;
 
-       ret = v4l2_ctrl_handler_init(ctrl_hdlr, 6);
+       ret = v4l2_ctrl_handler_init(ctrl_hdlr, 7);
        if (ret)
                return ret;
 
@@ -1282,6 +1323,11 @@ static int imx334_init_controls(struct imx334 *imx334)
        if (imx334->hblank_ctrl)
                imx334->hblank_ctrl->flags |= V4L2_CTRL_FLAG_READ_ONLY;
 
+       v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &imx334_ctrl_ops,
+                                    V4L2_CID_TEST_PATTERN,
+                                    ARRAY_SIZE(imx334_test_pattern_menu) - 1,
+                                    0, 0, imx334_test_pattern_menu);
+
        if (ctrl_hdlr->error) {
                dev_err(imx334->dev, "control init failed: %d",
                        ctrl_hdlr->error);
index 482a0b7f040a546d5fc625dab0763bb46c848877..ec729126274b2badd40cb3f8b2f5a552ae8c92e5 100644 (file)
@@ -119,7 +119,6 @@ struct imx335_mode {
  * @vblank: Vertical blanking in lines
  * @cur_mode: Pointer to current selected sensor mode
  * @mutex: Mutex for serializing sensor controls
- * @streaming: Flag indicating streaming state
  */
 struct imx335 {
        struct device *dev;
@@ -140,7 +139,6 @@ struct imx335 {
        u32 vblank;
        const struct imx335_mode *cur_mode;
        struct mutex mutex;
-       bool streaming;
 };
 
 static const s64 link_freq[] = {
@@ -705,11 +703,6 @@ static int imx335_set_stream(struct v4l2_subdev *sd, int enable)
 
        mutex_lock(&imx335->mutex);
 
-       if (imx335->streaming == enable) {
-               mutex_unlock(&imx335->mutex);
-               return 0;
-       }
-
        if (enable) {
                ret = pm_runtime_resume_and_get(imx335->dev);
                if (ret)
@@ -723,8 +716,6 @@ static int imx335_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(imx335->dev);
        }
 
-       imx335->streaming = enable;
-
        mutex_unlock(&imx335->mutex);
 
        return 0;
index 9c79ae8dc84284e89d31d07b0abf23293950bd73..9c58c1a80cba3bed2e9c2ccf04cbc3d383340754 100644 (file)
@@ -123,9 +123,6 @@ struct imx355 {
         * Protect access to sensor v4l2 controls.
         */
        struct mutex mutex;
-
-       /* Streaming on/off */
-       bool streaming;
 };
 
 static const struct imx355_reg imx355_global_regs[] = {
@@ -1436,10 +1433,6 @@ static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
        int ret = 0;
 
        mutex_lock(&imx355->mutex);
-       if (imx355->streaming == enable) {
-               mutex_unlock(&imx355->mutex);
-               return 0;
-       }
 
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -1458,8 +1451,6 @@ static int imx355_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       imx355->streaming = enable;
-
        /* vflip and hflip cannot change during streaming */
        __v4l2_ctrl_grab(imx355->vflip, enable);
        __v4l2_ctrl_grab(imx355->hflip, enable);
@@ -1476,37 +1467,6 @@ err_unlock:
        return ret;
 }
 
-static int __maybe_unused imx355_suspend(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct imx355 *imx355 = to_imx355(sd);
-
-       if (imx355->streaming)
-               imx355_stop_streaming(imx355);
-
-       return 0;
-}
-
-static int __maybe_unused imx355_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct imx355 *imx355 = to_imx355(sd);
-       int ret;
-
-       if (imx355->streaming) {
-               ret = imx355_start_streaming(imx355);
-               if (ret)
-                       goto error;
-       }
-
-       return 0;
-
-error:
-       imx355_stop_streaming(imx355);
-       imx355->streaming = 0;
-       return ret;
-}
-
 /* Verify chip ID */
 static int imx355_identify_module(struct imx355 *imx355)
 {
@@ -1829,10 +1789,6 @@ static void imx355_remove(struct i2c_client *client)
        mutex_destroy(&imx355->mutex);
 }
 
-static const struct dev_pm_ops imx355_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(imx355_suspend, imx355_resume)
-};
-
 static const struct acpi_device_id imx355_acpi_ids[] __maybe_unused = {
        { "SONY355A" },
        { /* sentinel */ }
@@ -1842,7 +1798,6 @@ MODULE_DEVICE_TABLE(acpi, imx355_acpi_ids);
 static struct i2c_driver imx355_i2c_driver = {
        .driver = {
                .name = "imx355",
-               .pm = &imx355_pm_ops,
                .acpi_match_table = ACPI_PTR(imx355_acpi_ids),
        },
        .probe = imx355_probe,
index c7e862ae4040f58214de303e1373e1f63ed18b7f..962b3136c31ee2a50b84de2fa8fdc67aa9ee5b32 100644 (file)
@@ -127,7 +127,6 @@ static const char * const imx412_supply_names[] = {
  * @vblank: Vertical blanking in lines
  * @cur_mode: Pointer to current selected sensor mode
  * @mutex: Mutex for serializing sensor controls
- * @streaming: Flag indicating streaming state
  */
 struct imx412 {
        struct device *dev;
@@ -149,7 +148,6 @@ struct imx412 {
        u32 vblank;
        const struct imx412_mode *cur_mode;
        struct mutex mutex;
-       bool streaming;
 };
 
 static const s64 link_freq[] = {
@@ -857,11 +855,6 @@ static int imx412_set_stream(struct v4l2_subdev *sd, int enable)
 
        mutex_lock(&imx412->mutex);
 
-       if (imx412->streaming == enable) {
-               mutex_unlock(&imx412->mutex);
-               return 0;
-       }
-
        if (enable) {
                ret = pm_runtime_resume_and_get(imx412->dev);
                if (ret)
@@ -875,8 +868,6 @@ static int imx412_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(imx412->dev);
        }
 
-       imx412->streaming = enable;
-
        mutex_unlock(&imx412->mutex);
 
        return 0;
index 3f00172df3cc380ad04abf499690507dacbaab68..b3fa71a168397256357bfd00bbabe751fd797db2 100644 (file)
@@ -353,8 +353,6 @@ struct imx415 {
 
        const struct imx415_clk_params *clk_params;
 
-       bool streaming;
-
        struct v4l2_subdev subdev;
        struct media_pad pad;
 
@@ -542,8 +540,9 @@ static int imx415_s_ctrl(struct v4l2_ctrl *ctrl)
        struct v4l2_subdev_state *state;
        unsigned int vmax;
        unsigned int flip;
+       int ret;
 
-       if (!sensor->streaming)
+       if (!pm_runtime_get_if_in_use(sensor->dev))
                return 0;
 
        state = v4l2_subdev_get_locked_active_state(&sensor->subdev);
@@ -554,24 +553,33 @@ static int imx415_s_ctrl(struct v4l2_ctrl *ctrl)
                /* clamp the exposure value to VMAX. */
                vmax = format->height + sensor->vblank->cur.val;
                ctrl->val = min_t(int, ctrl->val, vmax);
-               return imx415_write(sensor, IMX415_SHR0, vmax - ctrl->val);
+               ret = imx415_write(sensor, IMX415_SHR0, vmax - ctrl->val);
+               break;
 
        case V4L2_CID_ANALOGUE_GAIN:
                /* analogue gain in 0.3 dB step size */
-               return imx415_write(sensor, IMX415_GAIN_PCG_0, ctrl->val);
+               ret = imx415_write(sensor, IMX415_GAIN_PCG_0, ctrl->val);
+               break;
 
        case V4L2_CID_HFLIP:
        case V4L2_CID_VFLIP:
                flip = (sensor->hflip->val << IMX415_HREVERSE_SHIFT) |
                       (sensor->vflip->val << IMX415_VREVERSE_SHIFT);
-               return imx415_write(sensor, IMX415_REVERSE, flip);
+               ret = imx415_write(sensor, IMX415_REVERSE, flip);
+               break;
 
        case V4L2_CID_TEST_PATTERN:
-               return imx415_set_testpattern(sensor, ctrl->val);
+               ret = imx415_set_testpattern(sensor, ctrl->val);
+               break;
 
        default:
-               return -EINVAL;
+               ret = -EINVAL;
+               break;
        }
+
+       pm_runtime_put(sensor->dev);
+
+       return ret;
 }
 
 static const struct v4l2_ctrl_ops imx415_ctrl_ops = {
@@ -766,8 +774,6 @@ static int imx415_s_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_mark_last_busy(sensor->dev);
                pm_runtime_put_autosuspend(sensor->dev);
 
-               sensor->streaming = false;
-
                goto unlock;
        }
 
@@ -779,13 +785,6 @@ static int imx415_s_stream(struct v4l2_subdev *sd, int enable)
        if (ret)
                goto err_pm;
 
-       /*
-        * Set streaming to true to ensure __v4l2_ctrl_handler_setup() will set
-        * the controls. The flag is reset to false further down if an error
-        * occurs.
-        */
-       sensor->streaming = true;
-
        ret = __v4l2_ctrl_handler_setup(&sensor->ctrls);
        if (ret < 0)
                goto err_pm;
@@ -807,7 +806,6 @@ err_pm:
         * likely has no other chance to recover.
         */
        pm_runtime_put_sync(sensor->dev);
-       sensor->streaming = false;
 
        goto unlock;
 }
@@ -842,15 +840,6 @@ static int imx415_enum_frame_size(struct v4l2_subdev *sd,
        return 0;
 }
 
-static int imx415_get_format(struct v4l2_subdev *sd,
-                            struct v4l2_subdev_state *state,
-                            struct v4l2_subdev_format *fmt)
-{
-       fmt->format = *v4l2_subdev_get_pad_format(sd, state, fmt->pad);
-
-       return 0;
-}
-
 static int imx415_set_format(struct v4l2_subdev *sd,
                             struct v4l2_subdev_state *state,
                             struct v4l2_subdev_format *fmt)
@@ -913,7 +902,7 @@ static const struct v4l2_subdev_video_ops imx415_subdev_video_ops = {
 static const struct v4l2_subdev_pad_ops imx415_subdev_pad_ops = {
        .enum_mbus_code = imx415_enum_mbus_code,
        .enum_frame_size = imx415_enum_frame_size,
-       .get_fmt = imx415_get_format,
+       .get_fmt = v4l2_subdev_get_fmt,
        .set_fmt = imx415_set_format,
        .get_selection = imx415_get_selection,
        .init_cfg = imx415_init_cfg,
index be84ff1e2b170562226404ded0c33f3064a1cad5..fc1cf196ef015143d9a20bd6d0099405d2f244ef 100644 (file)
@@ -1449,7 +1449,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
 
                i2c_mux_mask |= BIT(id);
        }
-       of_node_put(node);
        of_node_put(i2c_mux);
 
        /* Parse the endpoints */
@@ -1513,7 +1512,6 @@ static int max9286_parse_dt(struct max9286_priv *priv)
                priv->source_mask |= BIT(ep.port);
                priv->nsources++;
        }
-       of_node_put(node);
 
        of_property_read_u32(dev->of_node, "maxim,bus-width", &priv->bus_width);
        switch (priv->bus_width) {
index bec76801487abf563086a3ba19de455381794dbc..0ed8561edfee6bfaa3f3d9b824ddd741edb794f7 100644 (file)
@@ -561,7 +561,7 @@ static int msp_log_status(struct v4l2_subdev *sd)
        struct msp_state *state = to_state(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        const char *p;
-       char prefix[V4L2_SUBDEV_NAME_SIZE + 20];
+       char prefix[sizeof(sd->name) + 20];
 
        if (state->opmode == OPMODE_AUTOSELECT)
                msp_detect_stereo(client);
index ce9568e8391cddf8eef06afb14f77dd3dcd7db6d..79192cf79d28745ffc9b6335d043c8321e7664cf 100644 (file)
@@ -93,7 +93,6 @@ struct mt9m001 {
                struct v4l2_ctrl *autoexposure;
                struct v4l2_ctrl *exposure;
        };
-       bool streaming;
        struct mutex mutex;
        struct v4l2_rect rect;  /* Sensor window */
        struct clk *clk;
@@ -213,9 +212,6 @@ static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable)
 
        mutex_lock(&mt9m001->mutex);
 
-       if (mt9m001->streaming == enable)
-               goto done;
-
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
                if (ret < 0)
@@ -239,8 +235,6 @@ static int mt9m001_s_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       mt9m001->streaming = enable;
-done:
        mutex_unlock(&mt9m001->mutex);
 
        return 0;
index df8d9c9e6a96cf1b3953f3544dafe954fb87a286..1f44b72e8a70c2c39ecc727613065122b87195cd 100644 (file)
@@ -244,9 +244,7 @@ struct mt9m111 {
        bool is_streaming;
        /* user point of view - 0: falling 1: rising edge */
        unsigned int pclk_sample:1;
-#ifdef CONFIG_MEDIA_CONTROLLER
        struct media_pad pad;
-#endif
 };
 
 static const struct mt9m111_mode_info mt9m111_mode_data[MT9M111_NUM_MODES] = {
@@ -527,13 +525,9 @@ static int mt9m111_get_fmt(struct v4l2_subdev *sd,
                return -EINVAL;
 
        if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
                mf = v4l2_subdev_get_try_format(sd, sd_state, format->pad);
                format->format = *mf;
                return 0;
-#else
-               return -EINVAL;
-#endif
        }
 
        mf->width       = mt9m111->width;
@@ -1120,7 +1114,6 @@ static int mt9m111_s_stream(struct v4l2_subdev *sd, int enable)
 static int mt9m111_init_cfg(struct v4l2_subdev *sd,
                            struct v4l2_subdev_state *sd_state)
 {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
        struct v4l2_mbus_framefmt *format =
                v4l2_subdev_get_try_format(sd, sd_state, 0);
 
@@ -1132,7 +1125,7 @@ static int mt9m111_init_cfg(struct v4l2_subdev *sd,
        format->ycbcr_enc       = V4L2_YCBCR_ENC_DEFAULT;
        format->quantization    = V4L2_QUANTIZATION_DEFAULT;
        format->xfer_func       = V4L2_XFER_FUNC_DEFAULT;
-#endif
+
        return 0;
 }
 
@@ -1315,13 +1308,11 @@ static int mt9m111_probe(struct i2c_client *client)
                return ret;
        }
 
-#ifdef CONFIG_MEDIA_CONTROLLER
        mt9m111->pad.flags = MEDIA_PAD_FL_SOURCE;
        mt9m111->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
        ret = media_entity_pads_init(&mt9m111->subdev.entity, 1, &mt9m111->pad);
        if (ret < 0)
                goto out_hdlfree;
-#endif
 
        mt9m111->current_mode = &mt9m111_mode_data[MT9M111_MODE_SXGA_15FPS];
        mt9m111->frame_interval.numerator = 1;
@@ -1350,10 +1341,8 @@ static int mt9m111_probe(struct i2c_client *client)
        return 0;
 
 out_entityclean:
-#ifdef CONFIG_MEDIA_CONTROLLER
        media_entity_cleanup(&mt9m111->subdev.entity);
 out_hdlfree:
-#endif
        v4l2_ctrl_handler_free(&mt9m111->hdl);
 
        return ret;
diff --git a/drivers/media/i2c/mt9m114.c b/drivers/media/i2c/mt9m114.c
new file mode 100644 (file)
index 0000000..ac19078
--- /dev/null
@@ -0,0 +1,2481 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * mt9m114.c onsemi MT9M114 sensor driver
+ *
+ * Copyright (c) 2020-2023 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Copyright (c) 2012 Analog Devices Inc.
+ *
+ * Almost complete rewrite of work by Scott Jiang <Scott.Jiang.Linux@gmail.com>
+ * itself based on work from Andrew Chew <achew@nvidia.com>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/videodev2.h>
+
+#include <media/v4l2-async.h>
+#include <media/v4l2-cci.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+
+/* Sysctl registers */
+#define MT9M114_CHIP_ID                                        CCI_REG16(0x0000)
+#define MT9M114_COMMAND_REGISTER                       CCI_REG16(0x0080)
+#define MT9M114_COMMAND_REGISTER_APPLY_PATCH                   BIT(0)
+#define MT9M114_COMMAND_REGISTER_SET_STATE                     BIT(1)
+#define MT9M114_COMMAND_REGISTER_REFRESH                       BIT(2)
+#define MT9M114_COMMAND_REGISTER_WAIT_FOR_EVENT                        BIT(3)
+#define MT9M114_COMMAND_REGISTER_OK                            BIT(15)
+#define MT9M114_RESET_AND_MISC_CONTROL                 CCI_REG16(0x001a)
+#define MT9M114_RESET_SOC                                      BIT(0)
+#define MT9M114_PAD_SLEW                               CCI_REG16(0x001e)
+#define MT9M114_PAD_CONTROL                            CCI_REG16(0x0032)
+
+/* XDMA registers */
+#define MT9M114_ACCESS_CTL_STAT                                CCI_REG16(0x0982)
+#define MT9M114_PHYSICAL_ADDRESS_ACCESS                        CCI_REG16(0x098a)
+#define MT9M114_LOGICAL_ADDRESS_ACCESS                 CCI_REG16(0x098e)
+
+/* Sensor Core registers */
+#define MT9M114_COARSE_INTEGRATION_TIME                        CCI_REG16(0x3012)
+#define MT9M114_FINE_INTEGRATION_TIME                  CCI_REG16(0x3014)
+#define MT9M114_RESET_REGISTER                         CCI_REG16(0x301a)
+#define MT9M114_RESET_REGISTER_LOCK_REG                                BIT(3)
+#define MT9M114_RESET_REGISTER_MASK_BAD                                BIT(9)
+#define MT9M114_FLASH                                  CCI_REG16(0x3046)
+#define MT9M114_GREEN1_GAIN                            CCI_REG16(0x3056)
+#define MT9M114_BLUE_GAIN                              CCI_REG16(0x3058)
+#define MT9M114_RED_GAIN                               CCI_REG16(0x305a)
+#define MT9M114_GREEN2_GAIN                            CCI_REG16(0x305c)
+#define MT9M114_GLOBAL_GAIN                            CCI_REG16(0x305e)
+#define MT9M114_GAIN_DIGITAL_GAIN(n)                           ((n) << 12)
+#define MT9M114_GAIN_DIGITAL_GAIN_MASK                         (0xf << 12)
+#define MT9M114_GAIN_ANALOG_GAIN(n)                            ((n) << 0)
+#define MT9M114_GAIN_ANALOG_GAIN_MASK                          (0xff << 0)
+#define MT9M114_CUSTOMER_REV                           CCI_REG16(0x31fe)
+
+/* Monitor registers */
+#define MT9M114_MON_MAJOR_VERSION                      CCI_REG16(0x8000)
+#define MT9M114_MON_MINOR_VERSION                      CCI_REG16(0x8002)
+#define MT9M114_MON_RELEASE_VERSION                    CCI_REG16(0x8004)
+
+/* Auto-Exposure Track registers */
+#define MT9M114_AE_TRACK_ALGO                          CCI_REG16(0xa804)
+#define MT9M114_AE_TRACK_EXEC_AUTOMATIC_EXPOSURE               BIT(0)
+#define MT9M114_AE_TRACK_AE_TRACKING_DAMPENING_SPEED   CCI_REG8(0xa80a)
+
+/* Color Correction Matrix registers */
+#define MT9M114_CCM_ALGO                               CCI_REG16(0xb404)
+#define MT9M114_CCM_EXEC_CALC_CCM_MATRIX                       BIT(4)
+#define MT9M114_CCM_DELTA_GAIN                         CCI_REG8(0xb42a)
+
+/* Camera Control registers */
+#define MT9M114_CAM_SENSOR_CFG_Y_ADDR_START            CCI_REG16(0xc800)
+#define MT9M114_CAM_SENSOR_CFG_X_ADDR_START            CCI_REG16(0xc802)
+#define MT9M114_CAM_SENSOR_CFG_Y_ADDR_END              CCI_REG16(0xc804)
+#define MT9M114_CAM_SENSOR_CFG_X_ADDR_END              CCI_REG16(0xc806)
+#define MT9M114_CAM_SENSOR_CFG_PIXCLK                  CCI_REG32(0xc808)
+#define MT9M114_CAM_SENSOR_CFG_ROW_SPEED               CCI_REG16(0xc80c)
+#define MT9M114_CAM_SENSOR_CFG_FINE_INTEG_TIME_MIN     CCI_REG16(0xc80e)
+#define MT9M114_CAM_SENSOR_CFG_FINE_INTEG_TIME_MAX     CCI_REG16(0xc810)
+#define MT9M114_CAM_SENSOR_CFG_FRAME_LENGTH_LINES      CCI_REG16(0xc812)
+#define MT9M114_CAM_SENSOR_CFG_FRAME_LENGTH_LINES_MAX          65535
+#define MT9M114_CAM_SENSOR_CFG_LINE_LENGTH_PCK         CCI_REG16(0xc814)
+#define MT9M114_CAM_SENSOR_CFG_LINE_LENGTH_PCK_MAX             8191
+#define MT9M114_CAM_SENSOR_CFG_FINE_CORRECTION         CCI_REG16(0xc816)
+#define MT9M114_CAM_SENSOR_CFG_CPIPE_LAST_ROW          CCI_REG16(0xc818)
+#define MT9M114_CAM_SENSOR_CFG_REG_0_DATA              CCI_REG16(0xc826)
+#define MT9M114_CAM_SENSOR_CONTROL_READ_MODE           CCI_REG16(0xc834)
+#define MT9M114_CAM_SENSOR_CONTROL_HORZ_MIRROR_EN              BIT(0)
+#define MT9M114_CAM_SENSOR_CONTROL_VERT_FLIP_EN                        BIT(1)
+#define MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_NORMAL           (0 << 4)
+#define MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_SKIPPING         (1 << 4)
+#define MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_AVERAGE          (2 << 4)
+#define MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_SUMMING          (3 << 4)
+#define MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_MASK             (3 << 4)
+#define MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_NORMAL           (0 << 8)
+#define MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_SKIPPING         (1 << 8)
+#define MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_SUMMING          (3 << 8)
+#define MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_MASK             (3 << 8)
+#define MT9M114_CAM_SENSOR_CONTROL_ANALOG_GAIN         CCI_REG16(0xc836)
+#define MT9M114_CAM_SENSOR_CONTROL_COARSE_INTEGRATION_TIME     CCI_REG16(0xc83c)
+#define MT9M114_CAM_SENSOR_CONTROL_FINE_INTEGRATION_TIME       CCI_REG16(0xc83e)
+#define MT9M114_CAM_MODE_SELECT                                CCI_REG8(0xc84c)
+#define MT9M114_CAM_MODE_SELECT_NORMAL                         (0 << 0)
+#define MT9M114_CAM_MODE_SELECT_LENS_CALIBRATION               (1 << 0)
+#define MT9M114_CAM_MODE_SELECT_TEST_PATTERN                   (2 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT           CCI_REG8(0xc84d)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_SOLID             (1 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_SOLID_BARS                (4 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_RANDOM            (5 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_FADING_BARS       (8 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_WALKING_1S_10B    (10 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_SELECT_WALKING_1S_8B     (11 << 0)
+#define MT9M114_CAM_MODE_TEST_PATTERN_RED              CCI_REG16(0xc84e)
+#define MT9M114_CAM_MODE_TEST_PATTERN_GREEN            CCI_REG16(0xc850)
+#define MT9M114_CAM_MODE_TEST_PATTERN_BLUE             CCI_REG16(0xc852)
+#define MT9M114_CAM_CROP_WINDOW_XOFFSET                        CCI_REG16(0xc854)
+#define MT9M114_CAM_CROP_WINDOW_YOFFSET                        CCI_REG16(0xc856)
+#define MT9M114_CAM_CROP_WINDOW_WIDTH                  CCI_REG16(0xc858)
+#define MT9M114_CAM_CROP_WINDOW_HEIGHT                 CCI_REG16(0xc85a)
+#define MT9M114_CAM_CROP_CROPMODE                      CCI_REG8(0xc85c)
+#define MT9M114_CAM_CROP_MODE_AE_AUTO_CROP_EN                  BIT(0)
+#define MT9M114_CAM_CROP_MODE_AWB_AUTO_CROP_EN                 BIT(1)
+#define MT9M114_CAM_OUTPUT_WIDTH                       CCI_REG16(0xc868)
+#define MT9M114_CAM_OUTPUT_HEIGHT                      CCI_REG16(0xc86a)
+#define MT9M114_CAM_OUTPUT_FORMAT                      CCI_REG16(0xc86c)
+#define MT9M114_CAM_OUTPUT_FORMAT_SWAP_RED_BLUE                        BIT(0)
+#define MT9M114_CAM_OUTPUT_FORMAT_SWAP_BYTES                   BIT(1)
+#define MT9M114_CAM_OUTPUT_FORMAT_MONO_ENABLE                  BIT(2)
+#define MT9M114_CAM_OUTPUT_FORMAT_BT656_ENABLE                 BIT(3)
+#define MT9M114_CAM_OUTPUT_FORMAT_BT656_CROP_SCALE_DISABLE     BIT(4)
+#define MT9M114_CAM_OUTPUT_FORMAT_FVLV_DISABLE                 BIT(5)
+#define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV                   (0 << 8)
+#define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_RGB                   (1 << 8)
+#define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_BAYER                 (2 << 8)
+#define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_NONE                  (3 << 8)
+#define MT9M114_CAM_OUTPUT_FORMAT_FORMAT_MASK                  (3 << 8)
+#define MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_RAWR10          (0 << 10)
+#define MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_PRELSC_8_2      (1 << 10)
+#define MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_POSTLSC_8_2     (2 << 10)
+#define MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_PROCESSED8      (3 << 10)
+#define MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_MASK            (3 << 10)
+#define MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_565RGB            (0 << 12)
+#define MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_555RGB            (1 << 12)
+#define MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_444xRGB           (2 << 12)
+#define MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_444RGBx           (3 << 12)
+#define MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_MASK              (3 << 12)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV                  CCI_REG16(0xc86e)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_CLIP                     BIT(5)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_AUV_OFFSET               BIT(4)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_SELECT_601               BIT(3)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_NORMALISE                        BIT(2)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_SAMPLING_EVEN_UV         (0 << 0)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_SAMPLING_ODD_UV          (1 << 0)
+#define MT9M114_CAM_OUTPUT_FORMAT_YUV_SAMPLING_EVENU_ODDV      (2 << 0)
+#define MT9M114_CAM_OUTPUT_Y_OFFSET                    CCI_REG8(0xc870)
+#define MT9M114_CAM_AET_AEMODE                         CCI_REG8(0xc878)
+#define MT9M114_CAM_AET_EXEC_SET_INDOOR                                BIT(0)
+#define MT9M114_CAM_AET_DISCRETE_FRAMERATE                     BIT(1)
+#define MT9M114_CAM_AET_ADAPTATIVE_TARGET_LUMA                 BIT(2)
+#define MT9M114_CAM_AET_ADAPTATIVE_SKIP_FRAMES                 BIT(3)
+#define MT9M114_CAM_AET_SKIP_FRAMES                    CCI_REG8(0xc879)
+#define MT9M114_CAM_AET_TARGET_AVERAGE_LUMA            CCI_REG8(0xc87a)
+#define MT9M114_CAM_AET_TARGET_AVERAGE_LUMA_DARK       CCI_REG8(0xc87b)
+#define MT9M114_CAM_AET_BLACK_CLIPPING_TARGET          CCI_REG16(0xc87c)
+#define MT9M114_CAM_AET_AE_MIN_VIRT_INT_TIME_PCLK      CCI_REG16(0xc87e)
+#define MT9M114_CAM_AET_AE_MIN_VIRT_DGAIN              CCI_REG16(0xc880)
+#define MT9M114_CAM_AET_AE_MAX_VIRT_DGAIN              CCI_REG16(0xc882)
+#define MT9M114_CAM_AET_AE_MIN_VIRT_AGAIN              CCI_REG16(0xc884)
+#define MT9M114_CAM_AET_AE_MAX_VIRT_AGAIN              CCI_REG16(0xc886)
+#define MT9M114_CAM_AET_AE_VIRT_GAIN_TH_EG             CCI_REG16(0xc888)
+#define MT9M114_CAM_AET_AE_EG_GATE_PERCENTAGE          CCI_REG8(0xc88a)
+#define MT9M114_CAM_AET_FLICKER_FREQ_HZ                        CCI_REG8(0xc88b)
+#define MT9M114_CAM_AET_MAX_FRAME_RATE                 CCI_REG16(0xc88c)
+#define MT9M114_CAM_AET_MIN_FRAME_RATE                 CCI_REG16(0xc88e)
+#define MT9M114_CAM_AET_TARGET_GAIN                    CCI_REG16(0xc890)
+#define MT9M114_CAM_AWB_CCM_L(n)                       CCI_REG16(0xc892 + (n) * 2)
+#define MT9M114_CAM_AWB_CCM_M(n)                       CCI_REG16(0xc8a4 + (n) * 2)
+#define MT9M114_CAM_AWB_CCM_R(n)                       CCI_REG16(0xc8b6 + (n) * 2)
+#define MT9M114_CAM_AWB_CCM_L_RG_GAIN                  CCI_REG16(0xc8c8)
+#define MT9M114_CAM_AWB_CCM_L_BG_GAIN                  CCI_REG16(0xc8ca)
+#define MT9M114_CAM_AWB_CCM_M_RG_GAIN                  CCI_REG16(0xc8cc)
+#define MT9M114_CAM_AWB_CCM_M_BG_GAIN                  CCI_REG16(0xc8ce)
+#define MT9M114_CAM_AWB_CCM_R_RG_GAIN                  CCI_REG16(0xc8d0)
+#define MT9M114_CAM_AWB_CCM_R_BG_GAIN                  CCI_REG16(0xc8d2)
+#define MT9M114_CAM_AWB_CCM_L_CTEMP                    CCI_REG16(0xc8d4)
+#define MT9M114_CAM_AWB_CCM_M_CTEMP                    CCI_REG16(0xc8d6)
+#define MT9M114_CAM_AWB_CCM_R_CTEMP                    CCI_REG16(0xc8d8)
+#define MT9M114_CAM_AWB_AWB_XSCALE                     CCI_REG8(0xc8f2)
+#define MT9M114_CAM_AWB_AWB_YSCALE                     CCI_REG8(0xc8f3)
+#define MT9M114_CAM_AWB_AWB_WEIGHTS(n)                 CCI_REG16(0xc8f4 + (n) * 2)
+#define MT9M114_CAM_AWB_AWB_XSHIFT_PRE_ADJ             CCI_REG16(0xc904)
+#define MT9M114_CAM_AWB_AWB_YSHIFT_PRE_ADJ             CCI_REG16(0xc906)
+#define MT9M114_CAM_AWB_AWBMODE                                CCI_REG8(0xc909)
+#define MT9M114_CAM_AWB_MODE_AUTO                              BIT(1)
+#define MT9M114_CAM_AWB_MODE_EXCLUSIVE_AE                      BIT(0)
+#define MT9M114_CAM_AWB_K_R_L                          CCI_REG8(0xc90c)
+#define MT9M114_CAM_AWB_K_G_L                          CCI_REG8(0xc90d)
+#define MT9M114_CAM_AWB_K_B_L                          CCI_REG8(0xc90e)
+#define MT9M114_CAM_AWB_K_R_R                          CCI_REG8(0xc90f)
+#define MT9M114_CAM_AWB_K_G_R                          CCI_REG8(0xc910)
+#define MT9M114_CAM_AWB_K_B_R                          CCI_REG8(0xc911)
+#define MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XSTART                CCI_REG16(0xc914)
+#define MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YSTART                CCI_REG16(0xc916)
+#define MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XEND          CCI_REG16(0xc918)
+#define MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YEND          CCI_REG16(0xc91a)
+#define MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XSTART      CCI_REG16(0xc91c)
+#define MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YSTART      CCI_REG16(0xc91e)
+#define MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XEND                CCI_REG16(0xc920)
+#define MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YEND                CCI_REG16(0xc922)
+#define MT9M114_CAM_LL_LLMODE                          CCI_REG16(0xc924)
+#define MT9M114_CAM_LL_START_BRIGHTNESS                        CCI_REG16(0xc926)
+#define MT9M114_CAM_LL_STOP_BRIGHTNESS                 CCI_REG16(0xc928)
+#define MT9M114_CAM_LL_START_SATURATION                        CCI_REG8(0xc92a)
+#define MT9M114_CAM_LL_END_SATURATION                  CCI_REG8(0xc92b)
+#define MT9M114_CAM_LL_START_DESATURATION              CCI_REG8(0xc92c)
+#define MT9M114_CAM_LL_END_DESATURATION                        CCI_REG8(0xc92d)
+#define MT9M114_CAM_LL_START_DEMOSAICING               CCI_REG8(0xc92e)
+#define MT9M114_CAM_LL_START_AP_GAIN                   CCI_REG8(0xc92f)
+#define MT9M114_CAM_LL_START_AP_THRESH                 CCI_REG8(0xc930)
+#define MT9M114_CAM_LL_STOP_DEMOSAICING                        CCI_REG8(0xc931)
+#define MT9M114_CAM_LL_STOP_AP_GAIN                    CCI_REG8(0xc932)
+#define MT9M114_CAM_LL_STOP_AP_THRESH                  CCI_REG8(0xc933)
+#define MT9M114_CAM_LL_START_NR_RED                    CCI_REG8(0xc934)
+#define MT9M114_CAM_LL_START_NR_GREEN                  CCI_REG8(0xc935)
+#define MT9M114_CAM_LL_START_NR_BLUE                   CCI_REG8(0xc936)
+#define MT9M114_CAM_LL_START_NR_THRESH                 CCI_REG8(0xc937)
+#define MT9M114_CAM_LL_STOP_NR_RED                     CCI_REG8(0xc938)
+#define MT9M114_CAM_LL_STOP_NR_GREEN                   CCI_REG8(0xc939)
+#define MT9M114_CAM_LL_STOP_NR_BLUE                    CCI_REG8(0xc93a)
+#define MT9M114_CAM_LL_STOP_NR_THRESH                  CCI_REG8(0xc93b)
+#define MT9M114_CAM_LL_START_CONTRAST_BM               CCI_REG16(0xc93c)
+#define MT9M114_CAM_LL_STOP_CONTRAST_BM                        CCI_REG16(0xc93e)
+#define MT9M114_CAM_LL_GAMMA                           CCI_REG16(0xc940)
+#define MT9M114_CAM_LL_START_CONTRAST_GRADIENT         CCI_REG8(0xc942)
+#define MT9M114_CAM_LL_STOP_CONTRAST_GRADIENT          CCI_REG8(0xc943)
+#define MT9M114_CAM_LL_START_CONTRAST_LUMA_PERCENTAGE  CCI_REG8(0xc944)
+#define MT9M114_CAM_LL_STOP_CONTRAST_LUMA_PERCENTAGE   CCI_REG8(0xc945)
+#define MT9M114_CAM_LL_START_GAIN_METRIC               CCI_REG16(0xc946)
+#define MT9M114_CAM_LL_STOP_GAIN_METRIC                        CCI_REG16(0xc948)
+#define MT9M114_CAM_LL_START_FADE_TO_BLACK_LUMA                CCI_REG16(0xc94a)
+#define MT9M114_CAM_LL_STOP_FADE_TO_BLACK_LUMA         CCI_REG16(0xc94c)
+#define MT9M114_CAM_LL_CLUSTER_DC_TH_BM                        CCI_REG16(0xc94e)
+#define MT9M114_CAM_LL_CLUSTER_DC_GATE_PERCENTAGE      CCI_REG8(0xc950)
+#define MT9M114_CAM_LL_SUMMING_SENSITIVITY_FACTOR      CCI_REG8(0xc951)
+#define MT9M114_CAM_LL_START_TARGET_LUMA_BM            CCI_REG16(0xc952)
+#define MT9M114_CAM_LL_STOP_TARGET_LUMA_BM             CCI_REG16(0xc954)
+#define MT9M114_CAM_PGA_PGA_CONTROL                    CCI_REG16(0xc95e)
+#define MT9M114_CAM_SYSCTL_PLL_ENABLE                  CCI_REG8(0xc97e)
+#define MT9M114_CAM_SYSCTL_PLL_ENABLE_VALUE                    BIT(0)
+#define MT9M114_CAM_SYSCTL_PLL_DIVIDER_M_N             CCI_REG16(0xc980)
+#define MT9M114_CAM_SYSCTL_PLL_DIVIDER_VALUE(m, n)             (((n) << 8) | (m))
+#define MT9M114_CAM_SYSCTL_PLL_DIVIDER_P               CCI_REG16(0xc982)
+#define MT9M114_CAM_SYSCTL_PLL_DIVIDER_P_VALUE(p)              ((p) << 8)
+#define MT9M114_CAM_PORT_OUTPUT_CONTROL                        CCI_REG16(0xc984)
+#define MT9M114_CAM_PORT_PORT_SELECT_PARALLEL                  (0 << 0)
+#define MT9M114_CAM_PORT_PORT_SELECT_MIPI                      (1 << 0)
+#define MT9M114_CAM_PORT_CLOCK_SLOWDOWN                                BIT(3)
+#define MT9M114_CAM_PORT_TRUNCATE_RAW_BAYER                    BIT(4)
+#define MT9M114_CAM_PORT_PIXCLK_GATE                           BIT(5)
+#define MT9M114_CAM_PORT_CONT_MIPI_CLK                         BIT(6)
+#define MT9M114_CAM_PORT_CHAN_NUM(vc)                          ((vc) << 8)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_HS_ZERO         CCI_REG16(0xc988)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_HS_ZERO_VALUE(n)                ((n) << 8)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_HS_EXIT_TRAIL   CCI_REG16(0xc98a)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_HS_EXIT_VALUE(n)                ((n) << 8)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_HS_TRAIL_VALUE(n)       ((n) << 0)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_POST_PRE    CCI_REG16(0xc98c)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_POST_VALUE(n)       ((n) << 8)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_PRE_VALUE(n)                ((n) << 0)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_TRAIL_ZERO  CCI_REG16(0xc98e)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_TRAIL_VALUE(n)      ((n) << 8)
+#define MT9M114_CAM_PORT_MIPI_TIMING_T_CLK_ZERO_VALUE(n)       ((n) << 0)
+
+/* System Manager registers */
+#define MT9M114_SYSMGR_NEXT_STATE                      CCI_REG8(0xdc00)
+#define MT9M114_SYSMGR_CURRENT_STATE                   CCI_REG8(0xdc01)
+#define MT9M114_SYSMGR_CMD_STATUS                      CCI_REG8(0xdc02)
+
+/* Patch Loader registers */
+#define MT9M114_PATCHLDR_LOADER_ADDRESS                        CCI_REG16(0xe000)
+#define MT9M114_PATCHLDR_PATCH_ID                      CCI_REG16(0xe002)
+#define MT9M114_PATCHLDR_FIRMWARE_ID                   CCI_REG32(0xe004)
+#define MT9M114_PATCHLDR_APPLY_STATUS                  CCI_REG8(0xe008)
+#define MT9M114_PATCHLDR_NUM_PATCHES                   CCI_REG8(0xe009)
+#define MT9M114_PATCHLDR_PATCH_ID_0                    CCI_REG16(0xe00a)
+#define MT9M114_PATCHLDR_PATCH_ID_1                    CCI_REG16(0xe00c)
+#define MT9M114_PATCHLDR_PATCH_ID_2                    CCI_REG16(0xe00e)
+#define MT9M114_PATCHLDR_PATCH_ID_3                    CCI_REG16(0xe010)
+#define MT9M114_PATCHLDR_PATCH_ID_4                    CCI_REG16(0xe012)
+#define MT9M114_PATCHLDR_PATCH_ID_5                    CCI_REG16(0xe014)
+#define MT9M114_PATCHLDR_PATCH_ID_6                    CCI_REG16(0xe016)
+#define MT9M114_PATCHLDR_PATCH_ID_7                    CCI_REG16(0xe018)
+
+/* SYS_STATE values (for SYSMGR_NEXT_STATE and SYSMGR_CURRENT_STATE) */
+#define MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE          0x28
+#define MT9M114_SYS_STATE_STREAMING                    0x31
+#define MT9M114_SYS_STATE_START_STREAMING              0x34
+#define MT9M114_SYS_STATE_ENTER_SUSPEND                        0x40
+#define MT9M114_SYS_STATE_SUSPENDED                    0x41
+#define MT9M114_SYS_STATE_ENTER_STANDBY                        0x50
+#define MT9M114_SYS_STATE_STANDBY                      0x52
+#define MT9M114_SYS_STATE_LEAVE_STANDBY                        0x54
+
+/* Result status of last SET_STATE command */
+#define MT9M114_SET_STATE_RESULT_ENOERR                        0x00
+#define MT9M114_SET_STATE_RESULT_EINVAL                        0x0c
+#define MT9M114_SET_STATE_RESULT_ENOSPC                        0x0d
+
+/*
+ * The minimum amount of horizontal and vertical blanking is undocumented. The
+ * minimum values that have been seen in register lists are 303 and 38, use
+ * them.
+ *
+ * Set the default to achieve 1280x960 at 30fps.
+ */
+#define MT9M114_MIN_HBLANK                             303
+#define MT9M114_MIN_VBLANK                             38
+#define MT9M114_DEF_HBLANK                             323
+#define MT9M114_DEF_VBLANK                             39
+
+#define MT9M114_DEF_FRAME_RATE                         30
+#define MT9M114_MAX_FRAME_RATE                         120
+
+#define MT9M114_PIXEL_ARRAY_WIDTH                      1296U
+#define MT9M114_PIXEL_ARRAY_HEIGHT                     976U
+
+/*
+ * These values are not well documented and are semi-arbitrary. The pixel array
+ * minimum output size is 8 pixels larger than the minimum scaler cropped input
+ * width to account for the demosaicing.
+ */
+#define MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH           (32U + 8U)
+#define MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT          (32U + 8U)
+#define MT9M114_SCALER_CROPPED_INPUT_WIDTH             32U
+#define MT9M114_SCALER_CROPPED_INPUT_HEIGHT            32U
+
+/* Indices into the mt9m114.ifp.tpg array. */
+#define MT9M114_TPG_PATTERN                            0
+#define MT9M114_TPG_RED                                        1
+#define MT9M114_TPG_GREEN                              2
+#define MT9M114_TPG_BLUE                               3
+
+/* -----------------------------------------------------------------------------
+ * Data Structures
+ */
+
+/* Bus types on which a given media bus format can be output (bitmask). */
+enum mt9m114_format_flag {
+       MT9M114_FMT_FLAG_PARALLEL = BIT(0),     /* Parallel output bus */
+       MT9M114_FMT_FLAG_CSI2 = BIT(1),         /* MIPI CSI-2 output bus */
+};
+
+/*
+ * Association between a media bus code, the corresponding value for the
+ * CAM_OUTPUT_FORMAT register, and the bus types the format is valid on.
+ */
+struct mt9m114_format_info {
+       u32 code;               /* MEDIA_BUS_FMT_* code */
+       u32 output_format;      /* MT9M114_CAM_OUTPUT_FORMAT_* register value */
+       u32 flags;              /* Mask of MT9M114_FMT_FLAG_* */
+};
+
+/*
+ * Main driver structure. The device is modelled as two subdevs: the pixel
+ * array (pa) and the image flow processor (ifp), each with its own media
+ * pad(s) and control handler.
+ */
+struct mt9m114 {
+       struct i2c_client *client;
+       struct regmap *regmap;
+
+       struct clk *clk;
+       struct gpio_desc *reset;
+       struct regulator_bulk_data supplies[3];
+       struct v4l2_fwnode_endpoint bus_cfg;
+
+       /* PLL dividers, programmed into CAM_SYSCTL_PLL_DIVIDER_M_N/_P */
+       struct {
+               unsigned int m;
+               unsigned int n;
+               unsigned int p;
+       } pll;
+
+       /* Pixel rate, written to CAM_SENSOR_CFG_PIXCLK */
+       unsigned int pixrate;
+       /* True between successful start and stop of streaming */
+       bool streaming;
+
+       /* Pixel Array */
+       struct {
+               struct v4l2_subdev sd;
+               struct media_pad pad;
+
+               struct v4l2_ctrl_handler hdl;
+               /* Cached pointers used to toggle V4L2_CTRL_FLAG_VOLATILE */
+               struct v4l2_ctrl *exposure;
+               struct v4l2_ctrl *gain;
+               struct v4l2_ctrl *hblank;
+               struct v4l2_ctrl *vblank;
+       } pa;
+
+       /* Image Flow Processor */
+       struct {
+               struct v4l2_subdev sd;
+               struct media_pad pads[2];
+
+               struct v4l2_ctrl_handler hdl;
+               /* Frame rate in fps, applied by mt9m114_set_frame_rate() */
+               unsigned int frame_rate;
+
+               /* Test pattern controls, indexed by MT9M114_TPG_* */
+               struct v4l2_ctrl *tpg[4];
+       } ifp;
+};
+
+/* -----------------------------------------------------------------------------
+ * Formats
+ */
+
+/*
+ * Supported media bus formats. Entry order is significant: entries 0 and 1
+ * are the parallel and CSI-2 defaults, and the last entry is the only format
+ * accepted on the IFP sink pad (see mt9m114_format_info()).
+ */
+static const struct mt9m114_format_info mt9m114_format_infos[] = {
+       {
+               /*
+                * The first two entries are used as defaults, for parallel and
+                * CSI-2 buses respectively. Keep them in that order.
+                */
+               .code = MEDIA_BUS_FMT_UYVY8_2X8,
+               .flags = MT9M114_FMT_FLAG_PARALLEL,
+               .output_format = MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV,
+       }, {
+               .code = MEDIA_BUS_FMT_UYVY8_1X16,
+               .flags = MT9M114_FMT_FLAG_CSI2,
+               .output_format = MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV,
+       }, {
+               .code = MEDIA_BUS_FMT_YUYV8_2X8,
+               .flags = MT9M114_FMT_FLAG_PARALLEL,
+               .output_format = MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV
+                              | MT9M114_CAM_OUTPUT_FORMAT_SWAP_BYTES,
+       }, {
+               .code = MEDIA_BUS_FMT_YUYV8_1X16,
+               .flags = MT9M114_FMT_FLAG_CSI2,
+               .output_format = MT9M114_CAM_OUTPUT_FORMAT_FORMAT_YUV
+                              | MT9M114_CAM_OUTPUT_FORMAT_SWAP_BYTES,
+       }, {
+               .code = MEDIA_BUS_FMT_RGB565_2X8_LE,
+               .flags = MT9M114_FMT_FLAG_PARALLEL,
+               .output_format = MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_565RGB
+                              | MT9M114_CAM_OUTPUT_FORMAT_FORMAT_RGB
+                              | MT9M114_CAM_OUTPUT_FORMAT_SWAP_BYTES,
+       }, {
+               .code = MEDIA_BUS_FMT_RGB565_2X8_BE,
+               .flags = MT9M114_FMT_FLAG_PARALLEL,
+               .output_format = MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_565RGB
+                              | MT9M114_CAM_OUTPUT_FORMAT_FORMAT_RGB,
+       }, {
+               .code = MEDIA_BUS_FMT_RGB565_1X16,
+               .flags = MT9M114_FMT_FLAG_CSI2,
+               .output_format = MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_565RGB
+                              | MT9M114_CAM_OUTPUT_FORMAT_FORMAT_RGB,
+       }, {
+               .code = MEDIA_BUS_FMT_SGRBG8_1X8,
+               .output_format = MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_PROCESSED8
+                              | MT9M114_CAM_OUTPUT_FORMAT_FORMAT_BAYER,
+               .flags = MT9M114_FMT_FLAG_PARALLEL | MT9M114_FMT_FLAG_CSI2,
+       }, {
+               /* Keep the format compatible with the IFP sink pad last. */
+               .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+               .output_format = MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_RAWR10
+                       | MT9M114_CAM_OUTPUT_FORMAT_FORMAT_BAYER,
+               .flags = MT9M114_FMT_FLAG_PARALLEL | MT9M114_FMT_FLAG_CSI2,
+       }
+};
+
+/* Return the default format info for the sensor's configured bus type. */
+static const struct mt9m114_format_info *
+mt9m114_default_format_info(struct mt9m114 *sensor)
+{
+       bool is_csi2 = sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY;
+
+       /* Entries 0 and 1 are the parallel and CSI-2 defaults respectively. */
+       return &mt9m114_format_infos[is_csi2 ? 1 : 0];
+}
+
+/*
+ * Look up the format info for media bus code @code on pad @pad. Pad 0 (the
+ * IFP sink) only supports the raw Bayer format stored last in the table. On
+ * pad 1, an unsupported code falls back to the bus-specific default. Any
+ * other pad returns NULL.
+ */
+static const struct mt9m114_format_info *
+mt9m114_format_info(struct mt9m114 *sensor, unsigned int pad, u32 code)
+{
+       const unsigned int num_formats = ARRAY_SIZE(mt9m114_format_infos);
+       unsigned int flag;
+       unsigned int i;
+
+       if (pad == 0)
+               return &mt9m114_format_infos[num_formats - 1];
+
+       if (pad != 1)
+               return NULL;
+
+       flag = sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY
+            ? MT9M114_FMT_FLAG_CSI2 : MT9M114_FMT_FLAG_PARALLEL;
+
+       for (i = 0; i < num_formats; ++i) {
+               const struct mt9m114_format_info *info =
+                       &mt9m114_format_infos[i];
+
+               if (info->code == code && info->flags & flag)
+                       return info;
+       }
+
+       return mt9m114_default_format_info(sensor);
+}
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+/*
+ * Initial register settings, applied once by mt9m114_initialize(). Most of
+ * the raw register values here are tuning data with no public documentation;
+ * presumably taken from vendor reference settings — do not reorder.
+ */
+static const struct cci_reg_sequence mt9m114_init[] = {
+       { MT9M114_RESET_REGISTER, MT9M114_RESET_REGISTER_MASK_BAD |
+                                 MT9M114_RESET_REGISTER_LOCK_REG |
+                                 0x0010 },
+
+       /* Sensor optimization */
+       { CCI_REG16(0x316a), 0x8270 },
+       { CCI_REG16(0x316c), 0x8270 },
+       { CCI_REG16(0x3ed0), 0x2305 },
+       { CCI_REG16(0x3ed2), 0x77cf },
+       { CCI_REG16(0x316e), 0x8202 },
+       { CCI_REG16(0x3180), 0x87ff },
+       { CCI_REG16(0x30d4), 0x6080 },
+       { CCI_REG16(0xa802), 0x0008 },
+
+       { CCI_REG16(0x3e14), 0xff39 },
+
+       /* APGA */
+       { MT9M114_CAM_PGA_PGA_CONTROL,                  0x0000 },
+
+       /* Automatic White balance */
+       { MT9M114_CAM_AWB_CCM_L(0),                     0x0267 },
+       { MT9M114_CAM_AWB_CCM_L(1),                     0xff1a },
+       { MT9M114_CAM_AWB_CCM_L(2),                     0xffb3 },
+       { MT9M114_CAM_AWB_CCM_L(3),                     0xff80 },
+       { MT9M114_CAM_AWB_CCM_L(4),                     0x0166 },
+       { MT9M114_CAM_AWB_CCM_L(5),                     0x0003 },
+       { MT9M114_CAM_AWB_CCM_L(6),                     0xff9a },
+       { MT9M114_CAM_AWB_CCM_L(7),                     0xfeb4 },
+       { MT9M114_CAM_AWB_CCM_L(8),                     0x024d },
+       { MT9M114_CAM_AWB_CCM_M(0),                     0x01bf },
+       { MT9M114_CAM_AWB_CCM_M(1),                     0xff01 },
+       { MT9M114_CAM_AWB_CCM_M(2),                     0xfff3 },
+       { MT9M114_CAM_AWB_CCM_M(3),                     0xff75 },
+       { MT9M114_CAM_AWB_CCM_M(4),                     0x0198 },
+       { MT9M114_CAM_AWB_CCM_M(5),                     0xfffd },
+       { MT9M114_CAM_AWB_CCM_M(6),                     0xff9a },
+       { MT9M114_CAM_AWB_CCM_M(7),                     0xfee7 },
+       { MT9M114_CAM_AWB_CCM_M(8),                     0x02a8 },
+       { MT9M114_CAM_AWB_CCM_R(0),                     0x01d9 },
+       { MT9M114_CAM_AWB_CCM_R(1),                     0xff26 },
+       { MT9M114_CAM_AWB_CCM_R(2),                     0xfff3 },
+       { MT9M114_CAM_AWB_CCM_R(3),                     0xffb3 },
+       { MT9M114_CAM_AWB_CCM_R(4),                     0x0132 },
+       { MT9M114_CAM_AWB_CCM_R(5),                     0xffe8 },
+       { MT9M114_CAM_AWB_CCM_R(6),                     0xffda },
+       { MT9M114_CAM_AWB_CCM_R(7),                     0xfecd },
+       { MT9M114_CAM_AWB_CCM_R(8),                     0x02c2 },
+       { MT9M114_CAM_AWB_CCM_L_RG_GAIN,                0x0075 },
+       { MT9M114_CAM_AWB_CCM_L_BG_GAIN,                0x011c },
+       { MT9M114_CAM_AWB_CCM_M_RG_GAIN,                0x009a },
+       { MT9M114_CAM_AWB_CCM_M_BG_GAIN,                0x0105 },
+       { MT9M114_CAM_AWB_CCM_R_RG_GAIN,                0x00a4 },
+       { MT9M114_CAM_AWB_CCM_R_BG_GAIN,                0x00ac },
+       { MT9M114_CAM_AWB_CCM_L_CTEMP,                  0x0a8c },
+       { MT9M114_CAM_AWB_CCM_M_CTEMP,                  0x0f0a },
+       { MT9M114_CAM_AWB_CCM_R_CTEMP,                  0x1964 },
+       { MT9M114_CAM_AWB_AWB_XSHIFT_PRE_ADJ,           51 },
+       { MT9M114_CAM_AWB_AWB_YSHIFT_PRE_ADJ,           60 },
+       { MT9M114_CAM_AWB_AWB_XSCALE,                   3 },
+       { MT9M114_CAM_AWB_AWB_YSCALE,                   2 },
+       { MT9M114_CAM_AWB_AWB_WEIGHTS(0),               0x0000 },
+       { MT9M114_CAM_AWB_AWB_WEIGHTS(1),               0x0000 },
+       { MT9M114_CAM_AWB_AWB_WEIGHTS(2),               0x0000 },
+       { MT9M114_CAM_AWB_AWB_WEIGHTS(3),               0xe724 },
+       { MT9M114_CAM_AWB_AWB_WEIGHTS(4),               0x1583 },
+       { MT9M114_CAM_AWB_AWB_WEIGHTS(5),               0x2045 },
+       { MT9M114_CAM_AWB_AWB_WEIGHTS(6),               0x03ff },
+       { MT9M114_CAM_AWB_AWB_WEIGHTS(7),               0x007c },
+       { MT9M114_CAM_AWB_K_R_L,                        0x80 },
+       { MT9M114_CAM_AWB_K_G_L,                        0x80 },
+       { MT9M114_CAM_AWB_K_B_L,                        0x80 },
+       { MT9M114_CAM_AWB_K_R_R,                        0x88 },
+       { MT9M114_CAM_AWB_K_G_R,                        0x80 },
+       { MT9M114_CAM_AWB_K_B_R,                        0x80 },
+
+       /* Low-Light Image Enhancements */
+       { MT9M114_CAM_LL_START_BRIGHTNESS,              0x0020 },
+       { MT9M114_CAM_LL_STOP_BRIGHTNESS,               0x009a },
+       { MT9M114_CAM_LL_START_GAIN_METRIC,             0x0070 },
+       { MT9M114_CAM_LL_STOP_GAIN_METRIC,              0x00f3 },
+       { MT9M114_CAM_LL_START_CONTRAST_LUMA_PERCENTAGE, 0x20 },
+       { MT9M114_CAM_LL_STOP_CONTRAST_LUMA_PERCENTAGE, 0x9a },
+       { MT9M114_CAM_LL_START_SATURATION,              0x80 },
+       { MT9M114_CAM_LL_END_SATURATION,                0x4b },
+       { MT9M114_CAM_LL_START_DESATURATION,            0x00 },
+       { MT9M114_CAM_LL_END_DESATURATION,              0xff },
+       { MT9M114_CAM_LL_START_DEMOSAICING,             0x3c },
+       { MT9M114_CAM_LL_START_AP_GAIN,                 0x02 },
+       { MT9M114_CAM_LL_START_AP_THRESH,               0x06 },
+       { MT9M114_CAM_LL_STOP_DEMOSAICING,              0x64 },
+       { MT9M114_CAM_LL_STOP_AP_GAIN,                  0x01 },
+       { MT9M114_CAM_LL_STOP_AP_THRESH,                0x0c },
+       { MT9M114_CAM_LL_START_NR_RED,                  0x3c },
+       { MT9M114_CAM_LL_START_NR_GREEN,                0x3c },
+       { MT9M114_CAM_LL_START_NR_BLUE,                 0x3c },
+       { MT9M114_CAM_LL_START_NR_THRESH,               0x0f },
+       { MT9M114_CAM_LL_STOP_NR_RED,                   0x64 },
+       { MT9M114_CAM_LL_STOP_NR_GREEN,                 0x64 },
+       { MT9M114_CAM_LL_STOP_NR_BLUE,                  0x64 },
+       { MT9M114_CAM_LL_STOP_NR_THRESH,                0x32 },
+       { MT9M114_CAM_LL_START_CONTRAST_BM,             0x0020 },
+       { MT9M114_CAM_LL_STOP_CONTRAST_BM,              0x009a },
+       { MT9M114_CAM_LL_GAMMA,                         0x00dc },
+       { MT9M114_CAM_LL_START_CONTRAST_GRADIENT,       0x38 },
+       { MT9M114_CAM_LL_STOP_CONTRAST_GRADIENT,        0x30 },
+       /*
+        * NOTE(review): the two entries below write the same registers as the
+        * START/STOP_CONTRAST_LUMA_PERCENTAGE entries above with different
+        * values; the later values win. Confirm the duplication is intended.
+        */
+       { MT9M114_CAM_LL_START_CONTRAST_LUMA_PERCENTAGE, 0x50 },
+       { MT9M114_CAM_LL_STOP_CONTRAST_LUMA_PERCENTAGE, 0x19 },
+       { MT9M114_CAM_LL_START_FADE_TO_BLACK_LUMA,      0x0230 },
+       { MT9M114_CAM_LL_STOP_FADE_TO_BLACK_LUMA,       0x0010 },
+       { MT9M114_CAM_LL_CLUSTER_DC_TH_BM,              0x01cd },
+       { MT9M114_CAM_LL_CLUSTER_DC_GATE_PERCENTAGE,    0x05 },
+       { MT9M114_CAM_LL_SUMMING_SENSITIVITY_FACTOR,    0x40 },
+
+       /* Auto-Exposure */
+       { MT9M114_CAM_AET_TARGET_AVERAGE_LUMA_DARK,     0x1b },
+       { MT9M114_CAM_AET_AEMODE,                       0x00 },
+       { MT9M114_CAM_AET_TARGET_GAIN,                  0x0080 },
+       { MT9M114_CAM_AET_AE_MAX_VIRT_AGAIN,            0x0100 },
+       { MT9M114_CAM_AET_BLACK_CLIPPING_TARGET,        0x005a },
+
+       { MT9M114_CCM_DELTA_GAIN,                       0x05 },
+       { MT9M114_AE_TRACK_AE_TRACKING_DAMPENING_SPEED, 0x20 },
+
+       /* Pixel array timings and integration time */
+       { MT9M114_CAM_SENSOR_CFG_ROW_SPEED,             1 },
+       { MT9M114_CAM_SENSOR_CFG_FINE_INTEG_TIME_MIN,   219 },
+       { MT9M114_CAM_SENSOR_CFG_FINE_INTEG_TIME_MAX,   1459 },
+       { MT9M114_CAM_SENSOR_CFG_FINE_CORRECTION,       96 },
+       { MT9M114_CAM_SENSOR_CFG_REG_0_DATA,            32 },
+
+       /* Miscellaneous settings */
+       { MT9M114_PAD_SLEW,                             0x0777 },
+};
+
+/* -----------------------------------------------------------------------------
+ * Hardware Configuration
+ */
+
+/*
+ * Wait for a host command to complete: poll COMMAND_REGISTER until the
+ * @command bit self-clears (up to 100 polls, ~5 ms apart). Returns 0 on
+ * success, -ETIMEDOUT if the bit never clears, -EIO if the command completed
+ * without the OK bit set, or a negative cci_read() error.
+ */
+static int mt9m114_poll_command(struct mt9m114 *sensor, u32 command)
+{
+       unsigned int i;
+       u64 value;
+       int ret;
+
+       for (i = 0; i < 100; ++i) {
+               ret = cci_read(sensor->regmap, MT9M114_COMMAND_REGISTER, &value,
+                              NULL);
+               if (ret < 0)
+                       return ret;
+
+               /* The command bit self-clears on completion. */
+               if (!(value & command)) {
+                       if (value & MT9M114_COMMAND_REGISTER_OK)
+                               return 0;
+
+                       dev_err(&sensor->client->dev, "Command %u failed\n",
+                               command);
+                       return -EIO;
+               }
+
+               usleep_range(5000, 6000);
+       }
+
+       dev_err(&sensor->client->dev, "Command %u completion timeout\n",
+               command);
+       return -ETIMEDOUT;
+}
+
+/*
+ * Wait for the system manager to report @state: poll SYSMGR_CURRENT_STATE
+ * up to 100 times, ~1 ms apart. Returns 0 once the state is reached,
+ * -ETIMEDOUT otherwise, or a negative cci_read() error.
+ */
+static int mt9m114_poll_state(struct mt9m114 *sensor, u32 state)
+{
+       unsigned int retries = 100;
+       u64 current_state;
+       int ret;
+
+       while (retries--) {
+               ret = cci_read(sensor->regmap, MT9M114_SYSMGR_CURRENT_STATE,
+                              &current_state, NULL);
+               if (ret < 0)
+                       return ret;
+
+               if (current_state == state)
+                       return 0;
+
+               usleep_range(1000, 1500);
+       }
+
+       dev_err(&sensor->client->dev, "Timeout waiting for state 0x%02x\n",
+               state);
+       return -ETIMEDOUT;
+}
+
+/*
+ * Request a transition to @next_state via the SET_STATE host command and
+ * wait for it to complete. Returns 0 on success or a negative error code.
+ */
+static int mt9m114_set_state(struct mt9m114 *sensor, u8 next_state)
+{
+       int ret = 0;
+
+       /*
+        * Program the target state, then kick the state machine. cci_write()
+        * accumulates errors in ret, so a single check after both suffices.
+        */
+       cci_write(sensor->regmap, MT9M114_SYSMGR_NEXT_STATE, next_state, &ret);
+       cci_write(sensor->regmap, MT9M114_COMMAND_REGISTER,
+                 MT9M114_COMMAND_REGISTER_OK |
+                 MT9M114_COMMAND_REGISTER_SET_STATE, &ret);
+       if (ret < 0)
+               return ret;
+
+       /* Wait for the state transition to complete. */
+       ret = mt9m114_poll_command(sensor, MT9M114_COMMAND_REGISTER_SET_STATE);
+       return ret < 0 ? ret : 0;
+}
+
+/*
+ * One-time hardware initialization: apply the mt9m114_init table, program
+ * the PLL and pixel clock, select the output interface (parallel or CSI-2),
+ * run a Change-Config to latch the settings, and leave the sensor suspended.
+ * Returns 0 on success or a negative error code.
+ */
+static int mt9m114_initialize(struct mt9m114 *sensor)
+{
+       u32 value;
+       int ret;
+
+       ret = cci_multi_reg_write(sensor->regmap, mt9m114_init,
+                                 ARRAY_SIZE(mt9m114_init), NULL);
+       if (ret < 0) {
+               dev_err(&sensor->client->dev,
+                       "Failed to initialize the sensor\n");
+               return ret;
+       }
+
+       /* Configure the PLL. */
+       cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_ENABLE,
+                 MT9M114_CAM_SYSCTL_PLL_ENABLE_VALUE, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_DIVIDER_M_N,
+                 MT9M114_CAM_SYSCTL_PLL_DIVIDER_VALUE(sensor->pll.m,
+                                                      sensor->pll.n),
+                 &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_SYSCTL_PLL_DIVIDER_P,
+                 MT9M114_CAM_SYSCTL_PLL_DIVIDER_P_VALUE(sensor->pll.p), &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_PIXCLK,
+                 sensor->pixrate, &ret);
+
+       /* Configure the output mode. */
+       /* NOTE(review): bit 0x8000 of CAM_PORT_OUTPUT_CONTROL has no named
+        * macro and is undocumented here — presumably required; confirm. */
+       if (sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY) {
+               value = MT9M114_CAM_PORT_PORT_SELECT_MIPI
+                     | MT9M114_CAM_PORT_CHAN_NUM(0)
+                     | 0x8000;
+               if (!(sensor->bus_cfg.bus.mipi_csi2.flags &
+                     V4L2_MBUS_CSI2_NONCONTINUOUS_CLOCK))
+                       value |= MT9M114_CAM_PORT_CONT_MIPI_CLK;
+       } else {
+               value = MT9M114_CAM_PORT_PORT_SELECT_PARALLEL
+                     | 0x8000;
+       }
+       cci_write(sensor->regmap, MT9M114_CAM_PORT_OUTPUT_CONTROL, value, &ret);
+       if (ret < 0)
+               return ret;
+
+       /* Latch the new configuration, then park the sensor in suspend. */
+       ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
+       if (ret < 0)
+               return ret;
+
+       ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_SUSPEND);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/*
+ * Program the sensor from the pixel array and IFP subdev states: pixel array
+ * crop and binning, IFP crop/scaler windows, output size, AWB/AE statistics
+ * windows and the output media bus format. cci_write() accumulates errors in
+ * @ret; returns 0 on success or the first negative error.
+ */
+static int mt9m114_configure(struct mt9m114 *sensor,
+                            struct v4l2_subdev_state *pa_state,
+                            struct v4l2_subdev_state *ifp_state)
+{
+       const struct v4l2_mbus_framefmt *pa_format;
+       const struct v4l2_rect *pa_crop;
+       const struct mt9m114_format_info *ifp_info;
+       const struct v4l2_mbus_framefmt *ifp_format;
+       const struct v4l2_rect *ifp_crop;
+       const struct v4l2_rect *ifp_compose;
+       unsigned int hratio, vratio;
+       u64 output_format;
+       u64 read_mode;
+       int ret = 0;
+
+       pa_format = v4l2_subdev_get_pad_format(&sensor->pa.sd, pa_state, 0);
+       pa_crop = v4l2_subdev_get_pad_crop(&sensor->pa.sd, pa_state, 0);
+
+       ifp_format = v4l2_subdev_get_pad_format(&sensor->ifp.sd, ifp_state, 1);
+       ifp_info = mt9m114_format_info(sensor, 1, ifp_format->code);
+       ifp_crop = v4l2_subdev_get_pad_crop(&sensor->ifp.sd, ifp_state, 0);
+       ifp_compose = v4l2_subdev_get_pad_compose(&sensor->ifp.sd, ifp_state, 0);
+
+       /* Read-modify-write registers: fetch the current values first. */
+       ret = cci_read(sensor->regmap, MT9M114_CAM_SENSOR_CONTROL_READ_MODE,
+                      &read_mode, NULL);
+       if (ret < 0)
+               return ret;
+
+       ret = cci_read(sensor->regmap, MT9M114_CAM_OUTPUT_FORMAT,
+                      &output_format, NULL);
+       if (ret < 0)
+               return ret;
+
+       /* Binning factors derived from the crop-to-format size ratio. */
+       hratio = pa_crop->width / pa_format->width;
+       vratio = pa_crop->height / pa_format->height;
+
+       /*
+        * Pixel array crop and binning. The CAM_SENSOR_CFG_CPIPE_LAST_ROW
+        * register isn't clearly documented, but is always set to the number
+        * of active rows minus 4 divided by the vertical binning factor in all
+        * example sensor modes.
+        */
+       cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_X_ADDR_START,
+                 pa_crop->left, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_Y_ADDR_START,
+                 pa_crop->top, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_X_ADDR_END,
+                 pa_crop->width + pa_crop->left - 1, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_Y_ADDR_END,
+                 pa_crop->height + pa_crop->top - 1, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_CPIPE_LAST_ROW,
+                 (pa_crop->height - 4) / vratio - 1, &ret);
+
+       /* Enable summing in each direction where binning is requested. */
+       read_mode &= ~(MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_MASK |
+                      MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_MASK);
+
+       if (hratio > 1)
+               read_mode |= MT9M114_CAM_SENSOR_CONTROL_X_READ_OUT_SUMMING;
+       if (vratio > 1)
+               read_mode |= MT9M114_CAM_SENSOR_CONTROL_Y_READ_OUT_SUMMING;
+
+       cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CONTROL_READ_MODE,
+                 read_mode, &ret);
+
+       /*
+        * Color pipeline (IFP) cropping and scaling. Subtract 4 from the left
+        * and top coordinates to compensate for the lines and columns removed
+        * by demosaicing that are taken into account in the crop rectangle but
+        * not in the hardware.
+        */
+       cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_XOFFSET,
+                 ifp_crop->left - 4, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_YOFFSET,
+                 ifp_crop->top - 4, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_WIDTH,
+                 ifp_crop->width, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_CROP_WINDOW_HEIGHT,
+                 ifp_crop->height, &ret);
+
+       cci_write(sensor->regmap, MT9M114_CAM_OUTPUT_WIDTH,
+                 ifp_compose->width, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_OUTPUT_HEIGHT,
+                 ifp_compose->height, &ret);
+
+       /* AWB and AE windows, use the full frame. */
+       cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XSTART,
+                 0, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YSTART,
+                 0, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_XEND,
+                 ifp_compose->width - 1, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_STAT_AWB_CLIP_WINDOW_YEND,
+                 ifp_compose->height - 1, &ret);
+
+       /* The initial AE window spans a fifth of the image in each direction. */
+       cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XSTART,
+                 0, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YSTART,
+                 0, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_XEND,
+                 ifp_compose->width / 5 - 1, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_STAT_AE_INITIAL_WINDOW_YEND,
+                 ifp_compose->height / 5 - 1, &ret);
+
+       cci_write(sensor->regmap, MT9M114_CAM_CROP_CROPMODE,
+                 MT9M114_CAM_CROP_MODE_AWB_AUTO_CROP_EN |
+                 MT9M114_CAM_CROP_MODE_AE_AUTO_CROP_EN, &ret);
+
+       /* Set the media bus code. */
+       output_format &= ~(MT9M114_CAM_OUTPUT_FORMAT_RGB_FORMAT_MASK |
+                          MT9M114_CAM_OUTPUT_FORMAT_BAYER_FORMAT_MASK |
+                          MT9M114_CAM_OUTPUT_FORMAT_FORMAT_MASK |
+                          MT9M114_CAM_OUTPUT_FORMAT_SWAP_BYTES |
+                          MT9M114_CAM_OUTPUT_FORMAT_SWAP_RED_BLUE);
+       output_format |= ifp_info->output_format;
+
+       cci_write(sensor->regmap, MT9M114_CAM_OUTPUT_FORMAT,
+                 output_format, &ret);
+
+       return ret;
+}
+
+/*
+ * Fix the auto-exposure frame rate: write the configured fps (shifted left
+ * by 8 bits, as the AET registers expect) to both the minimum and maximum
+ * frame rate registers so they pin the same value.
+ */
+static int mt9m114_set_frame_rate(struct mt9m114 *sensor)
+{
+       u16 rate = sensor->ifp.frame_rate << 8;
+       int ret = 0;
+
+       cci_write(sensor->regmap, MT9M114_CAM_AET_MIN_FRAME_RATE, rate, &ret);
+       cci_write(sensor->regmap, MT9M114_CAM_AET_MAX_FRAME_RATE, rate, &ret);
+
+       return ret;
+}
+
+/*
+ * Start streaming: resume the device through runtime PM, program the
+ * configuration, frame rate and all control values, then trigger the
+ * Change-Config transition. On any failure the runtime PM reference taken
+ * here is dropped before returning the error.
+ */
+static int mt9m114_start_streaming(struct mt9m114 *sensor,
+                                  struct v4l2_subdev_state *pa_state,
+                                  struct v4l2_subdev_state *ifp_state)
+{
+       int ret;
+
+       ret = pm_runtime_resume_and_get(&sensor->client->dev);
+       if (ret)
+               return ret;
+
+       ret = mt9m114_configure(sensor, pa_state, ifp_state);
+       if (ret)
+               goto error;
+
+       ret = mt9m114_set_frame_rate(sensor);
+       if (ret)
+               goto error;
+
+       /* Apply all cached control values now that the device is powered. */
+       ret = __v4l2_ctrl_handler_setup(&sensor->pa.hdl);
+       if (ret)
+               goto error;
+
+       ret = __v4l2_ctrl_handler_setup(&sensor->ifp.hdl);
+       if (ret)
+               goto error;
+
+       /*
+        * The Change-Config state is transient and moves to the streaming
+        * state automatically.
+        */
+       ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
+       if (ret)
+               goto error;
+
+       sensor->streaming = true;
+
+       return 0;
+
+error:
+       pm_runtime_mark_last_busy(&sensor->client->dev);
+       pm_runtime_put_autosuspend(&sensor->client->dev);
+
+       return ret;
+}
+
+/*
+ * Stop streaming by entering the suspend state. The runtime PM reference
+ * taken in mt9m114_start_streaming() is dropped unconditionally, even if
+ * the state transition failed; the transition error is still returned.
+ */
+static int mt9m114_stop_streaming(struct mt9m114 *sensor)
+{
+       int ret;
+
+       sensor->streaming = false;
+
+       ret = mt9m114_set_state(sensor, MT9M114_SYS_STATE_ENTER_SUSPEND);
+
+       pm_runtime_mark_last_busy(&sensor->client->dev);
+       pm_runtime_put_autosuspend(&sensor->client->dev);
+
+       return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Common Subdev Operations
+ */
+
+/* Media entity operations: standard subdev link validation only. */
+static const struct media_entity_operations mt9m114_entity_ops = {
+       .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * Pixel Array Control Operations
+ */
+
+/* Retrieve the mt9m114 from a control owned by the pixel array handler. */
+static inline struct mt9m114 *pa_ctrl_to_mt9m114(struct v4l2_ctrl *ctrl)
+{
+       return container_of(ctrl->handler, struct mt9m114, pa.hdl);
+}
+
+/*
+ * Read volatile pixel array controls (exposure, analogue gain) from the
+ * hardware. If the device is not powered, return 0 without touching the
+ * hardware, leaving ctrl->val unchanged.
+ */
+static int mt9m114_pa_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+       struct mt9m114 *sensor = pa_ctrl_to_mt9m114(ctrl);
+       u64 value;
+       int ret;
+
+       /*
+        * NOTE(review): a negative pm_runtime_get_if_in_use() return (runtime
+        * PM disabled) is treated as "in use" by this check — confirm that is
+        * the intended behaviour.
+        */
+       if (!pm_runtime_get_if_in_use(&sensor->client->dev))
+               return 0;
+
+       switch (ctrl->id) {
+       case V4L2_CID_EXPOSURE:
+               ret = cci_read(sensor->regmap,
+                              MT9M114_CAM_SENSOR_CONTROL_COARSE_INTEGRATION_TIME,
+                              &value, NULL);
+               if (ret)
+                       break;
+
+               ctrl->val = value;
+               break;
+
+       case V4L2_CID_ANALOGUE_GAIN:
+               ret = cci_read(sensor->regmap,
+                              MT9M114_CAM_SENSOR_CONTROL_ANALOG_GAIN,
+                              &value, NULL);
+               if (ret)
+                       break;
+
+               ctrl->val = value;
+               break;
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       /* Balance the runtime PM reference taken above. */
+       pm_runtime_mark_last_busy(&sensor->client->dev);
+       pm_runtime_put_autosuspend(&sensor->client->dev);
+
+       return ret;
+}
+
+/*
+ * Apply a pixel array control value to the hardware. Values are written only
+ * while the device is powered; they are (re)applied at stream start through
+ * __v4l2_ctrl_handler_setup() in mt9m114_start_streaming().
+ */
+static int mt9m114_pa_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+       struct mt9m114 *sensor = pa_ctrl_to_mt9m114(ctrl);
+       const struct v4l2_mbus_framefmt *format;
+       struct v4l2_subdev_state *state;
+       int ret = 0;
+       u64 mask;
+
+       /* V4L2 controls values are applied only when power is up. */
+       if (!pm_runtime_get_if_in_use(&sensor->client->dev))
+               return 0;
+
+       state = v4l2_subdev_get_locked_active_state(&sensor->pa.sd);
+       format = v4l2_subdev_get_pad_format(&sensor->pa.sd, state, 0);
+
+       switch (ctrl->id) {
+       case V4L2_CID_HBLANK:
+               /* Line length is the active width plus horizontal blanking. */
+               cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_LINE_LENGTH_PCK,
+                         ctrl->val + format->width, &ret);
+               break;
+
+       case V4L2_CID_VBLANK:
+               /* Frame length is the active height plus vertical blanking. */
+               cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CFG_FRAME_LENGTH_LINES,
+                         ctrl->val + format->height, &ret);
+               break;
+
+       case V4L2_CID_EXPOSURE:
+               cci_write(sensor->regmap,
+                         MT9M114_CAM_SENSOR_CONTROL_COARSE_INTEGRATION_TIME,
+                         ctrl->val, &ret);
+               break;
+
+       case V4L2_CID_ANALOGUE_GAIN:
+               /*
+                * The CAM_SENSOR_CONTROL_ANALOG_GAIN contains linear analog
+                * gain values that are mapped to the GLOBAL_GAIN register
+                * values by the sensor firmware.
+                */
+               cci_write(sensor->regmap, MT9M114_CAM_SENSOR_CONTROL_ANALOG_GAIN,
+                         ctrl->val, &ret);
+               break;
+
+       case V4L2_CID_HFLIP:
+               mask = MT9M114_CAM_SENSOR_CONTROL_HORZ_MIRROR_EN;
+               ret = cci_update_bits(sensor->regmap,
+                                     MT9M114_CAM_SENSOR_CONTROL_READ_MODE,
+                                     mask, ctrl->val ? mask : 0, NULL);
+               break;
+
+       case V4L2_CID_VFLIP:
+               mask = MT9M114_CAM_SENSOR_CONTROL_VERT_FLIP_EN;
+               ret = cci_update_bits(sensor->regmap,
+                                     MT9M114_CAM_SENSOR_CONTROL_READ_MODE,
+                                     mask, ctrl->val ? mask : 0, NULL);
+               break;
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       pm_runtime_mark_last_busy(&sensor->client->dev);
+       pm_runtime_put_autosuspend(&sensor->client->dev);
+
+       return ret;
+}
+
+/* Control operations for the pixel array subdev control handler. */
+static const struct v4l2_ctrl_ops mt9m114_pa_ctrl_ops = {
+       .g_volatile_ctrl = mt9m114_pa_g_ctrl,
+       .s_ctrl = mt9m114_pa_s_ctrl,
+};
+
+/* Toggle the exposure and gain controls between auto (volatile) and manual. */
+static void mt9m114_pa_ctrl_update_exposure(struct mt9m114 *sensor, bool manual)
+{
+       /*
+        * Update the volatile flag on the manual exposure and gain controls.
+        * If the controls have switched to manual, read their current value
+        * from the hardware to ensure that control read and write operations
+        * will behave correctly.
+        */
+       if (manual) {
+               mt9m114_pa_g_ctrl(sensor->pa.exposure);
+               sensor->pa.exposure->cur.val = sensor->pa.exposure->val;
+               sensor->pa.exposure->flags &= ~V4L2_CTRL_FLAG_VOLATILE;
+
+               mt9m114_pa_g_ctrl(sensor->pa.gain);
+               sensor->pa.gain->cur.val = sensor->pa.gain->val;
+               sensor->pa.gain->flags &= ~V4L2_CTRL_FLAG_VOLATILE;
+       } else {
+               sensor->pa.exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+               sensor->pa.gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+       }
+}
+
+/*
+ * Recompute the HBLANK/VBLANK control ranges after a format change: the
+ * blanking maximum is the register maximum minus the active width/height.
+ * Must be called with the control handler lock held (uses __v4l2_ctrl_*).
+ */
+static void mt9m114_pa_ctrl_update_blanking(struct mt9m114 *sensor,
+                                           const struct v4l2_mbus_framefmt *format)
+{
+       unsigned int max_blank;
+
+       /* Update the blanking controls ranges based on the output size. */
+       max_blank = MT9M114_CAM_SENSOR_CFG_LINE_LENGTH_PCK_MAX
+                 - format->width;
+       __v4l2_ctrl_modify_range(sensor->pa.hblank, MT9M114_MIN_HBLANK,
+                                max_blank, 1, MT9M114_DEF_HBLANK);
+
+       max_blank = MT9M114_CAM_SENSOR_CFG_FRAME_LENGTH_LINES_MAX
+                 - format->height;
+       __v4l2_ctrl_modify_range(sensor->pa.vblank, MT9M114_MIN_VBLANK,
+                                max_blank, 1, MT9M114_DEF_VBLANK);
+}
+
+/* -----------------------------------------------------------------------------
+ * Pixel Array Subdev Operations
+ */
+
+/* Map a pixel array subdev pointer back to its mt9m114 instance. */
+static inline struct mt9m114 *pa_to_mt9m114(struct v4l2_subdev *sd)
+{
+       return container_of(sd, struct mt9m114, pa.sd);
+}
+
+/*
+ * Initialize the pixel array subdev state: full-array crop and a raw
+ * Bayer (SGRBG10) format covering the whole pixel array.
+ */
+static int mt9m114_pa_init_cfg(struct v4l2_subdev *sd,
+                              struct v4l2_subdev_state *state)
+{
+       struct v4l2_mbus_framefmt *format;
+       struct v4l2_rect *crop;
+
+       crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+
+       crop->left = 0;
+       crop->top = 0;
+       crop->width = MT9M114_PIXEL_ARRAY_WIDTH;
+       crop->height = MT9M114_PIXEL_ARRAY_HEIGHT;
+
+       format = v4l2_subdev_get_pad_format(sd, state, 0);
+
+       format->width = MT9M114_PIXEL_ARRAY_WIDTH;
+       format->height = MT9M114_PIXEL_ARRAY_HEIGHT;
+       format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+       format->field = V4L2_FIELD_NONE;
+       format->colorspace = V4L2_COLORSPACE_RAW;
+       format->ycbcr_enc = V4L2_YCBCR_ENC_601;
+       format->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+       format->xfer_func = V4L2_XFER_FUNC_NONE;
+
+       return 0;
+}
+
+/* The pixel array produces a single media bus code: 10-bit SGRBG Bayer. */
+static int mt9m114_pa_enum_mbus_code(struct v4l2_subdev *sd,
+                                    struct v4l2_subdev_state *state,
+                                    struct v4l2_subdev_mbus_code_enum *code)
+{
+       if (code->index > 0)
+               return -EINVAL;
+
+       code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+       return 0;
+}
+
+/*
+ * Enumerate the two pixel array frame sizes: index 0 is the full array,
+ * index 1 the 2x binned size (full size divided by index + 1).
+ */
+static int mt9m114_pa_enum_framesizes(struct v4l2_subdev *sd,
+                                     struct v4l2_subdev_state *state,
+                                     struct v4l2_subdev_frame_size_enum *fse)
+{
+       if (fse->index > 1)
+               return -EINVAL;
+
+       if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+               return -EINVAL;
+
+       /* Report binning capability through frame size enumeration. */
+       fse->min_width = MT9M114_PIXEL_ARRAY_WIDTH / (fse->index + 1);
+       fse->max_width = MT9M114_PIXEL_ARRAY_WIDTH / (fse->index + 1);
+       fse->min_height = MT9M114_PIXEL_ARRAY_HEIGHT / (fse->index + 1);
+       fse->max_height = MT9M114_PIXEL_ARRAY_HEIGHT / (fse->index + 1);
+
+       return 0;
+}
+
+/*
+ * Set the pixel array format. Only the size is adjustable, and only by
+ * selecting a 1x or 2x binning factor relative to the crop rectangle;
+ * the requested size is mapped to the closest supported factor.
+ */
+static int mt9m114_pa_set_fmt(struct v4l2_subdev *sd,
+                             struct v4l2_subdev_state *state,
+                             struct v4l2_subdev_format *fmt)
+{
+       struct mt9m114 *sensor = pa_to_mt9m114(sd);
+       struct v4l2_mbus_framefmt *format;
+       struct v4l2_rect *crop;
+       unsigned int hscale;
+       unsigned int vscale;
+
+       crop = v4l2_subdev_get_pad_crop(sd, state, fmt->pad);
+       format = v4l2_subdev_get_pad_format(sd, state, fmt->pad);
+
+       /* The sensor can bin horizontally and vertically. */
+       hscale = DIV_ROUND_CLOSEST(crop->width, fmt->format.width ? : 1);
+       vscale = DIV_ROUND_CLOSEST(crop->height, fmt->format.height ? : 1);
+       format->width = crop->width / clamp(hscale, 1U, 2U);
+       format->height = crop->height / clamp(vscale, 1U, 2U);
+
+       fmt->format = *format;
+
+       /* The blanking ranges depend on the active size. */
+       if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+               mt9m114_pa_ctrl_update_blanking(sensor, format);
+
+       return 0;
+}
+
+/*
+ * Report the pixel array selection rectangles: the current crop, or the
+ * full pixel array for the default/bounds/native targets.
+ */
+static int mt9m114_pa_get_selection(struct v4l2_subdev *sd,
+                                   struct v4l2_subdev_state *state,
+                                   struct v4l2_subdev_selection *sel)
+{
+       switch (sel->target) {
+       case V4L2_SEL_TGT_CROP:
+               sel->r = *v4l2_subdev_get_pad_crop(sd, state, sel->pad);
+               return 0;
+
+       case V4L2_SEL_TGT_CROP_DEFAULT:
+       case V4L2_SEL_TGT_CROP_BOUNDS:
+       case V4L2_SEL_TGT_NATIVE_SIZE:
+               sel->r.left = 0;
+               sel->r.top = 0;
+               sel->r.width = MT9M114_PIXEL_ARRAY_WIDTH;
+               sel->r.height = MT9M114_PIXEL_ARRAY_HEIGHT;
+               return 0;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * Set the pixel array crop rectangle, clamping and aligning it to the
+ * hardware constraints, and reset the format to the crop size.
+ */
+static int mt9m114_pa_set_selection(struct v4l2_subdev *sd,
+                                   struct v4l2_subdev_state *state,
+                                   struct v4l2_subdev_selection *sel)
+{
+       struct mt9m114 *sensor = pa_to_mt9m114(sd);
+       struct v4l2_mbus_framefmt *format;
+       struct v4l2_rect *crop;
+
+       if (sel->target != V4L2_SEL_TGT_CROP)
+               return -EINVAL;
+
+       crop = v4l2_subdev_get_pad_crop(sd, state, sel->pad);
+       format = v4l2_subdev_get_pad_format(sd, state, sel->pad);
+
+       /*
+        * Clamp the crop rectangle. The vertical coordinates must be even, and
+        * the horizontal coordinates must be a multiple of 4.
+        *
+        * FIXME: The horizontal coordinates must be a multiple of 8 when
+        * binning, but binning is configured after setting the selection, so
+        * we can't tell here if it will be used.
+        */
+       crop->left = ALIGN(sel->r.left, 4);
+       crop->top = ALIGN(sel->r.top, 2);
+       crop->width = clamp_t(unsigned int, ALIGN(sel->r.width, 4),
+                             MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH,
+                             MT9M114_PIXEL_ARRAY_WIDTH - crop->left);
+       crop->height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
+                              MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT,
+                              MT9M114_PIXEL_ARRAY_HEIGHT - crop->top);
+
+       sel->r = *crop;
+
+       /* Reset the format. */
+       format->width = crop->width;
+       format->height = crop->height;
+
+       if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+               mt9m114_pa_ctrl_update_blanking(sensor, format);
+
+       return 0;
+}
+
+/* Pad operations for the pixel array subdev. */
+static const struct v4l2_subdev_pad_ops mt9m114_pa_pad_ops = {
+       .init_cfg = mt9m114_pa_init_cfg,
+       .enum_mbus_code = mt9m114_pa_enum_mbus_code,
+       .enum_frame_size = mt9m114_pa_enum_framesizes,
+       .get_fmt = v4l2_subdev_get_fmt,
+       .set_fmt = mt9m114_pa_set_fmt,
+       .get_selection = mt9m114_pa_get_selection,
+       .set_selection = mt9m114_pa_set_selection,
+};
+
+/* Subdev operations for the pixel array subdev. */
+static const struct v4l2_subdev_ops mt9m114_pa_ops = {
+       .pad = &mt9m114_pa_pad_ops,
+};
+
+/*
+ * Initialize the pixel array subdev: media entity (one source pad),
+ * control handler (blanking, exposure, gain, pixel rate, flips) and the
+ * finalized subdev state. On error the handler and entity are cleaned up.
+ *
+ * Return 0 on success or a negative error code.
+ */
+static int mt9m114_pa_init(struct mt9m114 *sensor)
+{
+       struct v4l2_ctrl_handler *hdl = &sensor->pa.hdl;
+       struct v4l2_subdev *sd = &sensor->pa.sd;
+       struct media_pad *pads = &sensor->pa.pad;
+       const struct v4l2_mbus_framefmt *format;
+       struct v4l2_subdev_state *state;
+       unsigned int max_exposure;
+       int ret;
+
+       /* Initialize the subdev. */
+       v4l2_subdev_init(sd, &mt9m114_pa_ops);
+       v4l2_i2c_subdev_set_name(sd, sensor->client, NULL, " pixel array");
+
+       sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+       sd->owner = THIS_MODULE;
+       sd->dev = &sensor->client->dev;
+       v4l2_set_subdevdata(sd, sensor->client);
+
+       /* Initialize the media entity. */
+       sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+       sd->entity.ops = &mt9m114_entity_ops;
+       pads[0].flags = MEDIA_PAD_FL_SOURCE;
+       ret = media_entity_pads_init(&sd->entity, 1, pads);
+       if (ret < 0)
+               return ret;
+
+       /* Initialize the control handler. */
+       v4l2_ctrl_handler_init(hdl, 7);
+
+       /* The range of the HBLANK and VBLANK controls will be updated below. */
+       sensor->pa.hblank = v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+                                             V4L2_CID_HBLANK,
+                                             MT9M114_DEF_HBLANK,
+                                             MT9M114_DEF_HBLANK, 1,
+                                             MT9M114_DEF_HBLANK);
+       sensor->pa.vblank = v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+                                             V4L2_CID_VBLANK,
+                                             MT9M114_DEF_VBLANK,
+                                             MT9M114_DEF_VBLANK, 1,
+                                             MT9M114_DEF_VBLANK);
+
+       /*
+        * The maximum coarse integration time is the frame length in lines
+        * minus two. The default is taken directly from the datasheet, but
+        * makes little sense as auto-exposure is enabled by default.
+        */
+       max_exposure = MT9M114_PIXEL_ARRAY_HEIGHT + MT9M114_MIN_VBLANK - 2;
+       sensor->pa.exposure = v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+                                               V4L2_CID_EXPOSURE, 1,
+                                               max_exposure, 1, 16);
+       /* Volatile while auto-exposure is active; see _ctrl_update_exposure(). */
+       if (sensor->pa.exposure)
+               sensor->pa.exposure->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+       sensor->pa.gain = v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+                                           V4L2_CID_ANALOGUE_GAIN, 1,
+                                           511, 1, 32);
+       if (sensor->pa.gain)
+               sensor->pa.gain->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+       v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+                         V4L2_CID_PIXEL_RATE,
+                         sensor->pixrate, sensor->pixrate, 1,
+                         sensor->pixrate);
+
+       v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+                         V4L2_CID_HFLIP,
+                         0, 1, 1, 0);
+       v4l2_ctrl_new_std(hdl, &mt9m114_pa_ctrl_ops,
+                         V4L2_CID_VFLIP,
+                         0, 1, 1, 0);
+
+       if (hdl->error) {
+               ret = hdl->error;
+               goto error;
+       }
+
+       /* Serialize subdev state access with the control handler lock. */
+       sd->state_lock = hdl->lock;
+
+       ret = v4l2_subdev_init_finalize(sd);
+       if (ret)
+               goto error;
+
+       /* Update the range of the blanking controls based on the format. */
+       state = v4l2_subdev_lock_and_get_active_state(sd);
+       format = v4l2_subdev_get_pad_format(sd, state, 0);
+       mt9m114_pa_ctrl_update_blanking(sensor, format);
+       v4l2_subdev_unlock_state(state);
+
+       sd->ctrl_handler = hdl;
+
+       return 0;
+
+error:
+       v4l2_ctrl_handler_free(&sensor->pa.hdl);
+       media_entity_cleanup(&sensor->pa.sd.entity);
+       return ret;
+}
+
+/* Release the pixel array control handler and media entity. */
+static void mt9m114_pa_cleanup(struct mt9m114 *sensor)
+{
+       v4l2_ctrl_handler_free(&sensor->pa.hdl);
+       media_entity_cleanup(&sensor->pa.sd.entity);
+}
+
+/* -----------------------------------------------------------------------------
+ * Image Flow Processor Control Operations
+ */
+
+/* Test pattern menu entries exposed through V4L2_CID_TEST_PATTERN. */
+static const char * const mt9m114_test_pattern_menu[] = {
+       "Disabled",
+       "Solid Color",
+       "100% Color Bars",
+       "Pseudo-Random",
+       "Fade-to-Gray Color Bars",
+       "Walking Ones 10-bit",
+       "Walking Ones 8-bit",
+};
+
+/*
+ * Keep in sync with mt9m114_test_pattern_menu: entry i here corresponds
+ * to menu index i + 1 ("Disabled" has no register value).
+ */
+static const unsigned int mt9m114_test_pattern_value[] = {
+       MT9M114_CAM_MODE_TEST_PATTERN_SELECT_SOLID,
+       MT9M114_CAM_MODE_TEST_PATTERN_SELECT_SOLID_BARS,
+       MT9M114_CAM_MODE_TEST_PATTERN_SELECT_RANDOM,
+       MT9M114_CAM_MODE_TEST_PATTERN_SELECT_FADING_BARS,
+       MT9M114_CAM_MODE_TEST_PATTERN_SELECT_WALKING_1S_10B,
+       MT9M114_CAM_MODE_TEST_PATTERN_SELECT_WALKING_1S_8B,
+};
+
+/* Map an IFP control back to its mt9m114 instance. */
+static inline struct mt9m114 *ifp_ctrl_to_mt9m114(struct v4l2_ctrl *ctrl)
+{
+       return container_of(ctrl->handler, struct mt9m114, ifp.hdl);
+}
+
+/*
+ * Image flow processor .s_ctrl handler: auto white balance, auto exposure
+ * and test pattern controls. The exposure/gain volatile flags are updated
+ * even when powered down; register writes are only performed with power up.
+ */
+static int mt9m114_ifp_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+       struct mt9m114 *sensor = ifp_ctrl_to_mt9m114(ctrl);
+       u32 value;
+       int ret = 0;
+
+       if (ctrl->id == V4L2_CID_EXPOSURE_AUTO)
+               mt9m114_pa_ctrl_update_exposure(sensor,
+                                               ctrl->val != V4L2_EXPOSURE_AUTO);
+
+       /* V4L2 control values are applied only when power is up. */
+       if (!pm_runtime_get_if_in_use(&sensor->client->dev))
+               return 0;
+
+       switch (ctrl->id) {
+       case V4L2_CID_AUTO_WHITE_BALANCE:
+               /* Control both the AWB mode and the CCM algorithm. */
+               if (ctrl->val)
+                       value = MT9M114_CAM_AWB_MODE_AUTO
+                             | MT9M114_CAM_AWB_MODE_EXCLUSIVE_AE;
+               else
+                       value = 0;
+
+               cci_write(sensor->regmap, MT9M114_CAM_AWB_AWBMODE, value, &ret);
+
+               if (ctrl->val)
+                       value = MT9M114_CCM_EXEC_CALC_CCM_MATRIX | 0x22;
+               else
+                       value = 0;
+
+               cci_write(sensor->regmap, MT9M114_CCM_ALGO, value, &ret);
+               break;
+
+       case V4L2_CID_EXPOSURE_AUTO:
+               if (ctrl->val == V4L2_EXPOSURE_AUTO)
+                       value = MT9M114_AE_TRACK_EXEC_AUTOMATIC_EXPOSURE
+                             | 0x00fe;
+               else
+                       value = 0;
+
+               /*
+                * cci_write() records any failure in ret, which is returned
+                * below; no extra error check is needed here.
+                */
+               cci_write(sensor->regmap, MT9M114_AE_TRACK_ALGO, value, &ret);
+               break;
+
+       case V4L2_CID_TEST_PATTERN:
+       case V4L2_CID_TEST_PATTERN_RED:
+       case V4L2_CID_TEST_PATTERN_GREENR:
+       case V4L2_CID_TEST_PATTERN_BLUE: {
+               unsigned int pattern = sensor->ifp.tpg[MT9M114_TPG_PATTERN]->val;
+
+               if (pattern) {
+                       cci_write(sensor->regmap, MT9M114_CAM_MODE_SELECT,
+                                 MT9M114_CAM_MODE_SELECT_TEST_PATTERN, &ret);
+                       cci_write(sensor->regmap,
+                                 MT9M114_CAM_MODE_TEST_PATTERN_SELECT,
+                                 mt9m114_test_pattern_value[pattern - 1], &ret);
+                       cci_write(sensor->regmap,
+                                 MT9M114_CAM_MODE_TEST_PATTERN_RED,
+                                 sensor->ifp.tpg[MT9M114_TPG_RED]->val, &ret);
+                       cci_write(sensor->regmap,
+                                 MT9M114_CAM_MODE_TEST_PATTERN_GREEN,
+                                 sensor->ifp.tpg[MT9M114_TPG_GREEN]->val, &ret);
+                       cci_write(sensor->regmap,
+                                 MT9M114_CAM_MODE_TEST_PATTERN_BLUE,
+                                 sensor->ifp.tpg[MT9M114_TPG_BLUE]->val, &ret);
+               } else {
+                       cci_write(sensor->regmap, MT9M114_CAM_MODE_SELECT,
+                                 MT9M114_CAM_MODE_SELECT_NORMAL, &ret);
+               }
+
+               /*
+                * A Config-Change needs to be issued for the change to take
+                * effect. If we're not streaming ignore this, the change will
+                * be applied when the stream is started.
+                */
+               if (ret || !sensor->streaming)
+                       break;
+
+               ret = mt9m114_set_state(sensor,
+                                       MT9M114_SYS_STATE_ENTER_CONFIG_CHANGE);
+               break;
+       }
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       pm_runtime_mark_last_busy(&sensor->client->dev);
+       pm_runtime_put_autosuspend(&sensor->client->dev);
+
+       return ret;
+}
+
+/* Control operations for the IFP subdev control handler. */
+static const struct v4l2_ctrl_ops mt9m114_ifp_ctrl_ops = {
+       .s_ctrl = mt9m114_ifp_s_ctrl,
+};
+
+/* -----------------------------------------------------------------------------
+ * Image Flow Processor Subdev Operations
+ */
+
+/* Map an IFP subdev pointer back to its mt9m114 instance. */
+static inline struct mt9m114 *ifp_to_mt9m114(struct v4l2_subdev *sd)
+{
+       return container_of(sd, struct mt9m114, ifp.sd);
+}
+
+/*
+ * Start or stop streaming. Starting locks both the IFP and pixel array
+ * active states (IFP first) for the duration of the configuration.
+ */
+static int mt9m114_ifp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+       struct v4l2_subdev_state *pa_state;
+       struct v4l2_subdev_state *ifp_state;
+       int ret;
+
+       if (!enable)
+               return mt9m114_stop_streaming(sensor);
+
+       ifp_state = v4l2_subdev_lock_and_get_active_state(&sensor->ifp.sd);
+       pa_state = v4l2_subdev_lock_and_get_active_state(&sensor->pa.sd);
+
+       ret = mt9m114_start_streaming(sensor, pa_state, ifp_state);
+
+       /* Unlock in reverse acquisition order. */
+       v4l2_subdev_unlock_state(pa_state);
+       v4l2_subdev_unlock_state(ifp_state);
+
+       return ret;
+}
+
+/*
+ * Report the current frame interval as 1 / frame_rate, reading the rate
+ * under the control handler lock that also guards it in s_frame_interval.
+ */
+static int mt9m114_ifp_g_frame_interval(struct v4l2_subdev *sd,
+                                       struct v4l2_subdev_frame_interval *interval)
+{
+       struct v4l2_fract *ival = &interval->interval;
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+
+       mutex_lock(sensor->ifp.hdl.lock);
+
+       ival->numerator = 1;
+       ival->denominator = sensor->ifp.frame_rate;
+
+       mutex_unlock(sensor->ifp.hdl.lock);
+
+       return 0;
+}
+
+/*
+ * Set the frame interval. The requested interval is converted to an
+ * integer frame rate capped at MT9M114_MAX_FRAME_RATE (a zero interval
+ * selects the maximum rate), and is applied immediately when streaming.
+ */
+static int mt9m114_ifp_s_frame_interval(struct v4l2_subdev *sd,
+                                       struct v4l2_subdev_frame_interval *interval)
+{
+       struct v4l2_fract *ival = &interval->interval;
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+       int ret = 0;
+
+       mutex_lock(sensor->ifp.hdl.lock);
+
+       if (ival->numerator != 0 && ival->denominator != 0)
+               sensor->ifp.frame_rate = min_t(unsigned int,
+                                              ival->denominator / ival->numerator,
+                                              MT9M114_MAX_FRAME_RATE);
+       else
+               sensor->ifp.frame_rate = MT9M114_MAX_FRAME_RATE;
+
+       /* Report the rate actually selected back to the caller. */
+       ival->numerator = 1;
+       ival->denominator = sensor->ifp.frame_rate;
+
+       if (sensor->streaming)
+               ret = mt9m114_set_frame_rate(sensor);
+
+       mutex_unlock(sensor->ifp.hdl.lock);
+
+       return ret;
+}
+
+/*
+ * Initialize the IFP subdev state: full-array raw format on the sink pad
+ * (pad 0), a crop that drops the 4-pixel demosaicing border on each side,
+ * a compose rectangle equal to the crop, and a processed default format
+ * of the composed size on the source pad (pad 1).
+ */
+static int mt9m114_ifp_init_cfg(struct v4l2_subdev *sd,
+                               struct v4l2_subdev_state *state)
+{
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+       struct v4l2_mbus_framefmt *format;
+       struct v4l2_rect *crop;
+       struct v4l2_rect *compose;
+
+       format = v4l2_subdev_get_pad_format(sd, state, 0);
+
+       format->width = MT9M114_PIXEL_ARRAY_WIDTH;
+       format->height = MT9M114_PIXEL_ARRAY_HEIGHT;
+       format->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+       format->field = V4L2_FIELD_NONE;
+       format->colorspace = V4L2_COLORSPACE_RAW;
+       format->ycbcr_enc = V4L2_YCBCR_ENC_601;
+       format->quantization = V4L2_QUANTIZATION_FULL_RANGE;
+       format->xfer_func = V4L2_XFER_FUNC_NONE;
+
+       crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+
+       /* Demosaicing consumes 4 pixels on each side. */
+       crop->left = 4;
+       crop->top = 4;
+       crop->width = format->width - 8;
+       crop->height = format->height - 8;
+
+       compose = v4l2_subdev_get_pad_compose(sd, state, 0);
+
+       compose->left = 0;
+       compose->top = 0;
+       compose->width = crop->width;
+       compose->height = crop->height;
+
+       format = v4l2_subdev_get_pad_format(sd, state, 1);
+
+       format->width = compose->width;
+       format->height = compose->height;
+       format->code = mt9m114_default_format_info(sensor)->code;
+       format->field = V4L2_FIELD_NONE;
+       format->colorspace = V4L2_COLORSPACE_SRGB;
+       format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+       format->quantization = V4L2_QUANTIZATION_DEFAULT;
+       format->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+
+       return 0;
+}
+
+/*
+ * Enumerate the IFP media bus codes. The sink pad (0) accepts the single
+ * raw Bayer code; the source pad (1) enumerates the format table entries
+ * that match the configured bus type (CSI-2 or parallel).
+ */
+static int mt9m114_ifp_enum_mbus_code(struct v4l2_subdev *sd,
+                                     struct v4l2_subdev_state *state,
+                                     struct v4l2_subdev_mbus_code_enum *code)
+{
+       const unsigned int num_formats = ARRAY_SIZE(mt9m114_format_infos);
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+       unsigned int index = 0;
+       unsigned int flag;
+       unsigned int i;
+
+       switch (code->pad) {
+       case 0:
+               if (code->index != 0)
+                       return -EINVAL;
+
+               /* The raw code sits at the end of the format table. */
+               code->code = mt9m114_format_infos[num_formats - 1].code;
+               return 0;
+
+       case 1:
+               if (sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY)
+                       flag = MT9M114_FMT_FLAG_CSI2;
+               else
+                       flag = MT9M114_FMT_FLAG_PARALLEL;
+
+               /* Count only entries supported on the active bus. */
+               for (i = 0; i < num_formats; ++i) {
+                       const struct mt9m114_format_info *info =
+                               &mt9m114_format_infos[i];
+
+                       if (info->flags & flag) {
+                               if (index == code->index) {
+                                       code->code = info->code;
+                                       return 0;
+                               }
+
+                               index++;
+                       }
+               }
+
+               return -EINVAL;
+
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * Enumerate IFP frame sizes. The sink pad reports the full pixel array
+ * range; the source pad reports the sink crop size down to a quarter of
+ * it (the scaler can only downscale).
+ */
+static int mt9m114_ifp_enum_framesizes(struct v4l2_subdev *sd,
+                                      struct v4l2_subdev_state *state,
+                                      struct v4l2_subdev_frame_size_enum *fse)
+{
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+       const struct mt9m114_format_info *info;
+
+       if (fse->index > 0)
+               return -EINVAL;
+
+       info = mt9m114_format_info(sensor, fse->pad, fse->code);
+       if (!info || info->code != fse->code)
+               return -EINVAL;
+
+       if (fse->pad == 0) {
+               fse->min_width = MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH;
+               fse->max_width = MT9M114_PIXEL_ARRAY_WIDTH;
+               fse->min_height = MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT;
+               fse->max_height = MT9M114_PIXEL_ARRAY_HEIGHT;
+       } else {
+               const struct v4l2_rect *crop;
+
+               crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+
+               fse->max_width = crop->width;
+               fse->max_height = crop->height;
+
+               fse->min_width = fse->max_width / 4;
+               fse->min_height = fse->max_height / 4;
+       }
+
+       return 0;
+}
+
+/* A single frame interval is enumerated: 1 / MT9M114_MAX_FRAME_RATE. */
+static int mt9m114_ifp_enum_frameintervals(struct v4l2_subdev *sd,
+                                          struct v4l2_subdev_state *state,
+                                          struct v4l2_subdev_frame_interval_enum *fie)
+{
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+       const struct mt9m114_format_info *info;
+
+       if (fie->index > 0)
+               return -EINVAL;
+
+       /* Reject codes not supported on the requested pad. */
+       info = mt9m114_format_info(sensor, fie->pad, fie->code);
+       if (!info || info->code != fie->code)
+               return -EINVAL;
+
+       fie->interval.numerator = 1;
+       fie->interval.denominator = MT9M114_MAX_FRAME_RATE;
+
+       return 0;
+}
+
+/*
+ * Set the IFP format. On the sink pad only the size may change (aligned
+ * to 8 and clamped to the pixel array limits); on the source pad only the
+ * media bus code may change, and selecting RAW10 bypasses the scaler by
+ * copying the sink format verbatim.
+ */
+static int mt9m114_ifp_set_fmt(struct v4l2_subdev *sd,
+                              struct v4l2_subdev_state *state,
+                              struct v4l2_subdev_format *fmt)
+{
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+       struct v4l2_mbus_framefmt *format;
+
+       format = v4l2_subdev_get_pad_format(sd, state, fmt->pad);
+
+       if (fmt->pad == 0) {
+               /* Only the size can be changed on the sink pad. */
+               format->width = clamp(ALIGN(fmt->format.width, 8),
+                                     MT9M114_PIXEL_ARRAY_MIN_OUTPUT_WIDTH,
+                                     MT9M114_PIXEL_ARRAY_WIDTH);
+               format->height = clamp(ALIGN(fmt->format.height, 8),
+                                      MT9M114_PIXEL_ARRAY_MIN_OUTPUT_HEIGHT,
+                                      MT9M114_PIXEL_ARRAY_HEIGHT);
+       } else {
+               const struct mt9m114_format_info *info;
+
+               /* Only the media bus code can be changed on the source pad. */
+               info = mt9m114_format_info(sensor, 1, fmt->format.code);
+
+               /*
+                * NOTE(review): info is dereferenced without a NULL check,
+                * unlike the enum_* callbacks above — presumably the helper
+                * returns a fallback entry for pad 1; verify against its
+                * definition.
+                */
+               format->code = info->code;
+
+               /* If the output format is RAW10, bypass the scaler. */
+               if (format->code == MEDIA_BUS_FMT_SGRBG10_1X10)
+                       *format = *v4l2_subdev_get_pad_format(sd, state, 0);
+       }
+
+       fmt->format = *format;
+
+       return 0;
+}
+
+/*
+ * Report the IFP selection rectangles on the sink pad: the current crop
+ * and compose rectangles, plus their default/bounds targets derived from
+ * the sink format and the crop respectively.
+ */
+static int mt9m114_ifp_get_selection(struct v4l2_subdev *sd,
+                                    struct v4l2_subdev_state *state,
+                                    struct v4l2_subdev_selection *sel)
+{
+       const struct v4l2_mbus_framefmt *format;
+       const struct v4l2_rect *crop;
+       int ret = 0;
+
+       /* Crop and compose are only supported on the sink pad. */
+       if (sel->pad != 0)
+               return -EINVAL;
+
+       switch (sel->target) {
+       case V4L2_SEL_TGT_CROP:
+               sel->r = *v4l2_subdev_get_pad_crop(sd, state, 0);
+               break;
+
+       case V4L2_SEL_TGT_CROP_DEFAULT:
+       case V4L2_SEL_TGT_CROP_BOUNDS:
+               /*
+                * The crop default and bounds are equal to the sink
+                * format size minus 4 pixels on each side for demosaicing.
+                */
+               format = v4l2_subdev_get_pad_format(sd, state, 0);
+
+               sel->r.left = 4;
+               sel->r.top = 4;
+               sel->r.width = format->width - 8;
+               sel->r.height = format->height - 8;
+               break;
+
+       case V4L2_SEL_TGT_COMPOSE:
+               sel->r = *v4l2_subdev_get_pad_compose(sd, state, 0);
+               break;
+
+       case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+       case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+               /*
+                * The compose default and bounds sizes are equal to the sink
+                * crop rectangle size.
+                */
+               crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+               sel->r.left = 0;
+               sel->r.top = 0;
+               sel->r.width = crop->width;
+               sel->r.height = crop->height;
+               break;
+
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Set the IFP crop or compose rectangle on the sink pad. A crop change
+ * propagates to the compose rectangle, and either change propagates to
+ * the source pad format size.
+ */
+static int mt9m114_ifp_set_selection(struct v4l2_subdev *sd,
+                                    struct v4l2_subdev_state *state,
+                                    struct v4l2_subdev_selection *sel)
+{
+       struct v4l2_mbus_framefmt *format;
+       struct v4l2_rect *crop;
+       struct v4l2_rect *compose;
+
+       if (sel->target != V4L2_SEL_TGT_CROP &&
+           sel->target != V4L2_SEL_TGT_COMPOSE)
+               return -EINVAL;
+
+       /* Crop and compose are only supported on the sink pad. */
+       if (sel->pad != 0)
+               return -EINVAL;
+
+       format = v4l2_subdev_get_pad_format(sd, state, 0);
+       crop = v4l2_subdev_get_pad_crop(sd, state, 0);
+       compose = v4l2_subdev_get_pad_compose(sd, state, 0);
+
+       if (sel->target == V4L2_SEL_TGT_CROP) {
+               /*
+                * Clamp the crop rectangle. Demosaicing removes 4 pixels on
+                * each side of the image.
+                */
+               crop->left = clamp_t(unsigned int, ALIGN(sel->r.left, 2), 4,
+                                    format->width - 4 -
+                                    MT9M114_SCALER_CROPPED_INPUT_WIDTH);
+               crop->top = clamp_t(unsigned int, ALIGN(sel->r.top, 2), 4,
+                                   format->height - 4 -
+                                   MT9M114_SCALER_CROPPED_INPUT_HEIGHT);
+               crop->width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
+                                     MT9M114_SCALER_CROPPED_INPUT_WIDTH,
+                                     format->width - 4 - crop->left);
+               crop->height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
+                                      MT9M114_SCALER_CROPPED_INPUT_HEIGHT,
+                                      format->height - 4 - crop->top);
+
+               sel->r = *crop;
+
+               /* Propagate to the compose rectangle. */
+               compose->width = crop->width;
+               compose->height = crop->height;
+       } else {
+               /*
+                * Clamp the compose rectangle. The scaler can only downscale.
+                */
+               compose->left = 0;
+               compose->top = 0;
+               compose->width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
+                                        MT9M114_SCALER_CROPPED_INPUT_WIDTH,
+                                        crop->width);
+               compose->height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
+                                         MT9M114_SCALER_CROPPED_INPUT_HEIGHT,
+                                         crop->height);
+
+               sel->r = *compose;
+       }
+
+       /* Propagate the compose rectangle to the source format. */
+       format = v4l2_subdev_get_pad_format(sd, state, 1);
+       format->width = compose->width;
+       format->height = compose->height;
+
+       return 0;
+}
+
+/* Unregister the pixel array subdev when the IFP subdev goes away. */
+static void mt9m114_ifp_unregistered(struct v4l2_subdev *sd)
+{
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+
+       v4l2_device_unregister_subdev(&sensor->pa.sd);
+}
+
+/*
+ * When the IFP subdev is registered with a v4l2_device, register the
+ * pixel array subdev too and create the immutable pixel-array -> IFP
+ * media link. Undo the registration if link creation fails.
+ */
+static int mt9m114_ifp_registered(struct v4l2_subdev *sd)
+{
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+       int ret;
+
+       ret = v4l2_device_register_subdev(sd->v4l2_dev, &sensor->pa.sd);
+       if (ret < 0) {
+               dev_err(&sensor->client->dev,
+                       "Failed to register pixel array subdev\n");
+               return ret;
+       }
+
+       ret = media_create_pad_link(&sensor->pa.sd.entity, 0,
+                                   &sensor->ifp.sd.entity, 0,
+                                   MEDIA_LNK_FL_ENABLED |
+                                   MEDIA_LNK_FL_IMMUTABLE);
+       if (ret < 0) {
+               dev_err(&sensor->client->dev,
+                       "Failed to link pixel array to ifp\n");
+               v4l2_device_unregister_subdev(&sensor->pa.sd);
+               return ret;
+       }
+
+       return 0;
+}
+
+static const struct v4l2_subdev_video_ops mt9m114_ifp_video_ops = {
+       .s_stream = mt9m114_ifp_s_stream,
+       .g_frame_interval = mt9m114_ifp_g_frame_interval,
+       .s_frame_interval = mt9m114_ifp_s_frame_interval,
+};
+
+static const struct v4l2_subdev_pad_ops mt9m114_ifp_pad_ops = {
+       .init_cfg = mt9m114_ifp_init_cfg,
+       .enum_mbus_code = mt9m114_ifp_enum_mbus_code,
+       .enum_frame_size = mt9m114_ifp_enum_framesizes,
+       .enum_frame_interval = mt9m114_ifp_enum_frameintervals,
+       .get_fmt = v4l2_subdev_get_fmt,
+       .set_fmt = mt9m114_ifp_set_fmt,
+       .get_selection = mt9m114_ifp_get_selection,
+       .set_selection = mt9m114_ifp_set_selection,
+};
+
+static const struct v4l2_subdev_ops mt9m114_ifp_ops = {
+       .video = &mt9m114_ifp_video_ops,
+       .pad = &mt9m114_ifp_pad_ops,
+};
+
+static const struct v4l2_subdev_internal_ops mt9m114_ifp_internal_ops = {
+       .registered = mt9m114_ifp_registered,
+       .unregistered = mt9m114_ifp_unregistered,
+};
+
+static int mt9m114_ifp_init(struct mt9m114 *sensor)
+{
+       struct v4l2_subdev *sd = &sensor->ifp.sd;
+       struct media_pad *pads = sensor->ifp.pads;
+       struct v4l2_ctrl_handler *hdl = &sensor->ifp.hdl;
+       struct v4l2_ctrl *link_freq;
+       int ret;
+
+       /* Initialize the subdev. */
+       v4l2_i2c_subdev_init(sd, sensor->client, &mt9m114_ifp_ops);
+       v4l2_i2c_subdev_set_name(sd, sensor->client, NULL, " ifp");
+
+       sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+       sd->internal_ops = &mt9m114_ifp_internal_ops;
+
+       /* Initialize the media entity. */
+       sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_ISP;
+       sd->entity.ops = &mt9m114_entity_ops;
+       pads[0].flags = MEDIA_PAD_FL_SINK;
+       pads[1].flags = MEDIA_PAD_FL_SOURCE;
+       ret = media_entity_pads_init(&sd->entity, 2, pads);
+       if (ret < 0)
+               return ret;
+
+       sensor->ifp.frame_rate = MT9M114_DEF_FRAME_RATE;
+
+       /* Initialize the control handler. */
+       v4l2_ctrl_handler_init(hdl, 8);
+       v4l2_ctrl_new_std(hdl, &mt9m114_ifp_ctrl_ops,
+                         V4L2_CID_AUTO_WHITE_BALANCE,
+                         0, 1, 1, 1);
+       v4l2_ctrl_new_std_menu(hdl, &mt9m114_ifp_ctrl_ops,
+                              V4L2_CID_EXPOSURE_AUTO,
+                              V4L2_EXPOSURE_MANUAL, 0,
+                              V4L2_EXPOSURE_AUTO);
+
+       link_freq = v4l2_ctrl_new_int_menu(hdl, &mt9m114_ifp_ctrl_ops,
+                                          V4L2_CID_LINK_FREQ,
+                                          sensor->bus_cfg.nr_of_link_frequencies - 1,
+                                          0, sensor->bus_cfg.link_frequencies);
+       if (link_freq)
+               link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+       v4l2_ctrl_new_std(hdl, &mt9m114_ifp_ctrl_ops,
+                         V4L2_CID_PIXEL_RATE,
+                         sensor->pixrate, sensor->pixrate, 1,
+                         sensor->pixrate);
+
+       sensor->ifp.tpg[MT9M114_TPG_PATTERN] =
+               v4l2_ctrl_new_std_menu_items(hdl, &mt9m114_ifp_ctrl_ops,
+                                            V4L2_CID_TEST_PATTERN,
+                                            ARRAY_SIZE(mt9m114_test_pattern_menu) - 1,
+                                            0, 0, mt9m114_test_pattern_menu);
+       sensor->ifp.tpg[MT9M114_TPG_RED] =
+               v4l2_ctrl_new_std(hdl, &mt9m114_ifp_ctrl_ops,
+                                 V4L2_CID_TEST_PATTERN_RED,
+                                 0, 1023, 1, 1023);
+       sensor->ifp.tpg[MT9M114_TPG_GREEN] =
+               v4l2_ctrl_new_std(hdl, &mt9m114_ifp_ctrl_ops,
+                                 V4L2_CID_TEST_PATTERN_GREENR,
+                                 0, 1023, 1, 1023);
+       sensor->ifp.tpg[MT9M114_TPG_BLUE] =
+               v4l2_ctrl_new_std(hdl, &mt9m114_ifp_ctrl_ops,
+                                 V4L2_CID_TEST_PATTERN_BLUE,
+                                 0, 1023, 1, 1023);
+
+       v4l2_ctrl_cluster(ARRAY_SIZE(sensor->ifp.tpg), sensor->ifp.tpg);
+
+       if (hdl->error) {
+               ret = hdl->error;
+               goto error;
+       }
+
+       sd->ctrl_handler = hdl;
+       sd->state_lock = hdl->lock;
+
+       ret = v4l2_subdev_init_finalize(sd);
+       if (ret)
+               goto error;
+
+       return 0;
+
+error:
+       v4l2_ctrl_handler_free(&sensor->ifp.hdl);
+       media_entity_cleanup(&sensor->ifp.sd.entity);
+       return ret;
+}
+
+static void mt9m114_ifp_cleanup(struct mt9m114 *sensor)
+{
+       v4l2_ctrl_handler_free(&sensor->ifp.hdl);
+       media_entity_cleanup(&sensor->ifp.sd.entity);
+}
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
+
+static int mt9m114_power_on(struct mt9m114 *sensor)
+{
+       int ret;
+
+       /* Enable power and clocks. */
+       ret = regulator_bulk_enable(ARRAY_SIZE(sensor->supplies),
+                                   sensor->supplies);
+       if (ret < 0)
+               return ret;
+
+       ret = clk_prepare_enable(sensor->clk);
+       if (ret < 0)
+               goto error_regulator;
+
+       /* Perform a hard reset if available, or a soft reset otherwise. */
+       if (sensor->reset) {
+               long freq = clk_get_rate(sensor->clk);
+               unsigned int duration;
+
+               /*
+                * The minimum duration is 50 clock cycles, thus typically
+                * around 2µs. Double it to be safe.
+                */
+               duration = DIV_ROUND_UP(2 * 50 * 1000000, freq);
+
+               gpiod_set_value(sensor->reset, 1);
+               udelay(duration);
+               gpiod_set_value(sensor->reset, 0);
+       } else {
+               /*
+                * The power may have just been turned on, we need to wait for
+                * the sensor to be ready to accept I2C commands.
+                */
+               usleep_range(44500, 50000);
+
+               cci_write(sensor->regmap, MT9M114_RESET_AND_MISC_CONTROL,
+                         MT9M114_RESET_SOC, &ret);
+               cci_write(sensor->regmap, MT9M114_RESET_AND_MISC_CONTROL, 0,
+                         &ret);
+
+               if (ret < 0) {
+                       dev_err(&sensor->client->dev, "Soft reset failed\n");
+                       goto error_clock;
+               }
+       }
+
+       /*
+        * Wait for the sensor to be ready to accept I2C commands by polling the
+        * command register to wait for initialization to complete.
+        */
+       usleep_range(44500, 50000);
+
+       ret = mt9m114_poll_command(sensor, MT9M114_COMMAND_REGISTER_SET_STATE);
+       if (ret < 0)
+               goto error_clock;
+
+       if (sensor->bus_cfg.bus_type == V4L2_MBUS_PARALLEL) {
+               /*
+                * In parallel mode (OE set to low), the sensor will enter the
+                * streaming state after initialization. Enter the standby
+                * manually to stop streaming.
+                */
+               ret = mt9m114_set_state(sensor,
+                                       MT9M114_SYS_STATE_ENTER_STANDBY);
+               if (ret < 0)
+                       goto error_clock;
+       }
+
+       /*
+        * Before issuing any Set-State command, we must ensure that the sensor
+        * reaches the standby mode (either initiated manually above in
+        * parallel mode, or automatically after reset in MIPI mode).
+        */
+       ret = mt9m114_poll_state(sensor, MT9M114_SYS_STATE_STANDBY);
+       if (ret < 0)
+               goto error_clock;
+
+       return 0;
+
+error_clock:
+       clk_disable_unprepare(sensor->clk);
+error_regulator:
+       regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
+       return ret;
+}
+
+static void mt9m114_power_off(struct mt9m114 *sensor)
+{
+       clk_disable_unprepare(sensor->clk);
+       regulator_bulk_disable(ARRAY_SIZE(sensor->supplies), sensor->supplies);
+}
+
+static int __maybe_unused mt9m114_runtime_resume(struct device *dev)
+{
+       struct v4l2_subdev *sd = dev_get_drvdata(dev);
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+       int ret;
+
+       ret = mt9m114_power_on(sensor);
+       if (ret)
+               return ret;
+
+       ret = mt9m114_initialize(sensor);
+       if (ret) {
+               mt9m114_power_off(sensor);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int __maybe_unused mt9m114_runtime_suspend(struct device *dev)
+{
+       struct v4l2_subdev *sd = dev_get_drvdata(dev);
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+
+       mt9m114_power_off(sensor);
+
+       return 0;
+}
+
+static const struct dev_pm_ops mt9m114_pm_ops = {
+       SET_RUNTIME_PM_OPS(mt9m114_runtime_suspend, mt9m114_runtime_resume, NULL)
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove
+ */
+
+static int mt9m114_clk_init(struct mt9m114 *sensor)
+{
+       unsigned int link_freq;
+
+       /* Hardcode the PLL multiplier and dividers to default settings. */
+       sensor->pll.m = 32;
+       sensor->pll.n = 1;
+       sensor->pll.p = 7;
+
+       /*
+        * Calculate the pixel rate and link frequency. The CSI-2 bus is clocked
+        * for 16-bit per pixel, transmitted in DDR over a single lane. For
+        * parallel mode, the sensor outputs one pixel in two PIXCLK cycles.
+        */
+       sensor->pixrate = clk_get_rate(sensor->clk) * sensor->pll.m
+                       / ((sensor->pll.n + 1) * (sensor->pll.p + 1));
+
+       link_freq = sensor->bus_cfg.bus_type == V4L2_MBUS_CSI2_DPHY
+                 ? sensor->pixrate * 8 : sensor->pixrate * 2;
+
+       if (sensor->bus_cfg.nr_of_link_frequencies != 1 ||
+           sensor->bus_cfg.link_frequencies[0] != link_freq) {
+               dev_err(&sensor->client->dev, "Unsupported DT link-frequencies\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int mt9m114_identify(struct mt9m114 *sensor)
+{
+       u64 major, minor, release, customer;
+       u64 value;
+       int ret;
+
+       ret = cci_read(sensor->regmap, MT9M114_CHIP_ID, &value, NULL);
+       if (ret) {
+               dev_err(&sensor->client->dev, "Failed to read chip ID\n");
+               return -ENXIO;
+       }
+
+       if (value != 0x2481) {
+               dev_err(&sensor->client->dev, "Invalid chip ID 0x%04llx\n",
+                       value);
+               return -ENXIO;
+       }
+
+       cci_read(sensor->regmap, MT9M114_MON_MAJOR_VERSION, &major, &ret);
+       cci_read(sensor->regmap, MT9M114_MON_MINOR_VERSION, &minor, &ret);
+       cci_read(sensor->regmap, MT9M114_MON_RELEASE_VERSION, &release, &ret);
+       cci_read(sensor->regmap, MT9M114_CUSTOMER_REV, &customer, &ret);
+       if (ret) {
+               dev_err(&sensor->client->dev, "Failed to read version\n");
+               return -ENXIO;
+       }
+
+       dev_dbg(&sensor->client->dev,
+               "monitor v%llu.%llu.%04llx customer rev 0x%04llx\n",
+               major, minor, release, customer);
+
+       return 0;
+}
+
+static int mt9m114_parse_dt(struct mt9m114 *sensor)
+{
+       struct fwnode_handle *fwnode = dev_fwnode(&sensor->client->dev);
+       struct fwnode_handle *ep;
+       int ret;
+
+       ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+       if (!ep) {
+               dev_err(&sensor->client->dev, "No endpoint found\n");
+               return -EINVAL;
+       }
+
+       sensor->bus_cfg.bus_type = V4L2_MBUS_UNKNOWN;
+       ret = v4l2_fwnode_endpoint_alloc_parse(ep, &sensor->bus_cfg);
+       fwnode_handle_put(ep);
+       if (ret < 0) {
+               dev_err(&sensor->client->dev, "Failed to parse endpoint\n");
+               goto error;
+       }
+
+       switch (sensor->bus_cfg.bus_type) {
+       case V4L2_MBUS_CSI2_DPHY:
+       case V4L2_MBUS_PARALLEL:
+               break;
+
+       default:
+               dev_err(&sensor->client->dev, "unsupported bus type %u\n",
+                       sensor->bus_cfg.bus_type);
+               ret = -EINVAL;
+               goto error;
+       }
+
+       return 0;
+
+error:
+       v4l2_fwnode_endpoint_free(&sensor->bus_cfg);
+       return ret;
+}
+
+static int mt9m114_probe(struct i2c_client *client)
+{
+       struct device *dev = &client->dev;
+       struct mt9m114 *sensor;
+       int ret;
+
+       sensor = devm_kzalloc(dev, sizeof(*sensor), GFP_KERNEL);
+       if (!sensor)
+               return -ENOMEM;
+
+       sensor->client = client;
+
+       sensor->regmap = devm_cci_regmap_init_i2c(client, 16);
+       if (IS_ERR(sensor->regmap)) {
+               dev_err(dev, "Unable to initialize I2C\n");
+               return -ENODEV;
+       }
+
+       ret = mt9m114_parse_dt(sensor);
+       if (ret < 0)
+               return ret;
+
+       /* Acquire clocks, GPIOs and regulators. */
+       sensor->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(sensor->clk)) {
+               ret = PTR_ERR(sensor->clk);
+               dev_err_probe(dev, ret, "Failed to get clock\n");
+               goto error_ep_free;
+       }
+
+       sensor->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(sensor->reset)) {
+               ret = PTR_ERR(sensor->reset);
+               dev_err_probe(dev, ret, "Failed to get reset GPIO\n");
+               goto error_ep_free;
+       }
+
+       sensor->supplies[0].supply = "vddio";
+       sensor->supplies[1].supply = "vdd";
+       sensor->supplies[2].supply = "vaa";
+
+       ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(sensor->supplies),
+                                     sensor->supplies);
+       if (ret < 0) {
+               dev_err_probe(dev, ret, "Failed to get regulators\n");
+               goto error_ep_free;
+       }
+
+       ret = mt9m114_clk_init(sensor);
+       if (ret)
+               goto error_ep_free;
+
+       /*
+        * Identify the sensor. The driver supports runtime PM, but needs to
+        * work when runtime PM is disabled in the kernel. To that end, power
+        * the sensor on manually here, and initialize it after identification
+        * to reach the same state as if resumed through runtime PM.
+        */
+       ret = mt9m114_power_on(sensor);
+       if (ret < 0) {
+               dev_err_probe(dev, ret, "Could not power on the device\n");
+               goto error_ep_free;
+       }
+
+       ret = mt9m114_identify(sensor);
+       if (ret < 0)
+               goto error_power_off;
+
+       ret = mt9m114_initialize(sensor);
+       if (ret < 0)
+               goto error_power_off;
+
+       /*
+        * Enable runtime PM with autosuspend. As the device has been powered
+        * manually, mark it as active, and increase the usage count without
+        * resuming the device.
+        */
+       pm_runtime_set_active(dev);
+       pm_runtime_get_noresume(dev);
+       pm_runtime_enable(dev);
+       pm_runtime_set_autosuspend_delay(dev, 1000);
+       pm_runtime_use_autosuspend(dev);
+
+       /* Initialize the subdevices. */
+       ret = mt9m114_pa_init(sensor);
+       if (ret < 0)
+               goto error_pm_cleanup;
+
+       ret = mt9m114_ifp_init(sensor);
+       if (ret < 0)
+               goto error_pa_cleanup;
+
+       ret = v4l2_async_register_subdev(&sensor->ifp.sd);
+       if (ret < 0)
+               goto error_ifp_cleanup;
+
+       /*
+        * Decrease the PM usage count. The device will get suspended after the
+        * autosuspend delay, turning the power off.
+        */
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_put_autosuspend(dev);
+
+       return 0;
+
+error_ifp_cleanup:
+       mt9m114_ifp_cleanup(sensor);
+error_pa_cleanup:
+       mt9m114_pa_cleanup(sensor);
+error_pm_cleanup:
+       pm_runtime_disable(dev);
+       pm_runtime_put_noidle(dev);
+error_power_off:
+       mt9m114_power_off(sensor);
+error_ep_free:
+       v4l2_fwnode_endpoint_free(&sensor->bus_cfg);
+       return ret;
+}
+
+static void mt9m114_remove(struct i2c_client *client)
+{
+       struct v4l2_subdev *sd = i2c_get_clientdata(client);
+       struct mt9m114 *sensor = ifp_to_mt9m114(sd);
+       struct device *dev = &client->dev;
+
+       v4l2_async_unregister_subdev(&sensor->ifp.sd);
+
+       mt9m114_ifp_cleanup(sensor);
+       mt9m114_pa_cleanup(sensor);
+       v4l2_fwnode_endpoint_free(&sensor->bus_cfg);
+
+       /*
+        * Disable runtime PM. In case runtime PM is disabled in the kernel,
+        * make sure to turn power off manually.
+        */
+       pm_runtime_disable(dev);
+       if (!pm_runtime_status_suspended(dev))
+               mt9m114_power_off(sensor);
+       pm_runtime_set_suspended(dev);
+}
+
+static const struct of_device_id mt9m114_of_ids[] = {
+       { .compatible = "onnn,mt9m114" },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt9m114_of_ids);
+
+static struct i2c_driver mt9m114_driver = {
+       .driver = {
+               .name   = "mt9m114",
+               .pm     = &mt9m114_pm_ops,
+               .of_match_table = mt9m114_of_ids,
+       },
+       .probe          = mt9m114_probe,
+       .remove         = mt9m114_remove,
+};
+
+module_i2c_driver(mt9m114_driver);
+
+MODULE_DESCRIPTION("onsemi MT9M114 Sensor Driver");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_LICENSE("GPL");
index 774861ba7747d5a7cdd32a7fa119acba290272d1..37a634b92cd56884d38a51fcc9914a8d7f8bfe2e 100644 (file)
@@ -49,9 +49,7 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
 
 struct mt9v011 {
        struct v4l2_subdev sd;
-#ifdef CONFIG_MEDIA_CONTROLLER
        struct media_pad pad;
-#endif
        struct v4l2_ctrl_handler ctrls;
        unsigned width, height;
        unsigned xtal;
@@ -483,9 +481,7 @@ static int mt9v011_probe(struct i2c_client *c)
        u16 version;
        struct mt9v011 *core;
        struct v4l2_subdev *sd;
-#ifdef CONFIG_MEDIA_CONTROLLER
        int ret;
-#endif
 
        /* Check if the adapter supports the needed features */
        if (!i2c_check_functionality(c->adapter,
@@ -499,14 +495,12 @@ static int mt9v011_probe(struct i2c_client *c)
        sd = &core->sd;
        v4l2_i2c_subdev_init(sd, c, &mt9v011_ops);
 
-#ifdef CONFIG_MEDIA_CONTROLLER
        core->pad.flags = MEDIA_PAD_FL_SOURCE;
        sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
 
        ret = media_entity_pads_init(&sd->entity, 1, &core->pad);
        if (ret < 0)
                return ret;
-#endif
 
        /* Check if the sensor is really a MT9V011 */
        version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
index 00e7bc6e3235c669647ea912f076bf1a8f31a19e..1c6f6cea1204591f44100be91c3e5630ae467853 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/log2.h>
+#include <linux/mod_devicetable.h>
 #include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/of_graph.h>
@@ -1046,7 +1047,6 @@ done:
 
 static int mt9v032_probe(struct i2c_client *client)
 {
-       const struct i2c_device_id *did = i2c_client_get_device_id(client);
        struct mt9v032_platform_data *pdata = mt9v032_get_pdata(client);
        struct mt9v032 *mt9v032;
        unsigned int i;
@@ -1076,7 +1076,7 @@ static int mt9v032_probe(struct i2c_client *client)
 
        mutex_init(&mt9v032->power_lock);
        mt9v032->pdata = pdata;
-       mt9v032->model = (const void *)did->driver_data;
+       mt9v032->model = i2c_get_match_data(client);
 
        v4l2_ctrl_handler_init(&mt9v032->ctrls, 11 +
                               ARRAY_SIZE(mt9v032_aegc_controls));
@@ -1272,29 +1272,27 @@ static const struct i2c_device_id mt9v032_id[] = {
        { "mt9v032m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V032_MONO] },
        { "mt9v034", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_COLOR] },
        { "mt9v034m", (kernel_ulong_t)&mt9v032_models[MT9V032_MODEL_V034_MONO] },
-       { }
+       { /* Sentinel */ }
 };
 MODULE_DEVICE_TABLE(i2c, mt9v032_id);
 
-#if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id mt9v032_of_match[] = {
-       { .compatible = "aptina,mt9v022" },
-       { .compatible = "aptina,mt9v022m" },
-       { .compatible = "aptina,mt9v024" },
-       { .compatible = "aptina,mt9v024m" },
-       { .compatible = "aptina,mt9v032" },
-       { .compatible = "aptina,mt9v032m" },
-       { .compatible = "aptina,mt9v034" },
-       { .compatible = "aptina,mt9v034m" },
+       { .compatible = "aptina,mt9v022", .data = &mt9v032_models[MT9V032_MODEL_V022_COLOR] },
+       { .compatible = "aptina,mt9v022m", .data = &mt9v032_models[MT9V032_MODEL_V022_MONO] },
+       { .compatible = "aptina,mt9v024", .data = &mt9v032_models[MT9V032_MODEL_V024_COLOR] },
+       { .compatible = "aptina,mt9v024m", .data = &mt9v032_models[MT9V032_MODEL_V024_MONO] },
+       { .compatible = "aptina,mt9v032", .data = &mt9v032_models[MT9V032_MODEL_V032_COLOR] },
+       { .compatible = "aptina,mt9v032m", .data = &mt9v032_models[MT9V032_MODEL_V032_MONO] },
+       { .compatible = "aptina,mt9v034", .data = &mt9v032_models[MT9V032_MODEL_V034_COLOR] },
+       { .compatible = "aptina,mt9v034m", .data = &mt9v032_models[MT9V032_MODEL_V034_MONO] },
        { /* Sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, mt9v032_of_match);
-#endif
 
 static struct i2c_driver mt9v032_driver = {
        .driver = {
                .name = "mt9v032",
-               .of_match_table = of_match_ptr(mt9v032_of_match),
+               .of_match_table = mt9v032_of_match,
        },
        .probe          = mt9v032_probe,
        .remove         = mt9v032_remove,
index 1f7edc0f5b1abe542ab26024e081521c8b169d67..f859b49e13bf37cea817ef880da3379fbd6d7b2c 100644 (file)
@@ -121,9 +121,7 @@ struct mt9v111_dev {
        u8 addr_space;
 
        struct v4l2_subdev sd;
-#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
        struct media_pad pad;
-#endif
 
        struct v4l2_ctrl *auto_awb;
        struct v4l2_ctrl *auto_exp;
@@ -797,11 +795,7 @@ static struct v4l2_mbus_framefmt *__mt9v111_get_pad_format(
 {
        switch (which) {
        case V4L2_SUBDEV_FORMAT_TRY:
-#if IS_ENABLED(CONFIG_VIDEO_V4L2_SUBDEV_API)
                return v4l2_subdev_get_try_format(&mt9v111->sd, sd_state, pad);
-#else
-               return &sd_state->pads->try_fmt;
-#endif
        case V4L2_SUBDEV_FORMAT_ACTIVE:
                return &mt9v111->fmt;
        default:
@@ -987,11 +981,9 @@ static const struct v4l2_subdev_ops mt9v111_ops = {
        .pad    = &mt9v111_pad_ops,
 };
 
-#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
 static const struct media_entity_operations mt9v111_subdev_entity_ops = {
        .link_validate = v4l2_subdev_link_validate,
 };
-#endif
 
 /* --- V4L2 ctrl --- */
 static int mt9v111_s_ctrl(struct v4l2_ctrl *ctrl)
@@ -1203,7 +1195,6 @@ static int mt9v111_probe(struct i2c_client *client)
 
        v4l2_i2c_subdev_init(&mt9v111->sd, client, &mt9v111_ops);
 
-#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
        mt9v111->sd.flags       |= V4L2_SUBDEV_FL_HAS_DEVNODE;
        mt9v111->sd.entity.ops  = &mt9v111_subdev_entity_ops;
        mt9v111->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
@@ -1212,7 +1203,6 @@ static int mt9v111_probe(struct i2c_client *client)
        ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
        if (ret)
                goto error_free_entity;
-#endif
 
        ret = mt9v111_chip_probe(mt9v111);
        if (ret)
@@ -1225,9 +1215,7 @@ static int mt9v111_probe(struct i2c_client *client)
        return 0;
 
 error_free_entity:
-#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&mt9v111->sd.entity);
-#endif
 
 error_free_ctrls:
        v4l2_ctrl_handler_free(&mt9v111->ctrls);
@@ -1245,9 +1233,7 @@ static void mt9v111_remove(struct i2c_client *client)
 
        v4l2_async_unregister_subdev(sd);
 
-#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&sd->entity);
-#endif
 
        v4l2_ctrl_handler_free(&mt9v111->ctrls);
 
index 365ce56845836094a09b6fe1903aaa07b3bcf514..51378ba16a5d4d2cf4d9bf66f65b5008c85f4659 100644 (file)
@@ -434,9 +434,6 @@ struct og01a1b {
 
        /* To serialize asynchronus callbacks */
        struct mutex mutex;
-
-       /* Streaming on/off */
-       bool streaming;
 };
 
 static u64 to_pixel_rate(u32 f_index)
@@ -732,14 +729,10 @@ static int og01a1b_set_stream(struct v4l2_subdev *sd, int enable)
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        int ret = 0;
 
-       if (og01a1b->streaming == enable)
-               return 0;
-
        mutex_lock(&og01a1b->mutex);
        if (enable) {
-               ret = pm_runtime_get_sync(&client->dev);
-               if (ret < 0) {
-                       pm_runtime_put_noidle(&client->dev);
+               ret = pm_runtime_resume_and_get(&client->dev);
+               if (ret) {
                        mutex_unlock(&og01a1b->mutex);
                        return ret;
                }
@@ -755,50 +748,11 @@ static int og01a1b_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       og01a1b->streaming = enable;
        mutex_unlock(&og01a1b->mutex);
 
        return ret;
 }
 
-static int __maybe_unused og01a1b_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct og01a1b *og01a1b = to_og01a1b(sd);
-
-       mutex_lock(&og01a1b->mutex);
-       if (og01a1b->streaming)
-               og01a1b_stop_streaming(og01a1b);
-
-       mutex_unlock(&og01a1b->mutex);
-
-       return 0;
-}
-
-static int __maybe_unused og01a1b_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct og01a1b *og01a1b = to_og01a1b(sd);
-       int ret;
-
-       mutex_lock(&og01a1b->mutex);
-       if (og01a1b->streaming) {
-               ret = og01a1b_start_streaming(og01a1b);
-               if (ret) {
-                       og01a1b->streaming = false;
-                       og01a1b_stop_streaming(og01a1b);
-                       mutex_unlock(&og01a1b->mutex);
-                       return ret;
-               }
-       }
-
-       mutex_unlock(&og01a1b->mutex);
-
-       return 0;
-}
-
 static int og01a1b_set_format(struct v4l2_subdev *sd,
                              struct v4l2_subdev_state *sd_state,
                              struct v4l2_subdev_format *fmt)
@@ -1096,10 +1050,6 @@ probe_error_v4l2_ctrl_handler_free:
        return ret;
 }
 
-static const struct dev_pm_ops og01a1b_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(og01a1b_suspend, og01a1b_resume)
-};
-
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id og01a1b_acpi_ids[] = {
        {"OVTI01AC"},
@@ -1112,7 +1062,6 @@ MODULE_DEVICE_TABLE(acpi, og01a1b_acpi_ids);
 static struct i2c_driver og01a1b_i2c_driver = {
        .driver = {
                .name = "og01a1b",
-               .pm = &og01a1b_pm_ops,
                .acpi_match_table = ACPI_PTR(og01a1b_acpi_ids),
        },
        .probe = og01a1b_probe,
index 2b9e1b3a3bf4fcede7e5e58f7616bfcb395967a4..bbd5740d2280b6ff8a9c62254630d8fae01effe2 100644 (file)
@@ -287,9 +287,6 @@ struct ov01a10 {
        struct v4l2_ctrl *exposure;
 
        const struct ov01a10_mode *cur_mode;
-
-       /* streaming state */
-       bool streaming;
 };
 
 static inline struct ov01a10 *to_ov01a10(struct v4l2_subdev *subdev)
@@ -672,8 +669,6 @@ static int ov01a10_set_stream(struct v4l2_subdev *sd, int enable)
        int ret = 0;
 
        state = v4l2_subdev_lock_and_get_active_state(sd);
-       if (ov01a10->streaming == enable)
-               goto unlock;
 
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -685,55 +680,12 @@ static int ov01a10_set_stream(struct v4l2_subdev *sd, int enable)
                        pm_runtime_put(&client->dev);
                        goto unlock;
                }
-
-               goto done;
-       }
-
-       ov01a10_stop_streaming(ov01a10);
-       pm_runtime_put(&client->dev);
-done:
-       ov01a10->streaming = enable;
-unlock:
-       v4l2_subdev_unlock_state(state);
-
-       return ret;
-}
-
-static int __maybe_unused ov01a10_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct ov01a10 *ov01a10 = to_ov01a10(sd);
-       struct v4l2_subdev_state *state;
-
-       state = v4l2_subdev_lock_and_get_active_state(sd);
-       if (ov01a10->streaming)
-               ov01a10_stop_streaming(ov01a10);
-
-       v4l2_subdev_unlock_state(state);
-
-       return 0;
-}
-
-static int __maybe_unused ov01a10_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct ov01a10 *ov01a10 = to_ov01a10(sd);
-       struct v4l2_subdev_state *state;
-       int ret = 0;
-
-       state = v4l2_subdev_lock_and_get_active_state(sd);
-       if (!ov01a10->streaming)
-               goto exit;
-
-       ret = ov01a10_start_streaming(ov01a10);
-       if (ret) {
-               ov01a10->streaming = false;
+       } else {
                ov01a10_stop_streaming(ov01a10);
+               pm_runtime_put(&client->dev);
        }
 
-exit:
+unlock:
        v4l2_subdev_unlock_state(state);
 
        return ret;
@@ -973,10 +925,6 @@ err_handler_free:
        return ret;
 }
 
-static const struct dev_pm_ops ov01a10_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ov01a10_suspend, ov01a10_resume)
-};
-
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id ov01a10_acpi_ids[] = {
        { "OVTI01A0" },
@@ -989,7 +937,6 @@ MODULE_DEVICE_TABLE(acpi, ov01a10_acpi_ids);
 static struct i2c_driver ov01a10_i2c_driver = {
        .driver = {
                .name = "ov01a10",
-               .pm = &ov01a10_pm_ops,
                .acpi_match_table = ACPI_PTR(ov01a10_acpi_ids),
        },
        .probe = ov01a10_probe,
index 741d977a76f32fd0a1dd259f36dd03eaf8da9771..848e47a464acf48d551484eade0fae3f2b32bf4a 100644 (file)
@@ -570,8 +570,6 @@ unlock_and_return:
 }
 
 static const struct dev_pm_ops ov02a10_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
-                               pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(ov02a10_power_off, ov02a10_power_on, NULL)
 };
 
index 7d55d4ca24de188b2233e8787e371ed7b34e4de9..3d49e3fa8e5621c93fb253102acbdbab2d8fea16 100644 (file)
@@ -536,9 +536,6 @@ struct ov08d10 {
        /* To serialize asynchronus callbacks */
        struct mutex mutex;
 
-       /* Streaming on/off */
-       bool streaming;
-
        /* lanes index */
        u8 nlanes;
 
@@ -1103,9 +1100,6 @@ static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        int ret = 0;
 
-       if (ov08d10->streaming == enable)
-               return 0;
-
        mutex_lock(&ov08d10->mutex);
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -1125,8 +1119,6 @@ static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       ov08d10->streaming = enable;
-
        /* vflip and hflip cannot change during streaming */
        __v4l2_ctrl_grab(ov08d10->vflip, enable);
        __v4l2_ctrl_grab(ov08d10->hflip, enable);
@@ -1136,45 +1128,6 @@ static int ov08d10_set_stream(struct v4l2_subdev *sd, int enable)
        return ret;
 }
 
-static int __maybe_unused ov08d10_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct ov08d10 *ov08d10 = to_ov08d10(sd);
-
-       mutex_lock(&ov08d10->mutex);
-       if (ov08d10->streaming)
-               ov08d10_stop_streaming(ov08d10);
-
-       mutex_unlock(&ov08d10->mutex);
-
-       return 0;
-}
-
-static int __maybe_unused ov08d10_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct ov08d10 *ov08d10 = to_ov08d10(sd);
-       int ret;
-
-       mutex_lock(&ov08d10->mutex);
-
-       if (ov08d10->streaming) {
-               ret = ov08d10_start_streaming(ov08d10);
-               if (ret) {
-                       ov08d10->streaming = false;
-                       ov08d10_stop_streaming(ov08d10);
-                       mutex_unlock(&ov08d10->mutex);
-                       return ret;
-               }
-       }
-
-       mutex_unlock(&ov08d10->mutex);
-
-       return 0;
-}
-
 static int ov08d10_set_format(struct v4l2_subdev *sd,
                              struct v4l2_subdev_state *sd_state,
                              struct v4l2_subdev_format *fmt)
@@ -1501,10 +1454,6 @@ probe_error_v4l2_ctrl_handler_free:
        return ret;
 }
 
-static const struct dev_pm_ops ov08d10_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ov08d10_suspend, ov08d10_resume)
-};
-
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id ov08d10_acpi_ids[] = {
        { "OVTI08D1" },
@@ -1517,7 +1466,6 @@ MODULE_DEVICE_TABLE(acpi, ov08d10_acpi_ids);
 static struct i2c_driver ov08d10_i2c_driver = {
        .driver = {
                .name = "ov08d10",
-               .pm = &ov08d10_pm_ops,
                .acpi_match_table = ACPI_PTR(ov08d10_acpi_ids),
        },
        .probe = ov08d10_probe,
index 637da4df69011d22c31e0a9356f6ce9a73c3811a..b41b6866a0ab2cde2aa601d8c38fc24040a39643 100644 (file)
@@ -2432,9 +2432,6 @@ struct ov08x40 {
 
        /* Mutex for serialized access */
        struct mutex mutex;
-
-       /* Streaming on/off */
-       bool streaming;
 };
 
 #define to_ov08x40(_sd)        container_of(_sd, struct ov08x40, sd)
@@ -2915,10 +2912,6 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
        int ret = 0;
 
        mutex_lock(&ov08x->mutex);
-       if (ov08x->streaming == enable) {
-               mutex_unlock(&ov08x->mutex);
-               return 0;
-       }
 
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -2937,7 +2930,6 @@ static int ov08x40_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       ov08x->streaming = enable;
        mutex_unlock(&ov08x->mutex);
 
        return ret;
@@ -2950,37 +2942,6 @@ err_unlock:
        return ret;
 }
 
-static int __maybe_unused ov08x40_suspend(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov08x40 *ov08x = to_ov08x40(sd);
-
-       if (ov08x->streaming)
-               ov08x40_stop_streaming(ov08x);
-
-       return 0;
-}
-
-static int __maybe_unused ov08x40_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov08x40 *ov08x = to_ov08x40(sd);
-       int ret;
-
-       if (ov08x->streaming) {
-               ret = ov08x40_start_streaming(ov08x);
-               if (ret)
-                       goto error;
-       }
-
-       return 0;
-
-error:
-       ov08x40_stop_streaming(ov08x);
-       ov08x->streaming = false;
-       return ret;
-}
-
 /* Verify chip ID */
 static int ov08x40_identify_module(struct ov08x40 *ov08x)
 {
@@ -3294,10 +3255,6 @@ static void ov08x40_remove(struct i2c_client *client)
        pm_runtime_set_suspended(&client->dev);
 }
 
-static const struct dev_pm_ops ov08x40_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ov08x40_suspend, ov08x40_resume)
-};
-
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id ov08x40_acpi_ids[] = {
        {"OVTI08F4"},
@@ -3310,7 +3267,6 @@ MODULE_DEVICE_TABLE(acpi, ov08x40_acpi_ids);
 static struct i2c_driver ov08x40_i2c_driver = {
        .driver = {
                .name = "ov08x40",
-               .pm = &ov08x40_pm_ops,
                .acpi_match_table = ACPI_PTR(ov08x40_acpi_ids),
        },
        .probe = ov08x40_probe,
index 35652b362347265368738ca2aa932c2f82dfa08f..4c419014dd7b28b6ce60097263bc0b7b45323f96 100644 (file)
@@ -1044,9 +1044,6 @@ struct ov13858 {
 
        /* Mutex for serialized access */
        struct mutex mutex;
-
-       /* Streaming on/off */
-       bool streaming;
 };
 
 #define to_ov13858(_sd)        container_of(_sd, struct ov13858, sd)
@@ -1467,10 +1464,6 @@ static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
        int ret = 0;
 
        mutex_lock(&ov13858->mutex);
-       if (ov13858->streaming == enable) {
-               mutex_unlock(&ov13858->mutex);
-               return 0;
-       }
 
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -1489,7 +1482,6 @@ static int ov13858_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       ov13858->streaming = enable;
        mutex_unlock(&ov13858->mutex);
 
        return ret;
@@ -1502,37 +1494,6 @@ err_unlock:
        return ret;
 }
 
-static int __maybe_unused ov13858_suspend(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov13858 *ov13858 = to_ov13858(sd);
-
-       if (ov13858->streaming)
-               ov13858_stop_streaming(ov13858);
-
-       return 0;
-}
-
-static int __maybe_unused ov13858_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov13858 *ov13858 = to_ov13858(sd);
-       int ret;
-
-       if (ov13858->streaming) {
-               ret = ov13858_start_streaming(ov13858);
-               if (ret)
-                       goto error;
-       }
-
-       return 0;
-
-error:
-       ov13858_stop_streaming(ov13858);
-       ov13858->streaming = false;
-       return ret;
-}
-
 /* Verify chip ID */
 static int ov13858_identify_module(struct ov13858 *ov13858)
 {
@@ -1787,10 +1748,6 @@ static const struct i2c_device_id ov13858_id_table[] = {
 
 MODULE_DEVICE_TABLE(i2c, ov13858_id_table);
 
-static const struct dev_pm_ops ov13858_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ov13858_suspend, ov13858_resume)
-};
-
 #ifdef CONFIG_ACPI
 static const struct acpi_device_id ov13858_acpi_ids[] = {
        {"OVTID858"},
@@ -1803,7 +1760,6 @@ MODULE_DEVICE_TABLE(acpi, ov13858_acpi_ids);
 static struct i2c_driver ov13858_i2c_driver = {
        .driver = {
                .name = "ov13858",
-               .pm = &ov13858_pm_ops,
                .acpi_match_table = ACPI_PTR(ov13858_acpi_ids),
        },
        .probe = ov13858_probe,
index dbc642c5995b62e3cd190cd1969b5738f5559fbe..970d2caeb3d62f675347b734cfe7c499636576b1 100644 (file)
@@ -31,6 +31,7 @@
 #define OV13B10_REG_VTS                        0x380e
 #define OV13B10_VTS_30FPS              0x0c7c
 #define OV13B10_VTS_60FPS              0x063e
+#define OV13B10_VTS_120FPS             0x0320
 #define OV13B10_VTS_MAX                        0x7fff
 
 /* HBLANK control - read only */
@@ -468,6 +469,50 @@ static const struct ov13b10_reg mode_2080x1170_regs[] = {
        {0x5001, 0x0d},
 };
 
+static const struct ov13b10_reg mode_1364x768_120fps_regs[] = {
+       {0x0305, 0xaf},
+       {0x3011, 0x7c},
+       {0x3501, 0x03},
+       {0x3502, 0x00},
+       {0x3662, 0x88},
+       {0x3714, 0x28},
+       {0x3739, 0x10},
+       {0x37c2, 0x14},
+       {0x37d9, 0x06},
+       {0x37e2, 0x0c},
+       {0x37e4, 0x00},
+       {0x3800, 0x02},
+       {0x3801, 0xe4},
+       {0x3802, 0x03},
+       {0x3803, 0x48},
+       {0x3804, 0x0d},
+       {0x3805, 0xab},
+       {0x3806, 0x09},
+       {0x3807, 0x60},
+       {0x3808, 0x05},
+       {0x3809, 0x54},
+       {0x380a, 0x03},
+       {0x380b, 0x00},
+       {0x380c, 0x04},
+       {0x380d, 0x8e},
+       {0x380e, 0x03},
+       {0x380f, 0x20},
+       {0x3811, 0x07},
+       {0x3813, 0x07},
+       {0x3814, 0x03},
+       {0x3816, 0x03},
+       {0x3820, 0x8b},
+       {0x3c8c, 0x18},
+       {0x4008, 0x00},
+       {0x4009, 0x05},
+       {0x4050, 0x00},
+       {0x4051, 0x05},
+       {0x4501, 0x08},
+       {0x4505, 0x04},
+       {0x5000, 0xfd},
+       {0x5001, 0x0d},
+};
+
 static const char * const ov13b10_test_pattern_menu[] = {
        "Disabled",
        "Vertical Color Bar Type 1",
@@ -568,7 +613,18 @@ static const struct ov13b10_mode supported_modes[] = {
                        .regs = mode_2080x1170_regs,
                },
                .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
-       }
+       },
+       {
+               .width = 1364,
+               .height = 768,
+               .vts_def = OV13B10_VTS_120FPS,
+               .vts_min = OV13B10_VTS_120FPS,
+               .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+               .reg_list = {
+                       .num_of_regs = ARRAY_SIZE(mode_1364x768_120fps_regs),
+                       .regs = mode_1364x768_120fps_regs,
+               },
+       },
 };
 
 struct ov13b10 {
@@ -594,9 +650,6 @@ struct ov13b10 {
        /* Mutex for serialized access */
        struct mutex mutex;
 
-       /* Streaming on/off */
-       bool streaming;
-
        /* True if the device has been identified */
        bool identified;
 };
@@ -1161,10 +1214,6 @@ static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
        int ret = 0;
 
        mutex_lock(&ov13b->mutex);
-       if (ov13b->streaming == enable) {
-               mutex_unlock(&ov13b->mutex);
-               return 0;
-       }
 
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -1183,7 +1232,6 @@ static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       ov13b->streaming = enable;
        mutex_unlock(&ov13b->mutex);
 
        return ret;
@@ -1198,12 +1246,6 @@ err_unlock:
 
 static int ov13b10_suspend(struct device *dev)
 {
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov13b10 *ov13b = to_ov13b10(sd);
-
-       if (ov13b->streaming)
-               ov13b10_stop_streaming(ov13b);
-
        ov13b10_power_off(dev);
 
        return 0;
@@ -1211,29 +1253,7 @@ static int ov13b10_suspend(struct device *dev)
 
 static int ov13b10_resume(struct device *dev)
 {
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov13b10 *ov13b = to_ov13b10(sd);
-       int ret;
-
-       ret = ov13b10_power_on(dev);
-       if (ret)
-               goto pm_fail;
-
-       if (ov13b->streaming) {
-               ret = ov13b10_start_streaming(ov13b);
-               if (ret)
-                       goto stop_streaming;
-       }
-
-       return 0;
-
-stop_streaming:
-       ov13b10_stop_streaming(ov13b);
-       ov13b10_power_off(dev);
-pm_fail:
-       ov13b->streaming = false;
-
-       return ret;
+       return ov13b10_power_on(dev);
 }
 
 static const struct v4l2_subdev_video_ops ov13b10_video_ops = {
@@ -1501,7 +1521,7 @@ static int ov13b10_probe(struct i2c_client *client)
 
        full_power = acpi_dev_state_d0(&client->dev);
        if (full_power) {
-               ov13b10_power_on(&client->dev);
+               ret = ov13b10_power_on(&client->dev);
                if (ret) {
                        dev_err(&client->dev, "failed to power on\n");
                        return ret;
index bb6c9863a5460f6408aaf6b66382abfaa947d9a4..28a01c6eff64f278717426031a3afa849ea952f4 100644 (file)
@@ -293,9 +293,7 @@ struct ov2640_win_size {
 
 struct ov2640_priv {
        struct v4l2_subdev              subdev;
-#if defined(CONFIG_MEDIA_CONTROLLER)
        struct media_pad pad;
-#endif
        struct v4l2_ctrl_handler        hdl;
        u32     cfmt_code;
        struct clk                      *clk;
@@ -922,13 +920,9 @@ static int ov2640_get_fmt(struct v4l2_subdev *sd,
                return -EINVAL;
 
        if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
                mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
                format->format = *mf;
                return 0;
-#else
-               return -EINVAL;
-#endif
        }
 
        mf->width       = priv->win->width;
@@ -1005,7 +999,6 @@ out:
 static int ov2640_init_cfg(struct v4l2_subdev *sd,
                           struct v4l2_subdev_state *sd_state)
 {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
        struct v4l2_mbus_framefmt *try_fmt =
                v4l2_subdev_get_try_format(sd, sd_state, 0);
        const struct ov2640_win_size *win =
@@ -1019,7 +1012,7 @@ static int ov2640_init_cfg(struct v4l2_subdev *sd,
        try_fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
        try_fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
        try_fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
-#endif
+
        return 0;
 }
 
@@ -1205,17 +1198,14 @@ static int ov2640_probe(struct i2c_client *client)
                return -ENOMEM;
 
        if (client->dev.of_node) {
-               priv->clk = devm_clk_get(&client->dev, "xvclk");
+               priv->clk = devm_clk_get_enabled(&client->dev, "xvclk");
                if (IS_ERR(priv->clk))
                        return PTR_ERR(priv->clk);
-               ret = clk_prepare_enable(priv->clk);
-               if (ret)
-                       return ret;
        }
 
        ret = ov2640_probe_dt(client, priv);
        if (ret)
-               goto err_clk;
+               return ret;
 
        priv->win = ov2640_select_win(SVGA_WIDTH, SVGA_HEIGHT);
        priv->cfmt_code = MEDIA_BUS_FMT_UYVY8_2X8;
@@ -1239,13 +1229,11 @@ static int ov2640_probe(struct i2c_client *client)
                ret = priv->hdl.error;
                goto err_hdl;
        }
-#if defined(CONFIG_MEDIA_CONTROLLER)
        priv->pad.flags = MEDIA_PAD_FL_SOURCE;
        priv->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
        ret = media_entity_pads_init(&priv->subdev.entity, 1, &priv->pad);
        if (ret < 0)
                goto err_hdl;
-#endif
 
        ret = ov2640_video_probe(client);
        if (ret < 0)
@@ -1264,8 +1252,6 @@ err_videoprobe:
 err_hdl:
        v4l2_ctrl_handler_free(&priv->hdl);
        mutex_destroy(&priv->lock);
-err_clk:
-       clk_disable_unprepare(priv->clk);
        return ret;
 }
 
@@ -1278,7 +1264,6 @@ static void ov2640_remove(struct i2c_client *client)
        mutex_destroy(&priv->lock);
        media_entity_cleanup(&priv->subdev.entity);
        v4l2_device_unregister_subdev(&priv->subdev);
-       clk_disable_unprepare(priv->clk);
 }
 
 static const struct i2c_device_id ov2640_id[] = {
index 5429bd2eb05318fa70f18054a745f4f655910393..2c3dbe164eb69604ec0676c5a4663b179543d8c9 100644 (file)
@@ -1031,7 +1031,6 @@ static int ov2659_get_fmt(struct v4l2_subdev *sd,
        dev_dbg(&client->dev, "ov2659_get_fmt\n");
 
        if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
                struct v4l2_mbus_framefmt *mf;
 
                mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
@@ -1039,9 +1038,6 @@ static int ov2659_get_fmt(struct v4l2_subdev *sd,
                fmt->format = *mf;
                mutex_unlock(&ov2659->lock);
                return 0;
-#else
-               return -EINVAL;
-#endif
        }
 
        mutex_lock(&ov2659->lock);
@@ -1113,10 +1109,8 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
        mutex_lock(&ov2659->lock);
 
        if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
                mf = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
                *mf = fmt->format;
-#endif
        } else {
                s64 val;
 
@@ -1306,7 +1300,6 @@ static int ov2659_power_on(struct device *dev)
  * V4L2 subdev internal operations
  */
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
 static int ov2659_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
@@ -1319,7 +1312,6 @@ static int ov2659_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 
        return 0;
 }
-#endif
 
 static const struct v4l2_subdev_core_ops ov2659_subdev_core_ops = {
        .log_status = v4l2_ctrl_subdev_log_status,
@@ -1338,7 +1330,6 @@ static const struct v4l2_subdev_pad_ops ov2659_subdev_pad_ops = {
        .set_fmt = ov2659_set_fmt,
 };
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
 static const struct v4l2_subdev_ops ov2659_subdev_ops = {
        .core  = &ov2659_subdev_core_ops,
        .video = &ov2659_subdev_video_ops,
@@ -1348,7 +1339,6 @@ static const struct v4l2_subdev_ops ov2659_subdev_ops = {
 static const struct v4l2_subdev_internal_ops ov2659_subdev_internal_ops = {
        .open = ov2659_open,
 };
-#endif
 
 static int ov2659_detect(struct v4l2_subdev *sd)
 {
@@ -1489,15 +1479,12 @@ static int ov2659_probe(struct i2c_client *client)
 
        sd = &ov2659->sd;
        client->flags |= I2C_CLIENT_SCCB;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
-       v4l2_i2c_subdev_init(sd, client, &ov2659_subdev_ops);
 
+       v4l2_i2c_subdev_init(sd, client, &ov2659_subdev_ops);
        sd->internal_ops = &ov2659_subdev_internal_ops;
        sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
                     V4L2_SUBDEV_FL_HAS_EVENTS;
-#endif
 
-#if defined(CONFIG_MEDIA_CONTROLLER)
        ov2659->pad.flags = MEDIA_PAD_FL_SOURCE;
        sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
        ret = media_entity_pads_init(&sd->entity, 1, &ov2659->pad);
@@ -1505,7 +1492,6 @@ static int ov2659_probe(struct i2c_client *client)
                v4l2_ctrl_handler_free(&ov2659->ctrls);
                return ret;
        }
-#endif
 
        mutex_init(&ov2659->lock);
 
index 303793e1f97d647b8ad024e10882a893c12efe0c..396583826ae91ac9fa8c712ed7bff9dc12767811 100644 (file)
@@ -91,7 +91,6 @@ struct ov2685 {
        struct gpio_desc        *reset_gpio;
        struct regulator_bulk_data supplies[OV2685_NUM_SUPPLIES];
 
-       bool                    streaming;
        struct mutex            mutex;
        struct v4l2_subdev      subdev;
        struct media_pad        pad;
@@ -513,10 +512,6 @@ static int ov2685_s_stream(struct v4l2_subdev *sd, int on)
 
        mutex_lock(&ov2685->mutex);
 
-       on = !!on;
-       if (on == ov2685->streaming)
-               goto unlock_and_return;
-
        if (on) {
                ret = pm_runtime_resume_and_get(&ov2685->client->dev);
                if (ret < 0)
@@ -539,15 +534,12 @@ static int ov2685_s_stream(struct v4l2_subdev *sd, int on)
                pm_runtime_put(&ov2685->client->dev);
        }
 
-       ov2685->streaming = on;
-
 unlock_and_return:
        mutex_unlock(&ov2685->mutex);
 
        return ret;
 }
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
 static int ov2685_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 {
        struct ov2685 *ov2685 = to_ov2685(sd);
@@ -563,7 +555,6 @@ static int ov2685_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 
        return 0;
 }
-#endif
 
 static int __maybe_unused ov2685_runtime_resume(struct device *dev)
 {
@@ -660,11 +651,9 @@ static const struct v4l2_subdev_ops ov2685_subdev_ops = {
        .pad    = &ov2685_pad_ops,
 };
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
 static const struct v4l2_subdev_internal_ops ov2685_internal_ops = {
        .open = ov2685_open,
 };
-#endif
 
 static const struct v4l2_ctrl_ops ov2685_ctrl_ops = {
        .s_ctrl = ov2685_set_ctrl,
@@ -833,17 +822,13 @@ static int ov2685_probe(struct i2c_client *client)
        if (ret)
                goto err_power_off;
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
        ov2685->subdev.internal_ops = &ov2685_internal_ops;
        ov2685->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-#endif
-#if defined(CONFIG_MEDIA_CONTROLLER)
        ov2685->pad.flags = MEDIA_PAD_FL_SOURCE;
        ov2685->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
        ret = media_entity_pads_init(&ov2685->subdev.entity, 1, &ov2685->pad);
        if (ret < 0)
                goto err_power_off;
-#endif
 
        ret = v4l2_async_register_subdev(&ov2685->subdev);
        if (ret) {
@@ -858,9 +843,7 @@ static int ov2685_probe(struct i2c_client *client)
        return 0;
 
 err_clean_entity:
-#if defined(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&ov2685->subdev.entity);
-#endif
 err_power_off:
        __ov2685_power_off(ov2685);
 err_free_handler:
@@ -877,9 +860,7 @@ static void ov2685_remove(struct i2c_client *client)
        struct ov2685 *ov2685 = to_ov2685(sd);
 
        v4l2_async_unregister_subdev(sd);
-#if defined(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&sd->entity);
-#endif
        v4l2_ctrl_handler_free(&ov2685->ctrl_handler);
        mutex_destroy(&ov2685->mutex);
 
index 41d4f85470fd21056ebe36efcbb5a9fbcc02211b..24e468485fbf01d80da32dc8345abd52f4440f45 100644 (file)
@@ -336,12 +336,6 @@ struct ov2740 {
        /* Current mode */
        const struct ov2740_mode *cur_mode;
 
-       /* To serialize asynchronus callbacks */
-       struct mutex mutex;
-
-       /* Streaming on/off */
-       bool streaming;
-
        /* NVM data inforamtion */
        struct nvm_data *nvm;
 
@@ -582,7 +576,6 @@ static int ov2740_init_controls(struct ov2740 *ov2740)
        if (ret)
                return ret;
 
-       ctrl_hdlr->lock = &ov2740->mutex;
        cur_mode = ov2740->cur_mode;
        size = ARRAY_SIZE(link_freq_menu_items);
 
@@ -792,18 +785,15 @@ static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
 {
        struct ov2740 *ov2740 = to_ov2740(sd);
        struct i2c_client *client = v4l2_get_subdevdata(sd);
+       struct v4l2_subdev_state *sd_state;
        int ret = 0;
 
-       if (ov2740->streaming == enable)
-               return 0;
+       sd_state = v4l2_subdev_lock_and_get_active_state(&ov2740->sd);
 
-       mutex_lock(&ov2740->mutex);
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
-               if (ret < 0) {
-                       mutex_unlock(&ov2740->mutex);
-                       return ret;
-               }
+               if (ret < 0)
+                       goto out_unlock;
 
                ret = ov2740_start_streaming(ov2740);
                if (ret) {
@@ -816,44 +806,9 @@ static int ov2740_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       ov2740->streaming = enable;
-       mutex_unlock(&ov2740->mutex);
-
-       return ret;
-}
-
-static int ov2740_suspend(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov2740 *ov2740 = to_ov2740(sd);
-
-       mutex_lock(&ov2740->mutex);
-       if (ov2740->streaming)
-               ov2740_stop_streaming(ov2740);
-
-       mutex_unlock(&ov2740->mutex);
-
-       return 0;
-}
-
-static int ov2740_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov2740 *ov2740 = to_ov2740(sd);
-       int ret = 0;
-
-       mutex_lock(&ov2740->mutex);
-       if (!ov2740->streaming)
-               goto exit;
-
-       ret = ov2740_start_streaming(ov2740);
-       if (ret) {
-               ov2740->streaming = false;
-               ov2740_stop_streaming(ov2740);
-       }
+out_unlock:
+       v4l2_subdev_unlock_state(sd_state);
 
-exit:
-       mutex_unlock(&ov2740->mutex);
        return ret;
 }
 
@@ -870,48 +825,26 @@ static int ov2740_set_format(struct v4l2_subdev *sd,
                                      height, fmt->format.width,
                                      fmt->format.height);
 
-       mutex_lock(&ov2740->mutex);
        ov2740_update_pad_format(mode, &fmt->format);
-       if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-               *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
-       } else {
-               ov2740->cur_mode = mode;
-               __v4l2_ctrl_s_ctrl(ov2740->link_freq, mode->link_freq_index);
-               __v4l2_ctrl_s_ctrl_int64(ov2740->pixel_rate,
-                                        to_pixel_rate(mode->link_freq_index));
-
-               /* Update limits and set FPS to default */
-               vblank_def = mode->vts_def - mode->height;
-               __v4l2_ctrl_modify_range(ov2740->vblank,
-                                        mode->vts_min - mode->height,
-                                        OV2740_VTS_MAX - mode->height, 1,
-                                        vblank_def);
-               __v4l2_ctrl_s_ctrl(ov2740->vblank, vblank_def);
-               h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) -
-                         mode->width;
-               __v4l2_ctrl_modify_range(ov2740->hblank, h_blank, h_blank, 1,
-                                        h_blank);
-       }
-       mutex_unlock(&ov2740->mutex);
-
-       return 0;
-}
-
-static int ov2740_get_format(struct v4l2_subdev *sd,
-                            struct v4l2_subdev_state *sd_state,
-                            struct v4l2_subdev_format *fmt)
-{
-       struct ov2740 *ov2740 = to_ov2740(sd);
+       *v4l2_subdev_get_pad_format(sd, sd_state, fmt->pad) = fmt->format;
 
-       mutex_lock(&ov2740->mutex);
        if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
-               fmt->format = *v4l2_subdev_get_try_format(&ov2740->sd,
-                                                         sd_state,
-                                                         fmt->pad);
-       else
-               ov2740_update_pad_format(ov2740->cur_mode, &fmt->format);
+               return 0;
 
-       mutex_unlock(&ov2740->mutex);
+       ov2740->cur_mode = mode;
+       __v4l2_ctrl_s_ctrl(ov2740->link_freq, mode->link_freq_index);
+       __v4l2_ctrl_s_ctrl_int64(ov2740->pixel_rate,
+                                to_pixel_rate(mode->link_freq_index));
+
+       /* Update limits and set FPS to default */
+       vblank_def = mode->vts_def - mode->height;
+       __v4l2_ctrl_modify_range(ov2740->vblank,
+                                mode->vts_min - mode->height,
+                                OV2740_VTS_MAX - mode->height, 1, vblank_def);
+       __v4l2_ctrl_s_ctrl(ov2740->vblank, vblank_def);
+       h_blank = to_pixels_per_line(mode->hts, mode->link_freq_index) -
+               mode->width;
+       __v4l2_ctrl_modify_range(ov2740->hblank, h_blank, h_blank, 1, h_blank);
 
        return 0;
 }
@@ -946,14 +879,11 @@ static int ov2740_enum_frame_size(struct v4l2_subdev *sd,
        return 0;
 }
 
-static int ov2740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+static int ov2740_init_cfg(struct v4l2_subdev *sd,
+                          struct v4l2_subdev_state *sd_state)
 {
-       struct ov2740 *ov2740 = to_ov2740(sd);
-
-       mutex_lock(&ov2740->mutex);
        ov2740_update_pad_format(&supported_modes[0],
-                                v4l2_subdev_get_try_format(sd, fh->state, 0));
-       mutex_unlock(&ov2740->mutex);
+                                v4l2_subdev_get_pad_format(sd, sd_state, 0));
 
        return 0;
 }
@@ -963,10 +893,11 @@ static const struct v4l2_subdev_video_ops ov2740_video_ops = {
 };
 
 static const struct v4l2_subdev_pad_ops ov2740_pad_ops = {
+       .get_fmt = v4l2_subdev_get_fmt,
        .set_fmt = ov2740_set_format,
-       .get_fmt = ov2740_get_format,
        .enum_mbus_code = ov2740_enum_mbus_code,
        .enum_frame_size = ov2740_enum_frame_size,
+       .init_cfg = ov2740_init_cfg,
 };
 
 static const struct v4l2_subdev_ops ov2740_subdev_ops = {
@@ -978,10 +909,6 @@ static const struct media_entity_operations ov2740_subdev_entity_ops = {
        .link_validate = v4l2_subdev_link_validate,
 };
 
-static const struct v4l2_subdev_internal_ops ov2740_internal_ops = {
-       .open = ov2740_open,
-};
-
 static int ov2740_check_hwcfg(struct device *dev)
 {
        struct fwnode_handle *ep;
@@ -1004,7 +931,7 @@ static int ov2740_check_hwcfg(struct device *dev)
 
        ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
        if (!ep)
-               return -ENXIO;
+               return -EPROBE_DEFER;
 
        ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
        fwnode_handle_put(ep);
@@ -1047,13 +974,12 @@ check_hwcfg_error:
 static void ov2740_remove(struct i2c_client *client)
 {
        struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct ov2740 *ov2740 = to_ov2740(sd);
 
        v4l2_async_unregister_subdev(sd);
        media_entity_cleanup(&sd->entity);
+       v4l2_subdev_cleanup(sd);
        v4l2_ctrl_handler_free(sd->ctrl_handler);
        pm_runtime_disable(&client->dev);
-       mutex_destroy(&ov2740->mutex);
 }
 
 static int ov2740_nvmem_read(void *priv, unsigned int off, void *val,
@@ -1062,9 +988,11 @@ static int ov2740_nvmem_read(void *priv, unsigned int off, void *val,
        struct nvm_data *nvm = priv;
        struct device *dev = regmap_get_device(nvm->regmap);
        struct ov2740 *ov2740 = to_ov2740(dev_get_drvdata(dev));
+       struct v4l2_subdev_state *sd_state;
        int ret = 0;
 
-       mutex_lock(&ov2740->mutex);
+       /* Serialise sensor access */
+       sd_state = v4l2_subdev_lock_and_get_active_state(&ov2740->sd);
 
        if (nvm->nvm_buffer) {
                memcpy(val, nvm->nvm_buffer + off, count);
@@ -1082,7 +1010,7 @@ static int ov2740_nvmem_read(void *priv, unsigned int off, void *val,
 
        pm_runtime_put(dev);
 exit:
-       mutex_unlock(&ov2740->mutex);
+       v4l2_subdev_unlock_state(sd_state);
        return ret;
 }
 
@@ -1153,7 +1081,6 @@ static int ov2740_probe(struct i2c_client *client)
                        return dev_err_probe(dev, ret, "failed to find sensor\n");
        }
 
-       mutex_init(&ov2740->mutex);
        ov2740->cur_mode = &supported_modes[0];
        ret = ov2740_init_controls(ov2740);
        if (ret) {
@@ -1161,7 +1088,7 @@ static int ov2740_probe(struct i2c_client *client)
                goto probe_error_v4l2_ctrl_handler_free;
        }
 
-       ov2740->sd.internal_ops = &ov2740_internal_ops;
+       ov2740->sd.state_lock = ov2740->ctrl_handler.lock;
        ov2740->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
        ov2740->sd.entity.ops = &ov2740_subdev_entity_ops;
        ov2740->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
@@ -1172,36 +1099,42 @@ static int ov2740_probe(struct i2c_client *client)
                goto probe_error_v4l2_ctrl_handler_free;
        }
 
+       ret = v4l2_subdev_init_finalize(&ov2740->sd);
+       if (ret)
+               goto probe_error_media_entity_cleanup;
+
+       /* Set the device's state to active if it's in D0 state. */
+       if (full_power)
+               pm_runtime_set_active(&client->dev);
+       pm_runtime_enable(&client->dev);
+       pm_runtime_idle(&client->dev);
+
        ret = v4l2_async_register_subdev_sensor(&ov2740->sd);
        if (ret < 0) {
                dev_err_probe(dev, ret, "failed to register V4L2 subdev\n");
-               goto probe_error_media_entity_cleanup;
+               goto probe_error_v4l2_subdev_cleanup;
        }
 
        ret = ov2740_register_nvmem(client, ov2740);
        if (ret)
                dev_warn(&client->dev, "register nvmem failed, ret %d\n", ret);
 
-       /* Set the device's state to active if it's in D0 state. */
-       if (full_power)
-               pm_runtime_set_active(&client->dev);
-       pm_runtime_enable(&client->dev);
-       pm_runtime_idle(&client->dev);
-
        return 0;
 
+probe_error_v4l2_subdev_cleanup:
+       v4l2_subdev_cleanup(&ov2740->sd);
+
 probe_error_media_entity_cleanup:
        media_entity_cleanup(&ov2740->sd.entity);
+       pm_runtime_disable(&client->dev);
+       pm_runtime_set_suspended(&client->dev);
 
 probe_error_v4l2_ctrl_handler_free:
        v4l2_ctrl_handler_free(ov2740->sd.ctrl_handler);
-       mutex_destroy(&ov2740->mutex);
 
        return ret;
 }
 
-static DEFINE_SIMPLE_DEV_PM_OPS(ov2740_pm_ops, ov2740_suspend, ov2740_resume);
-
 static const struct acpi_device_id ov2740_acpi_ids[] = {
        {"INT3474"},
        {}
@@ -1212,7 +1145,6 @@ MODULE_DEVICE_TABLE(acpi, ov2740_acpi_ids);
 static struct i2c_driver ov2740_i2c_driver = {
        .driver = {
                .name = "ov2740",
-               .pm = pm_sleep_ptr(&ov2740_pm_ops),
                .acpi_match_table = ov2740_acpi_ids,
        },
        .probe = ov2740_probe,
index fda217d2cb10af78b3680c597fb0b95716269d85..3bd972a822e7ba8a80b44a2f68aba1aacffb4455 100644 (file)
@@ -99,8 +99,7 @@ struct ov4689 {
 
        u32 clock_rate;
 
-       struct mutex mutex; /* lock to protect streaming, ctrls and cur_mode */
-       bool streaming;
+       struct mutex mutex; /* lock to protect ctrls and cur_mode */
        struct v4l2_ctrl_handler ctrl_handler;
        struct v4l2_ctrl *exposure;
 
@@ -468,10 +467,6 @@ static int ov4689_s_stream(struct v4l2_subdev *sd, int on)
 
        mutex_lock(&ov4689->mutex);
 
-       on = !!on;
-       if (on == ov4689->streaming)
-               goto unlock_and_return;
-
        if (on) {
                ret = pm_runtime_resume_and_get(&client->dev);
                if (ret < 0)
@@ -504,8 +499,6 @@ static int ov4689_s_stream(struct v4l2_subdev *sd, int on)
                pm_runtime_put(&client->dev);
        }
 
-       ov4689->streaming = on;
-
 unlock_and_return:
        mutex_unlock(&ov4689->mutex);
 
index 5fe85aa2d2ec4217cbb6fa83f390432632128a87..40532f7bcabea84cfc1f534b70e04c7c5ad789a7 100644 (file)
@@ -2850,12 +2850,22 @@ static int ov5640_try_fmt_internal(struct v4l2_subdev *sd,
        return 0;
 }
 
+static void __v4l2_ctrl_vblank_update(struct ov5640_dev *sensor, u32 vblank)
+{
+       const struct ov5640_mode_info *mode = sensor->current_mode;
+
+       __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
+                                OV5640_MAX_VTS - mode->height, 1, vblank);
+
+       __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
+}
+
 static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
 {
        const struct ov5640_mode_info *mode = sensor->current_mode;
        enum ov5640_pixel_rate_id pixel_rate_id = mode->pixel_rate;
        struct v4l2_mbus_framefmt *fmt = &sensor->fmt;
-       const struct ov5640_timings *timings;
+       const struct ov5640_timings *timings = ov5640_timings(sensor, mode);
        s32 exposure_val, exposure_max;
        unsigned int hblank;
        unsigned int i = 0;
@@ -2874,6 +2884,8 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
                __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate,
                                         ov5640_calc_pixel_rate(sensor));
 
+               __v4l2_ctrl_vblank_update(sensor, timings->vblank_def);
+
                return 0;
        }
 
@@ -2916,15 +2928,12 @@ static int ov5640_update_pixel_rate(struct ov5640_dev *sensor)
        __v4l2_ctrl_s_ctrl_int64(sensor->ctrls.pixel_rate, pixel_rate);
        __v4l2_ctrl_s_ctrl(sensor->ctrls.link_freq, i);
 
-       timings = ov5640_timings(sensor, mode);
        hblank = timings->htot - mode->width;
        __v4l2_ctrl_modify_range(sensor->ctrls.hblank,
                                 hblank, hblank, 1, hblank);
 
        vblank = timings->vblank_def;
-       __v4l2_ctrl_modify_range(sensor->ctrls.vblank, OV5640_MIN_VBLANK,
-                                OV5640_MAX_VTS - mode->height, 1, vblank);
-       __v4l2_ctrl_s_ctrl(sensor->ctrls.vblank, vblank);
+       __v4l2_ctrl_vblank_update(sensor, vblank);
 
        exposure_max = timings->crop.height + vblank - 4;
        exposure_val = clamp_t(s32, sensor->ctrls.exposure->val,
@@ -3919,7 +3928,7 @@ static int ov5640_probe(struct i2c_client *client)
        ret = ov5640_sensor_resume(dev);
        if (ret) {
                dev_err(dev, "failed to power on\n");
-               goto entity_cleanup;
+               goto free_ctrls;
        }
 
        pm_runtime_set_active(dev);
@@ -3944,8 +3953,9 @@ static int ov5640_probe(struct i2c_client *client)
 err_pm_runtime:
        pm_runtime_put_noidle(dev);
        pm_runtime_disable(dev);
-       v4l2_ctrl_handler_free(&sensor->ctrls.handler);
        ov5640_sensor_suspend(dev);
+free_ctrls:
+       v4l2_ctrl_handler_free(&sensor->ctrls.handler);
 entity_cleanup:
        media_entity_cleanup(&sensor->sd.entity);
        mutex_destroy(&sensor->lock);
index 8de398423b7cb9c482f2a246ef5bf8e59e4598e5..dcfe3129c63acedbfc951f2fdfcf8d0fd783983b 100644 (file)
@@ -109,7 +109,6 @@ struct ov5647 {
        struct v4l2_ctrl                *hblank;
        struct v4l2_ctrl                *vblank;
        struct v4l2_ctrl                *exposure;
-       bool                            streaming;
 };
 
 static inline struct ov5647 *to_sensor(struct v4l2_subdev *sd)
@@ -898,10 +897,6 @@ static int ov5647_s_stream(struct v4l2_subdev *sd, int enable)
        int ret;
 
        mutex_lock(&sensor->lock);
-       if (sensor->streaming == enable) {
-               mutex_unlock(&sensor->lock);
-               return 0;
-       }
 
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -922,7 +917,6 @@ static int ov5647_s_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       sensor->streaming = enable;
        mutex_unlock(&sensor->lock);
 
        return 0;
index 29e773a997dd4d9279f109152d82f4959a8049ef..e80db3ecd4f806787d42c0b19b17da2f0c70f590 100644 (file)
@@ -1882,8 +1882,6 @@ struct ov5670 {
        /* To serialize asynchronus callbacks */
        struct mutex mutex;
 
-       /* Streaming on/off */
-       bool streaming;
        /* True if the device has been identified */
        bool identified;
 };
@@ -2471,8 +2469,6 @@ static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
        int ret = 0;
 
        mutex_lock(&ov5670->mutex);
-       if (ov5670->streaming == enable)
-               goto unlock_and_return;
 
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -2486,7 +2482,6 @@ static int ov5670_set_stream(struct v4l2_subdev *sd, int enable)
                ret = ov5670_stop_streaming(ov5670);
                pm_runtime_put(&client->dev);
        }
-       ov5670->streaming = enable;
        goto unlock_and_return;
 
 error:
@@ -2541,34 +2536,6 @@ static int __maybe_unused ov5670_runtime_suspend(struct device *dev)
        return 0;
 }
 
-static int __maybe_unused ov5670_suspend(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov5670 *ov5670 = to_ov5670(sd);
-
-       if (ov5670->streaming)
-               ov5670_stop_streaming(ov5670);
-
-       return 0;
-}
-
-static int __maybe_unused ov5670_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov5670 *ov5670 = to_ov5670(sd);
-       int ret;
-
-       if (ov5670->streaming) {
-               ret = ov5670_start_streaming(ov5670);
-               if (ret) {
-                       ov5670_stop_streaming(ov5670);
-                       return ret;
-               }
-       }
-
-       return 0;
-}
-
 static const struct v4l2_subdev_core_ops ov5670_core_ops = {
        .log_status = v4l2_ctrl_subdev_log_status,
        .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
@@ -2771,8 +2738,6 @@ static int ov5670_probe(struct i2c_client *client)
                goto error_handler_free;
        }
 
-       ov5670->streaming = false;
-
        /* Set the device's state to active if it's in D0 state. */
        if (full_power)
                pm_runtime_set_active(&client->dev);
@@ -2827,7 +2792,6 @@ static void ov5670_remove(struct i2c_client *client)
 }
 
 static const struct dev_pm_ops ov5670_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ov5670_suspend, ov5670_resume)
        SET_RUNTIME_PM_OPS(ov5670_runtime_suspend, ov5670_runtime_resume, NULL)
 };
 
index d5a2a5f823124b81bf900c1161baf9b503583888..e63d9d402d34065d0b938c3631cac3e32f088e7e 100644 (file)
@@ -513,9 +513,6 @@ struct ov5675 {
        /* To serialize asynchronus callbacks */
        struct mutex mutex;
 
-       /* Streaming on/off */
-       bool streaming;
-
        /* True if the device has been identified */
        bool identified;
 };
@@ -949,9 +946,6 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        int ret = 0;
 
-       if (ov5675->streaming == enable)
-               return 0;
-
        mutex_lock(&ov5675->mutex);
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -971,7 +965,6 @@ static int ov5675_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       ov5675->streaming = enable;
        mutex_unlock(&ov5675->mutex);
 
        return ret;
@@ -1027,42 +1020,6 @@ static int ov5675_power_on(struct device *dev)
        return 0;
 }
 
-static int __maybe_unused ov5675_suspend(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov5675 *ov5675 = to_ov5675(sd);
-
-       mutex_lock(&ov5675->mutex);
-       if (ov5675->streaming)
-               ov5675_stop_streaming(ov5675);
-
-       mutex_unlock(&ov5675->mutex);
-
-       return 0;
-}
-
-static int __maybe_unused ov5675_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov5675 *ov5675 = to_ov5675(sd);
-       int ret;
-
-       mutex_lock(&ov5675->mutex);
-       if (ov5675->streaming) {
-               ret = ov5675_start_streaming(ov5675);
-               if (ret) {
-                       ov5675->streaming = false;
-                       ov5675_stop_streaming(ov5675);
-                       mutex_unlock(&ov5675->mutex);
-                       return ret;
-               }
-       }
-
-       mutex_unlock(&ov5675->mutex);
-
-       return 0;
-}
-
 static int ov5675_set_format(struct v4l2_subdev *sd,
                             struct v4l2_subdev_state *sd_state,
                             struct v4l2_subdev_format *fmt)
@@ -1409,7 +1366,6 @@ probe_power_off:
 }
 
 static const struct dev_pm_ops ov5675_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ov5675_suspend, ov5675_resume)
        SET_RUNTIME_PM_OPS(ov5675_power_off, ov5675_power_on, NULL)
 };
 
index 488ee6d9d30101c8cd15ca70cd07049d19ef0335..819425e2134995d7d9bbd396ce3dbecd168686a4 100644 (file)
@@ -154,7 +154,6 @@ struct ov5693_device {
                unsigned int inc_y_odd;
                unsigned int vts;
        } mode;
-       bool streaming;
 
        struct v4l2_subdev sd;
        struct media_pad pad;
@@ -975,9 +974,9 @@ static int ov5693_s_stream(struct v4l2_subdev *sd, int enable)
        int ret;
 
        if (enable) {
-               ret = pm_runtime_get_sync(ov5693->dev);
-               if (ret < 0)
-                       goto err_power_down;
+               ret = pm_runtime_resume_and_get(ov5693->dev);
+               if (ret)
+                       return ret;
 
                mutex_lock(&ov5693->lock);
                ret = __v4l2_ctrl_handler_setup(&ov5693->ctrls.handler);
@@ -996,8 +995,6 @@ static int ov5693_s_stream(struct v4l2_subdev *sd, int enable)
        if (ret)
                goto err_power_down;
 
-       ov5693->streaming = !!enable;
-
        if (!enable)
                pm_runtime_put(ov5693->dev);
 
index 3023b72541677d0ef7b46341f6d7b5000e318c97..c8f57ce1578dccc909db31886b928dd306d4eb19 100644 (file)
@@ -108,7 +108,6 @@ struct ov5695 {
        struct v4l2_ctrl        *vblank;
        struct v4l2_ctrl        *test_pattern;
        struct mutex            mutex;
-       bool                    streaming;
        const struct ov5695_mode *cur_mode;
 };
 
@@ -821,9 +820,7 @@ static int ov5695_set_fmt(struct v4l2_subdev *sd,
        fmt->format.height = mode->height;
        fmt->format.field = V4L2_FIELD_NONE;
        if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
                *v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
-#endif
        } else {
                ov5695->cur_mode = mode;
                h_blank = mode->hts_def - mode->width;
@@ -849,13 +846,8 @@ static int ov5695_get_fmt(struct v4l2_subdev *sd,
 
        mutex_lock(&ov5695->mutex);
        if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
                fmt->format = *v4l2_subdev_get_try_format(sd, sd_state,
                                                          fmt->pad);
-#else
-               mutex_unlock(&ov5695->mutex);
-               return -EINVAL;
-#endif
        } else {
                fmt->format.width = mode->width;
                fmt->format.height = mode->height;
@@ -942,9 +934,6 @@ static int ov5695_s_stream(struct v4l2_subdev *sd, int on)
        int ret = 0;
 
        mutex_lock(&ov5695->mutex);
-       on = !!on;
-       if (on == ov5695->streaming)
-               goto unlock_and_return;
 
        if (on) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -962,8 +951,6 @@ static int ov5695_s_stream(struct v4l2_subdev *sd, int on)
                pm_runtime_put(&client->dev);
        }
 
-       ov5695->streaming = on;
-
 unlock_and_return:
        mutex_unlock(&ov5695->mutex);
 
@@ -1048,7 +1035,6 @@ static int __maybe_unused ov5695_runtime_suspend(struct device *dev)
        return 0;
 }
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
 static int ov5695_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 {
        struct ov5695 *ov5695 = to_ov5695(sd);
@@ -1068,18 +1054,15 @@ static int ov5695_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 
        return 0;
 }
-#endif
 
 static const struct dev_pm_ops ov5695_pm_ops = {
        SET_RUNTIME_PM_OPS(ov5695_runtime_suspend,
                           ov5695_runtime_resume, NULL)
 };
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
 static const struct v4l2_subdev_internal_ops ov5695_internal_ops = {
        .open = ov5695_open,
 };
-#endif
 
 static const struct v4l2_subdev_video_ops ov5695_video_ops = {
        .s_stream = ov5695_s_stream,
@@ -1322,17 +1305,13 @@ static int ov5695_probe(struct i2c_client *client)
        if (ret)
                goto err_power_off;
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
        sd->internal_ops = &ov5695_internal_ops;
        sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-#endif
-#if defined(CONFIG_MEDIA_CONTROLLER)
        ov5695->pad.flags = MEDIA_PAD_FL_SOURCE;
        sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
        ret = media_entity_pads_init(&sd->entity, 1, &ov5695->pad);
        if (ret < 0)
                goto err_power_off;
-#endif
 
        ret = v4l2_async_register_subdev_sensor(sd);
        if (ret) {
@@ -1347,9 +1326,7 @@ static int ov5695_probe(struct i2c_client *client)
        return 0;
 
 err_clean_entity:
-#if defined(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&sd->entity);
-#endif
 err_power_off:
        __ov5695_power_off(ov5695);
 err_free_handler:
@@ -1366,9 +1343,7 @@ static void ov5695_remove(struct i2c_client *client)
        struct ov5695 *ov5695 = to_ov5695(sd);
 
        v4l2_async_unregister_subdev(sd);
-#if defined(CONFIG_MEDIA_CONTROLLER)
        media_entity_cleanup(&sd->entity);
-#endif
        v4l2_ctrl_handler_free(&ov5695->ctrl_handler);
        mutex_destroy(&ov5695->mutex);
 
index 675fb37a6feaec82614028c3bb9428a878d94c64..6582cc0e2384e4277560d153bc92026422fc83c6 100644 (file)
@@ -1340,9 +1340,11 @@ static int ov7251_s_stream(struct v4l2_subdev *subdev, int enable)
        mutex_lock(&ov7251->lock);
 
        if (enable) {
-               ret = pm_runtime_get_sync(ov7251->dev);
-               if (ret < 0)
-                       goto err_power_down;
+               ret = pm_runtime_resume_and_get(ov7251->dev);
+               if (ret) {
+                       mutex_unlock(&ov7251->lock);
+                       return ret;
+               }
 
                ret = ov7251_pll_configure(ov7251);
                if (ret) {
index 2f55491ef571f00d71b1e18c8d28ab544ae7ef92..172483597c542c0e0a6470c94a0e3304d7b5ff67 100644 (file)
@@ -10,6 +10,7 @@
  */
 #include <linux/clk.h>
 #include <linux/init.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
@@ -186,11 +187,6 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)");
 #define REG_HAECC7     0xaa    /* Hist AEC/AGC control 7 */
 #define REG_BD60MAX    0xab    /* 60hz banding step limit */
 
-enum ov7670_model {
-       MODEL_OV7670 = 0,
-       MODEL_OV7675,
-};
-
 struct ov7670_win_size {
        int     width;
        int     height;
@@ -217,9 +213,7 @@ struct ov7670_devtype {
 struct ov7670_format_struct;  /* coming later */
 struct ov7670_info {
        struct v4l2_subdev sd;
-#if defined(CONFIG_MEDIA_CONTROLLER)
        struct media_pad pad;
-#endif
        struct v4l2_ctrl_handler hdl;
        struct {
                /* gain cluster */
@@ -1108,9 +1102,7 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
                struct v4l2_subdev_format *format)
 {
        struct ov7670_info *info = to_state(sd);
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
        struct v4l2_mbus_framefmt *mbus_fmt;
-#endif
        int ret;
 
        if (format->pad)
@@ -1120,11 +1112,9 @@ static int ov7670_set_fmt(struct v4l2_subdev *sd,
                ret = ov7670_try_fmt_internal(sd, &format->format, NULL, NULL);
                if (ret)
                        return ret;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
                mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state,
                                                      format->pad);
                *mbus_fmt = format->format;
-#endif
                return 0;
        }
 
@@ -1148,18 +1138,12 @@ static int ov7670_get_fmt(struct v4l2_subdev *sd,
                          struct v4l2_subdev_format *format)
 {
        struct ov7670_info *info = to_state(sd);
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
        struct v4l2_mbus_framefmt *mbus_fmt;
-#endif
 
        if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
                mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
                format->format = *mbus_fmt;
                return 0;
-#else
-               return -EINVAL;
-#endif
        } else {
                format->format = info->format;
        }
@@ -1720,7 +1704,6 @@ static void ov7670_get_default_format(struct v4l2_subdev *sd,
        format->field = V4L2_FIELD_NONE;
 }
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
 static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 {
        struct v4l2_mbus_framefmt *format =
@@ -1730,7 +1713,6 @@ static int ov7670_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 
        return 0;
 }
-#endif
 
 /* ----------------------------------------------------------------------- */
 
@@ -1766,29 +1748,12 @@ static const struct v4l2_subdev_ops ov7670_ops = {
        .pad = &ov7670_pad_ops,
 };
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
 static const struct v4l2_subdev_internal_ops ov7670_subdev_internal_ops = {
        .open = ov7670_open,
 };
-#endif
 
 /* ----------------------------------------------------------------------- */
 
-static const struct ov7670_devtype ov7670_devdata[] = {
-       [MODEL_OV7670] = {
-               .win_sizes = ov7670_win_sizes,
-               .n_win_sizes = ARRAY_SIZE(ov7670_win_sizes),
-               .set_framerate = ov7670_set_framerate_legacy,
-               .get_framerate = ov7670_get_framerate_legacy,
-       },
-       [MODEL_OV7675] = {
-               .win_sizes = ov7675_win_sizes,
-               .n_win_sizes = ARRAY_SIZE(ov7675_win_sizes),
-               .set_framerate = ov7675_set_framerate,
-               .get_framerate = ov7675_get_framerate,
-       },
-};
-
 static int ov7670_init_gpio(struct i2c_client *client, struct ov7670_info *info)
 {
        info->pwdn_gpio = devm_gpiod_get_optional(&client->dev, "powerdown",
@@ -1849,7 +1814,6 @@ static int ov7670_parse_dt(struct device *dev,
 
 static int ov7670_probe(struct i2c_client *client)
 {
-       const struct i2c_device_id *id = i2c_client_get_device_id(client);
        struct v4l2_fract tpf;
        struct v4l2_subdev *sd;
        struct ov7670_info *info;
@@ -1861,10 +1825,8 @@ static int ov7670_probe(struct i2c_client *client)
        sd = &info->sd;
        v4l2_i2c_subdev_init(sd, client, &ov7670_ops);
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
        sd->internal_ops = &ov7670_subdev_internal_ops;
        sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
-#endif
 
        info->clock_speed = 30; /* default: a guess */
 
@@ -1923,7 +1885,7 @@ static int ov7670_probe(struct i2c_client *client)
        v4l_info(client, "chip found @ 0x%02x (%s)\n",
                        client->addr << 1, client->adapter->name);
 
-       info->devtype = &ov7670_devdata[id->driver_data];
+       info->devtype = i2c_get_match_data(client);
        info->fmt = &ov7670_formats[0];
        info->wsize = &info->devtype->win_sizes[0];
 
@@ -1977,13 +1939,11 @@ static int ov7670_probe(struct i2c_client *client)
                               V4L2_EXPOSURE_MANUAL, false);
        v4l2_ctrl_cluster(2, &info->saturation);
 
-#if defined(CONFIG_MEDIA_CONTROLLER)
        info->pad.flags = MEDIA_PAD_FL_SOURCE;
        info->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
        ret = media_entity_pads_init(&info->sd.entity, 1, &info->pad);
        if (ret < 0)
                goto hdl_free;
-#endif
 
        v4l2_ctrl_handler_setup(&info->hdl);
 
@@ -2013,25 +1973,37 @@ static void ov7670_remove(struct i2c_client *client)
        media_entity_cleanup(&info->sd.entity);
 }
 
+static const struct ov7670_devtype ov7670_devdata = {
+       .win_sizes = ov7670_win_sizes,
+       .n_win_sizes = ARRAY_SIZE(ov7670_win_sizes),
+       .set_framerate = ov7670_set_framerate_legacy,
+       .get_framerate = ov7670_get_framerate_legacy,
+};
+
+static const struct ov7670_devtype ov7675_devdata = {
+       .win_sizes = ov7675_win_sizes,
+       .n_win_sizes = ARRAY_SIZE(ov7675_win_sizes),
+       .set_framerate = ov7675_set_framerate,
+       .get_framerate = ov7675_get_framerate,
+};
+
 static const struct i2c_device_id ov7670_id[] = {
-       { "ov7670", MODEL_OV7670 },
-       { "ov7675", MODEL_OV7675 },
-       { }
+       { "ov7670", (kernel_ulong_t)&ov7670_devdata },
+       { "ov7675", (kernel_ulong_t)&ov7675_devdata },
+       { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(i2c, ov7670_id);
 
-#if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id ov7670_of_match[] = {
-       { .compatible = "ovti,ov7670", },
-       { /* sentinel */ },
+       { .compatible = "ovti,ov7670", &ov7670_devdata },
+       { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, ov7670_of_match);
-#endif
 
 static struct i2c_driver ov7670_driver = {
        .driver = {
                .name   = "ov7670",
-               .of_match_table = of_match_ptr(ov7670_of_match),
+               .of_match_table = ov7670_of_match,
        },
        .probe          = ov7670_probe,
        .remove         = ov7670_remove,
index 386d69c8e074658cc5063a8fc2bad2e6f559889c..7618b58a7ad05380a82f2061faaded2f82c5bb1d 100644 (file)
@@ -433,9 +433,7 @@ struct ov772x_priv {
        struct mutex                      lock;
        int                               power_count;
        int                               streaming;
-#ifdef CONFIG_MEDIA_CONTROLLER
        struct media_pad pad;
-#endif
        enum v4l2_mbus_type               bus_type;
 };
 
@@ -1488,13 +1486,11 @@ static int ov772x_probe(struct i2c_client *client)
        if (ret < 0)
                goto error_gpio_put;
 
-#ifdef CONFIG_MEDIA_CONTROLLER
        priv->pad.flags = MEDIA_PAD_FL_SOURCE;
        priv->subdev.entity.function = MEDIA_ENT_F_CAM_SENSOR;
        ret = media_entity_pads_init(&priv->subdev.entity, 1, &priv->pad);
        if (ret < 0)
                goto error_gpio_put;
-#endif
 
        priv->cfmt = &ov772x_cfmts[0];
        priv->win = &ov772x_win_sizes[0];
index dffdb475e43391e0f4d351569fe93bc95de2fe7f..356a45e65b81bf1bb666dae69a85e4789b3b56e6 100644 (file)
@@ -83,9 +83,7 @@
 
 struct ov7740 {
        struct v4l2_subdev subdev;
-#if defined(CONFIG_MEDIA_CONTROLLER)
        struct media_pad pad;
-#endif
        struct v4l2_mbus_framefmt format;
        const struct ov7740_pixfmt *fmt;  /* Current format */
        const struct ov7740_framesize *frmsize;
@@ -120,7 +118,6 @@ struct ov7740 {
        struct v4l2_ctrl *contrast;
 
        struct mutex mutex;     /* To serialize asynchronus callbacks */
-       bool streaming;         /* Streaming on/off */
 
        struct gpio_desc *resetb_gpio;
        struct gpio_desc *pwdn_gpio;
@@ -618,10 +615,6 @@ static int ov7740_set_stream(struct v4l2_subdev *sd, int enable)
        int ret = 0;
 
        mutex_lock(&ov7740->mutex);
-       if (ov7740->streaming == enable) {
-               mutex_unlock(&ov7740->mutex);
-               return 0;
-       }
 
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -635,8 +628,6 @@ static int ov7740_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       ov7740->streaming = enable;
-
        mutex_unlock(&ov7740->mutex);
        return ret;
 
@@ -807,9 +798,7 @@ static int ov7740_set_fmt(struct v4l2_subdev *sd,
        struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
        const struct ov7740_pixfmt *ovfmt;
        const struct ov7740_framesize *fsize;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
        struct v4l2_mbus_framefmt *mbus_fmt;
-#endif
        int ret;
 
        mutex_lock(&ov7740->mutex);
@@ -822,11 +811,10 @@ static int ov7740_set_fmt(struct v4l2_subdev *sd,
                ret = ov7740_try_fmt_internal(sd, &format->format, NULL, NULL);
                if (ret)
                        goto error;
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
+
                mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state,
                                                      format->pad);
                *mbus_fmt = format->format;
-#endif
                mutex_unlock(&ov7740->mutex);
                return 0;
        }
@@ -851,26 +839,18 @@ static int ov7740_get_fmt(struct v4l2_subdev *sd,
                          struct v4l2_subdev_format *format)
 {
        struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
        struct v4l2_mbus_framefmt *mbus_fmt;
-#endif
-       int ret = 0;
 
        mutex_lock(&ov7740->mutex);
        if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
                mbus_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
                format->format = *mbus_fmt;
-               ret = 0;
-#else
-               ret = -EINVAL;
-#endif
        } else {
                format->format = ov7740->format;
        }
        mutex_unlock(&ov7740->mutex);
 
-       return ret;
+       return 0;
 }
 
 static const struct v4l2_subdev_pad_ops ov7740_subdev_pad_ops = {
@@ -899,7 +879,6 @@ static void ov7740_get_default_format(struct v4l2_subdev *sd,
        format->field = V4L2_FIELD_NONE;
 }
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
 static int ov7740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 {
        struct ov7740 *ov7740 = container_of(sd, struct ov7740, subdev);
@@ -916,7 +895,6 @@ static int ov7740_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
 static const struct v4l2_subdev_internal_ops ov7740_subdev_internal_ops = {
        .open = ov7740_open,
 };
-#endif
 
 static int ov7740_probe_dt(struct i2c_client *client,
                           struct ov7740 *ov7740)
@@ -1094,18 +1072,14 @@ static int ov7740_probe(struct i2c_client *client)
        sd = &ov7740->subdev;
        v4l2_i2c_subdev_init(sd, client, &ov7740_subdev_ops);
 
-#ifdef CONFIG_VIDEO_V4L2_SUBDEV_API
        sd->internal_ops = &ov7740_subdev_internal_ops;
        sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
-#endif
 
-#if defined(CONFIG_MEDIA_CONTROLLER)
        ov7740->pad.flags = MEDIA_PAD_FL_SOURCE;
        sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
        ret = media_entity_pads_init(&sd->entity, 1, &ov7740->pad);
        if (ret)
                return ret;
-#endif
 
        ret = ov7740_set_power(ov7740, 1);
        if (ret)
index f053c3a7676a09e171830c716088dbef6842c7ad..a0f673a24e529929a2abe501853b4b868b064edb 100644 (file)
@@ -1438,9 +1438,6 @@ struct ov8856 {
        /* To serialize asynchronus callbacks */
        struct mutex mutex;
 
-       /* Streaming on/off */
-       bool streaming;
-
        /* lanes index */
        u8 nlanes;
 
@@ -2042,9 +2039,6 @@ static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        int ret = 0;
 
-       if (ov8856->streaming == enable)
-               return 0;
-
        mutex_lock(&ov8856->mutex);
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -2064,7 +2058,6 @@ static int ov8856_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       ov8856->streaming = enable;
        mutex_unlock(&ov8856->mutex);
 
        return ret;
@@ -2125,45 +2118,6 @@ static int ov8856_power_off(struct device *dev)
        return 0;
 }
 
-static int __maybe_unused ov8856_suspend(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov8856 *ov8856 = to_ov8856(sd);
-
-       mutex_lock(&ov8856->mutex);
-       if (ov8856->streaming)
-               ov8856_stop_streaming(ov8856);
-
-       ov8856_power_off(dev);
-       mutex_unlock(&ov8856->mutex);
-
-       return 0;
-}
-
-static int __maybe_unused ov8856_resume(struct device *dev)
-{
-       struct v4l2_subdev *sd = dev_get_drvdata(dev);
-       struct ov8856 *ov8856 = to_ov8856(sd);
-       int ret;
-
-       mutex_lock(&ov8856->mutex);
-
-       ov8856_power_on(dev);
-       if (ov8856->streaming) {
-               ret = ov8856_start_streaming(ov8856);
-               if (ret) {
-                       ov8856->streaming = false;
-                       ov8856_stop_streaming(ov8856);
-                       mutex_unlock(&ov8856->mutex);
-                       return ret;
-               }
-       }
-
-       mutex_unlock(&ov8856->mutex);
-
-       return 0;
-}
-
 static int ov8856_set_format(struct v4l2_subdev *sd,
                             struct v4l2_subdev_state *sd_state,
                             struct v4l2_subdev_format *fmt)
@@ -2501,7 +2455,6 @@ probe_power_off:
 }
 
 static const struct dev_pm_ops ov8856_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ov8856_suspend, ov8856_resume)
        SET_RUNTIME_PM_OPS(ov8856_power_off, ov8856_power_on, NULL)
 };
 
index 068c7449f50ed53bc1c96401a8d63fc68526fecc..bf6dfce1b5dd8867ef120e452b3d59c916f22bad 100644 (file)
@@ -165,7 +165,6 @@ struct ov9282_mode {
  * @cur_mode: Pointer to current selected sensor mode
  * @code: Mbus code currently selected
  * @mutex: Mutex for serializing sensor controls
- * @streaming: Flag indicating streaming state
  */
 struct ov9282 {
        struct device *dev;
@@ -188,7 +187,6 @@ struct ov9282 {
        const struct ov9282_mode *cur_mode;
        u32 code;
        struct mutex mutex;
-       bool streaming;
 };
 
 static const s64 link_freq[] = {
@@ -1037,11 +1035,6 @@ static int ov9282_set_stream(struct v4l2_subdev *sd, int enable)
 
        mutex_lock(&ov9282->mutex);
 
-       if (ov9282->streaming == enable) {
-               mutex_unlock(&ov9282->mutex);
-               return 0;
-       }
-
        if (enable) {
                ret = pm_runtime_resume_and_get(ov9282->dev);
                if (ret)
@@ -1055,8 +1048,6 @@ static int ov9282_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(ov9282->dev);
        }
 
-       ov9282->streaming = enable;
-
        mutex_unlock(&ov9282->mutex);
 
        return 0;
index b6244772bc5933b6c46309cef085fd922f0d65b7..ee33152996055292a35d708ad2cc96b27fa4bd6d 100644 (file)
@@ -337,9 +337,6 @@ struct ov9734 {
 
        /* To serialize asynchronus callbacks */
        struct mutex mutex;
-
-       /* Streaming on/off */
-       bool streaming;
 };
 
 static inline struct ov9734 *to_ov9734(struct v4l2_subdev *subdev)
@@ -660,10 +657,6 @@ static int ov9734_set_stream(struct v4l2_subdev *sd, int enable)
        int ret = 0;
 
        mutex_lock(&ov9734->mutex);
-       if (ov9734->streaming == enable) {
-               mutex_unlock(&ov9734->mutex);
-               return 0;
-       }
 
        if (enable) {
                ret = pm_runtime_resume_and_get(&client->dev);
@@ -683,46 +676,8 @@ static int ov9734_set_stream(struct v4l2_subdev *sd, int enable)
                pm_runtime_put(&client->dev);
        }
 
-       ov9734->streaming = enable;
-       mutex_unlock(&ov9734->mutex);
-
-       return ret;
-}
-
-static int __maybe_unused ov9734_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct ov9734 *ov9734 = to_ov9734(sd);
-
-       mutex_lock(&ov9734->mutex);
-       if (ov9734->streaming)
-               ov9734_stop_streaming(ov9734);
-
        mutex_unlock(&ov9734->mutex);
 
-       return 0;
-}
-
-static int __maybe_unused ov9734_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct ov9734 *ov9734 = to_ov9734(sd);
-       int ret = 0;
-
-       mutex_lock(&ov9734->mutex);
-       if (!ov9734->streaming)
-               goto exit;
-
-       ret = ov9734_start_streaming(ov9734);
-       if (ret) {
-               ov9734->streaming = false;
-               ov9734_stop_streaming(ov9734);
-       }
-
-exit:
-       mutex_unlock(&ov9734->mutex);
        return ret;
 }
 
@@ -1011,10 +966,6 @@ probe_error_v4l2_ctrl_handler_free:
        return ret;
 }
 
-static const struct dev_pm_ops ov9734_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ov9734_suspend, ov9734_resume)
-};
-
 static const struct acpi_device_id ov9734_acpi_ids[] = {
        { "OVTI9734", },
        {}
@@ -1025,7 +976,6 @@ MODULE_DEVICE_TABLE(acpi, ov9734_acpi_ids);
 static struct i2c_driver ov9734_i2c_driver = {
        .driver = {
                .name = "ov9734",
-               .pm = &ov9734_pm_ops,
                .acpi_match_table = ov9734_acpi_ids,
        },
        .probe = ov9734_probe,
index f4e2e2f3972a979bf049d0ba331bb600c769344f..b4647bda8c21f83f4f68cefc5c57228aa9e77f5a 100644 (file)
@@ -625,8 +625,7 @@ error_free_ctrls:
        v4l2_ctrl_handler_free(&dev->ctrls);
 error:
        media_entity_cleanup(&dev->sd.entity);
-       if (dev->sensor)
-               i2c_unregister_device(dev->sensor);
+       i2c_unregister_device(dev->sensor);
 
        dev_err(&client->dev, "probe failed\n");
 
index 30f82ca344c44657a7549de7f93d17790908382f..5dbfb04b31248c02a206779db8209a5b7472bb59 100644 (file)
@@ -1170,14 +1170,9 @@ static int vgxy61_stream_enable(struct vgxy61_dev *sensor)
        if (ret)
                return ret;
 
-       ret = pm_runtime_get_sync(&client->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(&client->dev);
+       ret = pm_runtime_resume_and_get(&client->dev);
+       if (ret)
                return ret;
-       }
-
-       /* pm_runtime_get_sync() can return 1 as a valid return code */
-       ret = 0;
 
        vgxy61_write_reg(sensor, VGXY61_REG_FORMAT_CTRL,
                         get_bpp_by_code(sensor->fmt.code), &ret);
index 566f5eaddd572ef2fa7a1d4192773c741212a028..ce612a47ba84830918515e40d3105c3b003826fb 100644 (file)
@@ -784,8 +784,12 @@ static int tc358746_set_fmt(struct v4l2_subdev *sd,
        sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state, TC358746_SINK);
 
        fmt = tc358746_get_format_by_code(format->pad, format->format.code);
-       if (IS_ERR(fmt))
+       if (IS_ERR(fmt)) {
                fmt = tc358746_get_format_by_code(format->pad, tc358746_def_fmt.code);
+               // Can't happen, but just in case...
+               if (WARN_ON(IS_ERR(fmt)))
+                       return -EINVAL;
+       }
 
        format->format.code = fmt->code;
        format->format.field = V4L2_FIELD_NONE;
index aa6d4b67b6d50be0d1b08083b17498d44b914bb8..c37f605cb75f67173467cda8ff7a42efe6f800e4 100644 (file)
  *     Prabhakar Lad <prabhakar.lad@ti.com>
  */
 
-#include <linux/i2c.h>
-#include <linux/slab.h>
 #include <linux/delay.h>
-#include <linux/videodev2.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/v4l2-mediabus.h>
 #include <linux/of.h>
 #include <linux/of_graph.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
 
+#include <media/i2c/tvp514x.h>
+#include <media/media-entity.h>
 #include <media/v4l2-async.h>
-#include <media/v4l2-device.h>
 #include <media/v4l2-common.h>
-#include <media/v4l2-mediabus.h>
-#include <media/v4l2-fwnode.h>
 #include <media/v4l2-ctrls.h>
-#include <media/i2c/tvp514x.h>
-#include <media/media-entity.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-mediabus.h>
 
 #include "tvp514x_regs.h"
 
@@ -118,7 +119,7 @@ struct tvp514x_decoder {
        struct media_pad pad;
        struct v4l2_mbus_framefmt format;
 
-       struct tvp514x_reg *int_seq;
+       const struct tvp514x_reg *int_seq;
 };
 
 /* TVP514x default register values */
@@ -1024,7 +1025,6 @@ done:
 static int
 tvp514x_probe(struct i2c_client *client)
 {
-       const struct i2c_device_id *id = i2c_client_get_device_id(client);
        struct tvp514x_platform_data *pdata = tvp514x_get_pdata(client);
        struct tvp514x_decoder *decoder;
        struct v4l2_subdev *sd;
@@ -1049,7 +1049,7 @@ tvp514x_probe(struct i2c_client *client)
        memcpy(decoder->tvp514x_regs, tvp514x_reg_list_default,
                        sizeof(tvp514x_reg_list_default));
 
-       decoder->int_seq = (struct tvp514x_reg *)id->driver_data;
+       decoder->int_seq = i2c_get_match_data(client);
 
        /* Copy board specific information here */
        decoder->pdata = pdata;
@@ -1183,29 +1183,26 @@ static const struct tvp514x_reg tvp514xm_init_reg_seq[] = {
  * driver_data - Driver data
  */
 static const struct i2c_device_id tvp514x_id[] = {
-       {"tvp5146", (unsigned long)tvp5146_init_reg_seq},
-       {"tvp5146m2", (unsigned long)tvp514xm_init_reg_seq},
-       {"tvp5147", (unsigned long)tvp5147_init_reg_seq},
-       {"tvp5147m1", (unsigned long)tvp514xm_init_reg_seq},
-       {},
+       {"tvp5146", (kernel_ulong_t)tvp5146_init_reg_seq },
+       {"tvp5146m2", (kernel_ulong_t)tvp514xm_init_reg_seq },
+       {"tvp5147", (kernel_ulong_t)tvp5147_init_reg_seq },
+       {"tvp5147m1", (kernel_ulong_t)tvp514xm_init_reg_seq },
+       { /* sentinel */ }
 };
-
 MODULE_DEVICE_TABLE(i2c, tvp514x_id);
 
-#if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id tvp514x_of_match[] = {
-       { .compatible = "ti,tvp5146", },
-       { .compatible = "ti,tvp5146m2", },
-       { .compatible = "ti,tvp5147", },
-       { .compatible = "ti,tvp5147m1", },
-       { /* sentinel */ },
+       { .compatible = "ti,tvp5146", .data = tvp5146_init_reg_seq },
+       { .compatible = "ti,tvp5146m2", .data = tvp514xm_init_reg_seq },
+       { .compatible = "ti,tvp5147", .data = tvp5147_init_reg_seq },
+       { .compatible = "ti,tvp5147m1", .data = tvp514xm_init_reg_seq },
+       { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, tvp514x_of_match);
-#endif
 
 static struct i2c_driver tvp514x_driver = {
        .driver = {
-               .of_match_table = of_match_ptr(tvp514x_of_match),
+               .of_match_table = tvp514x_of_match,
                .name = TVP514X_MODULE_NAME,
        },
        .probe = tvp514x_probe,
index 537ebd9fa8d7406822c6123a2de9d7a45933e4bd..178bd06cc2ed506c3e4aa74bd359dfcf66d3d189 100644 (file)
@@ -759,7 +759,6 @@ static void video_i2c_release(struct video_device *vdev)
 
 static int video_i2c_probe(struct i2c_client *client)
 {
-       const struct i2c_device_id *id = i2c_client_get_device_id(client);
        struct video_i2c_data *data;
        struct v4l2_device *v4l2_dev;
        struct vb2_queue *queue;
@@ -769,11 +768,8 @@ static int video_i2c_probe(struct i2c_client *client)
        if (!data)
                return -ENOMEM;
 
-       if (dev_fwnode(&client->dev))
-               data->chip = device_get_match_data(&client->dev);
-       else if (id)
-               data->chip = &video_i2c_chip[id->driver_data];
-       else
+       data->chip = i2c_get_match_data(client);
+       if (!data->chip)
                goto error_free_device;
 
        data->regmap = regmap_init_i2c(client, data->chip->regmap_config);
@@ -940,8 +936,8 @@ static const struct dev_pm_ops video_i2c_pm_ops = {
 };
 
 static const struct i2c_device_id video_i2c_id_table[] = {
-       { "amg88xx", AMG88XX },
-       { "mlx90640", MLX90640 },
+       { "amg88xx", (kernel_ulong_t)&video_i2c_chip[AMG88XX] },
+       { "mlx90640", (kernel_ulong_t)&video_i2c_chip[MLX90640] },
        {}
 };
 MODULE_DEVICE_TABLE(i2c, video_i2c_id_table);
index 83468d4a440b3546c53ce4da2b3337018af6f7cf..543a392f863571a89a8d41de813c9b4454244360 100644 (file)
@@ -197,6 +197,7 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
        struct media_device *mdev = entity->graph_obj.mdev;
        struct media_pad *iter;
        unsigned int i = 0;
+       int ret = 0;
 
        if (num_pads >= MEDIA_ENTITY_MAX_PADS)
                return -E2BIG;
@@ -210,15 +211,27 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
        media_entity_for_each_pad(entity, iter) {
                iter->entity = entity;
                iter->index = i++;
+
+               if (hweight32(iter->flags & (MEDIA_PAD_FL_SINK |
+                                            MEDIA_PAD_FL_SOURCE)) != 1) {
+                       ret = -EINVAL;
+                       break;
+               }
+
                if (mdev)
                        media_gobj_create(mdev, MEDIA_GRAPH_PAD,
                                          &iter->graph_obj);
        }
 
+       if (ret && mdev) {
+               media_entity_for_each_pad(entity, iter)
+                       media_gobj_destroy(&iter->graph_obj);
+       }
+
        if (mdev)
                mutex_unlock(&mdev->graph_mutex);
 
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(media_entity_pads_init);
 
index ee095bde0b68699de0806c53f545d85b10047fe2..7f65aa60938834dd9781e17d7cb44abba4df9745 100644 (file)
@@ -13,6 +13,7 @@ if MEDIA_PCI_SUPPORT
 if MEDIA_CAMERA_SUPPORT
        comment "Media capture support"
 
+source "drivers/media/pci/mgb4/Kconfig"
 source "drivers/media/pci/solo6x10/Kconfig"
 source "drivers/media/pci/sta2x11/Kconfig"
 source "drivers/media/pci/tw5864/Kconfig"
index 8bed619b713060969d1e16451377da296263746d..f18c7e15abe3e91c48db0c2c22ae0d699bbc0320 100644 (file)
@@ -32,6 +32,7 @@ obj-$(CONFIG_VIDEO_CX25821) += cx25821/
 obj-$(CONFIG_VIDEO_CX88) += cx88/
 obj-$(CONFIG_VIDEO_DT3155) += dt3155/
 obj-$(CONFIG_VIDEO_IVTV) += ivtv/
+obj-$(CONFIG_VIDEO_MGB4) += mgb4/
 obj-$(CONFIG_VIDEO_SAA7134) += saa7134/
 obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
 obj-$(CONFIG_VIDEO_SOLO6X10) += solo6x10/
index ec78f7fc5e1b6c50a265a5a9360bdce7bb9077a8..867c1308de235679b18cb259eae826d51086a78f 100644 (file)
@@ -126,6 +126,7 @@ MODULE_PARM_DESC(audiodev, "specify audio device:\n"
                "\t\t 3 = tvaudio");
 MODULE_PARM_DESC(saa6588, "if 1, then load the saa6588 RDS module, default (0) is to use the card definition.");
 
+MODULE_FIRMWARE("hcwamc.rbf");
 
 /* I2C addresses list */
 #define I2C_ADDR_TDA7432       0x8a
index aa708a0e5eac6719a26128d1e53158b9ad754f69..09a193bb87df31f1849c968aafc2872fc92367e1 100644 (file)
@@ -3474,6 +3474,7 @@ static void bttv_remove(struct pci_dev *pci_dev)
 
        /* free resources */
        free_irq(btv->c.pci->irq,btv);
+       del_timer_sync(&btv->timeout);
        iounmap(btv->bt848_mmio);
        release_mem_region(pci_resource_start(btv->c.pci,0),
                           pci_resource_len(btv->c.pci,0));
index 4cb890b949c3dfee1160cad54e4c6cd7548d13b3..390cbba6c065a0477dddb21b665dca590bd9eaff 100644 (file)
@@ -190,11 +190,15 @@ static int cx24108_tuner_set_params(struct dvb_frontend *fe)
        u32 freq = c->frequency;
        int i, a, n, pump;
        u32 band, pll;
-       u32 osci[]={950000,1019000,1075000,1178000,1296000,1432000,
-               1576000,1718000,1856000,2036000,2150000};
-       u32 bandsel[]={0,0x00020000,0x00040000,0x00100800,0x00101000,
-               0x00102000,0x00104000,0x00108000,0x00110000,
-               0x00120000,0x00140000};
+       static const u32 osci[] = {
+               950000, 1019000, 1075000, 1178000, 1296000, 1432000,
+               1576000, 1718000, 1856000, 2036000, 2150000
+       };
+       static const u32 bandsel[] = {
+               0, 0x00020000, 0x00040000, 0x00100800, 0x00101000,
+               0x00102000, 0x00104000, 0x00108000, 0x00110000,
+               0x00120000, 0x00140000
+       };
 
        #define XTAL 1011100 /* Hz, really 1.0111 MHz and a /10 prescaler */
        dprintk("cx24108 debug: entering SetTunerFreq, freq=%d\n", freq);
index 74edcc76d12f403a712c308f8e790894650cc206..6e1a0614e6d0699e1f273992a8d115b967491c6f 100644 (file)
@@ -8,6 +8,7 @@
  *  All rights reserved.
  */
 
+#include <linux/bitfield.h>
 #include <linux/delay.h>
 #include <media/i2c/adv7604.h>
 #include <media/i2c/adv7842.h>
@@ -210,17 +211,17 @@ void cobalt_pcie_status_show(struct cobalt *cobalt)
        pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &stat);
        cobalt_info("PCIe link capability 0x%08x: %s per lane and %u lanes\n",
                        capa, get_link_speed(capa),
-                       (capa & PCI_EXP_LNKCAP_MLW) >> 4);
+                       FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
        cobalt_info("PCIe link control 0x%04x\n", ctrl);
        cobalt_info("PCIe link status 0x%04x: %s per lane and %u lanes\n",
                    stat, get_link_speed(stat),
-                   (stat & PCI_EXP_LNKSTA_NLW) >> 4);
+                   FIELD_GET(PCI_EXP_LNKSTA_NLW, stat));
 
        /* Bus */
        pcie_capability_read_dword(pci_bus_dev, PCI_EXP_LNKCAP, &capa);
        cobalt_info("PCIe bus link capability 0x%08x: %s per lane and %u lanes\n",
                        capa, get_link_speed(capa),
-                       (capa & PCI_EXP_LNKCAP_MLW) >> 4);
+                       FIELD_GET(PCI_EXP_LNKCAP_MLW, capa));
 
        /* Slot */
        pcie_capability_read_dword(pci_dev, PCI_EXP_SLTCAP, &capa);
@@ -239,7 +240,7 @@ static unsigned pcie_link_get_lanes(struct cobalt *cobalt)
        if (!pci_is_pcie(pci_dev))
                return 0;
        pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &link);
-       return (link & PCI_EXP_LNKSTA_NLW) >> 4;
+       return FIELD_GET(PCI_EXP_LNKSTA_NLW, link);
 }
 
 static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
@@ -250,7 +251,7 @@ static unsigned pcie_bus_link_get_lanes(struct cobalt *cobalt)
        if (!pci_is_pcie(pci_dev))
                return 0;
        pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &link);
-       return (link & PCI_EXP_LNKCAP_MLW) >> 4;
+       return FIELD_GET(PCI_EXP_LNKCAP_MLW, link);
 }
 
 static void msi_config_show(struct cobalt *cobalt, struct pci_dev *pci_dev)
index 4bfbcca14f60b73728ab67df9cf26aa92c4f8c05..26bf58d17a3dfe9c8de674318884597a4a839f6d 100644 (file)
@@ -107,11 +107,9 @@ static void chain_all_buffers(struct cobalt_stream *s)
 {
        struct sg_dma_desc_info *desc[NR_BUFS];
        struct cobalt_buffer *cb;
-       struct list_head *p;
        int i = 0;
 
-       list_for_each(p, &s->bufs) {
-               cb = list_entry(p, struct cobalt_buffer, list);
+       list_for_each_entry(cb, &s->bufs, list) {
                desc[i] = &s->dma_desc_info[cb->vb.vb2_buf.index];
                if (i > 0)
                        descriptor_list_chain(desc[i-1], desc[i]);
@@ -348,7 +346,6 @@ static void cobalt_dma_stop_streaming(struct cobalt_stream *s)
        struct cobalt *cobalt = s->cobalt;
        struct sg_dma_desc_info *desc;
        struct cobalt_buffer *cb;
-       struct list_head *p;
        unsigned long flags;
        int timeout_msec = 100;
        int rx = s->video_channel;
@@ -367,8 +364,7 @@ static void cobalt_dma_stop_streaming(struct cobalt_stream *s)
 
        /* Try to stop the DMA engine gracefully */
        spin_lock_irqsave(&s->irqlock, flags);
-       list_for_each(p, &s->bufs) {
-               cb = list_entry(p, struct cobalt_buffer, list);
+       list_for_each_entry(cb, &s->bufs, list) {
                desc = &s->dma_desc_info[cb->vb.vb2_buf.index];
                /* Stop DMA after this descriptor chain */
                descriptor_list_end_of_chain(desc);
index 887d2aa364470a711afdd03bbf5ef2ade3271309..af05bde758165e9c08a622c484c1c1808543052a 100644 (file)
@@ -631,7 +631,7 @@ struct cx18 {
        u32 hw2_irq_mask;
 
        struct workqueue_struct *in_work_queue;
-       char in_workq_name[11]; /* "cx18-NN-in" */
+       char in_workq_name[39]; /* "cx18-NN-in" */
        struct cx18_in_work_order in_work_order[CX18_MAX_IN_WORK_ORDERS];
        char epu_debug_str[256]; /* CX18_EPU_DEBUG is rare: use shared space */
 
index 3b283f3c672680ee947bb48222527b6d76457048..a6457c23d18c9e15b1e6793314a9c43e4df8a372 100644 (file)
@@ -831,7 +831,7 @@ int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
        int i;
 
        if (cx == NULL) {
-               CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
+               pr_err("cx == NULL (cmd=%x)\n", cmd);
                return 0;
        }
        if (args > MAX_MB_ARGUMENTS) {
index a8cb981544f71f5cdc193b29e26f84ef3552bae0..407a800c81bc0d92bc2e2fb84bb3f2172fcc748c 100644 (file)
@@ -10,6 +10,12 @@ config INTEL_VSC
        help
          This adds support for Intel Visual Sensing Controller (IVSC).
 
-         Enables the IVSC firmware services required for controlling
-         camera sensor ownership and CSI-2 link through Image Processing
-         Unit(IPU) driver of Intel.
+         The IVSC support is split into two devices, ACE (Algorithm
+         Context Engine) and CSI (Camera Serial Interface), each of which
+         have their own drivers. The ACE is used to select the ownership
+         of the sensor between the IVSC and the host CPU while the CSI is
+         used to both select the routing destination for the data the
+         sensor transmits over the CSI-2 bus between the IVSC and the
+         host CPU and to configure the CSI-2 bus itself.
+
+         The modules will be called ivsc-ace and ivsc-csi.
index a0491f30783119a7fc04e10f6fdb592837956cad..3622271c71c8834bd21f78b3d6d5918d727a5515 100644 (file)
@@ -30,8 +30,6 @@
 #include <linux/uuid.h>
 #include <linux/workqueue.h>
 
-#define        MEI_ACE_DRIVER_NAME     "ivsc_ace"
-
 /* indicating driver message */
 #define        ACE_DRV_MSG             1
 /* indicating set command */
@@ -408,6 +406,9 @@ static int mei_ace_setup_dev_link(struct mei_ace *ace)
        if (!csi_dev) {
                ret = -EPROBE_DEFER;
                goto err;
+       } else if (!dev_fwnode(csi_dev)) {
+               ret = -EPROBE_DEFER;
+               goto err_put;
        }
 
        /* setup link between mei_ace and mei_csi */
@@ -554,14 +555,14 @@ static const struct dev_pm_ops mei_ace_pm_ops = {
                             0x9B, 0x78, 0x03, 0x61, 0x63, 0x5E, 0x24, 0x47)
 
 static const struct mei_cl_device_id mei_ace_tbl[] = {
-       { MEI_ACE_DRIVER_NAME, MEI_ACE_UUID, MEI_CL_VERSION_ANY },
+       { .uuid = MEI_ACE_UUID, .version = MEI_CL_VERSION_ANY },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(mei, mei_ace_tbl);
 
 static struct mei_cl_driver mei_ace_driver = {
        .id_table = mei_ace_tbl,
-       .name = MEI_ACE_DRIVER_NAME,
+       .name = KBUILD_MODNAME,
 
        .probe = mei_ace_probe,
        .remove = mei_ace_remove,
index 00ba611e0f68dd4a9580af6882b8e17a717b1de7..2a6b828fd8dd537e942352204f6d895ab01e3ec6 100644 (file)
@@ -30,7 +30,6 @@
 #include <media/v4l2-fwnode.h>
 #include <media/v4l2-subdev.h>
 
-#define MEI_CSI_DRIVER_NAME "ivsc_csi"
 #define MEI_CSI_ENTITY_NAME "Intel IVSC CSI"
 
 #define MEI_CSI_LINK_FREQ_400MHZ 400000000ULL
@@ -804,14 +803,14 @@ static void mei_csi_remove(struct mei_cl_device *cldev)
                             0xAF, 0x93, 0x7b, 0x44, 0x53, 0xAC, 0x29, 0xDA)
 
 static const struct mei_cl_device_id mei_csi_tbl[] = {
-       { MEI_CSI_DRIVER_NAME, MEI_CSI_UUID, MEI_CL_VERSION_ANY },
+       { .uuid = MEI_CSI_UUID, .version = MEI_CL_VERSION_ANY },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(mei, mei_csi_tbl);
 
 static struct mei_cl_driver mei_csi_driver = {
        .id_table = mei_csi_tbl,
-       .name = MEI_CSI_DRIVER_NAME,
+       .name = KBUILD_MODNAME,
 
        .probe = mei_csi_probe,
        .remove = mei_csi_remove,
diff --git a/drivers/media/pci/mgb4/Kconfig b/drivers/media/pci/mgb4/Kconfig
new file mode 100644 (file)
index 0000000..13fad15
--- /dev/null
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_MGB4
+       tristate "Digiteq Automotive MGB4 support"
+       depends on VIDEO_DEV && PCI && I2C && DMADEVICES && SPI && MTD && IIO
+       select VIDEOBUF2_DMA_SG
+       select IIO_BUFFER
+       select IIO_TRIGGERED_BUFFER
+       select I2C_XILINX
+       select SPI_XILINX
+       select MTD_SPI_NOR
+       select XILINX_XDMA
+       help
+         This is a video4linux driver for Digiteq Automotive MGB4 grabber
+         cards.
+
+         To compile this driver as a module, choose M here: the
+         module will be called mgb4.
diff --git a/drivers/media/pci/mgb4/Makefile b/drivers/media/pci/mgb4/Makefile
new file mode 100644 (file)
index 0000000..e92ead1
--- /dev/null
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+mgb4-objs      := mgb4_regs.o mgb4_core.o mgb4_vin.o mgb4_vout.o \
+               mgb4_sysfs_pci.o mgb4_sysfs_in.o mgb4_sysfs_out.o \
+               mgb4_i2c.o mgb4_cmt.o mgb4_trigger.o mgb4_dma.o
+
+obj-$(CONFIG_VIDEO_MGB4) += mgb4.o
diff --git a/drivers/media/pci/mgb4/mgb4_cmt.c b/drivers/media/pci/mgb4/mgb4_cmt.c
new file mode 100644 (file)
index 0000000..70dc78e
--- /dev/null
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * The CMT module configures the FPGA Clock Management Tile (CMT) registers. For
+ * different video signal frequencies (FPGA input signal frequencies), the FPGA
+ * CMT registers need to be adjusted for the FPGA to work properly. The values
+ * are precomputed based on formulas given by Xilinx in their FPGA documentation
+ * (which are in turn full of some magic values/tables...).
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include "mgb4_core.h"
+#include "mgb4_cmt.h"
+
+static const u16 cmt_vals_out[][15] = {
+       {0x1208, 0x0000, 0x171C, 0x0000, 0x1E38, 0x0000, 0x11C7, 0x0000, 0x1041, 0x01BC, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+       {0x11C7, 0x0000, 0x1619, 0x0080, 0x1C71, 0x0000, 0x130D, 0x0080, 0x0041, 0x0090, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x9000, },
+       {0x11C7, 0x0000, 0x1619, 0x0080, 0x1C71, 0x0000, 0x165A, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+       {0x11C7, 0x0000, 0x1619, 0x0080, 0x1C71, 0x0000, 0x1187, 0x0080, 0x1041, 0x01EE, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+       {0x1186, 0x0000, 0x1555, 0x0000, 0x1AAA, 0x0000, 0x1451, 0x0000, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x11C7, 0x0000, 0x1619, 0x0080, 0x1C71, 0x0000, 0x134E, 0x0080, 0x0041, 0x005E, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x1619, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x179E, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x179F, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x17DF, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x8800, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x128B, 0x0080, 0x0041, 0x00DB, 0x7C01, 0x7DE9, 0xFFFF, 0x9000, 0x0100, },
+       {0x1186, 0x0000, 0x1555, 0x0000, 0x1AAA, 0x0000, 0x1820, 0x0000, 0x0083, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+       {0x1186, 0x0000, 0x1555, 0x0000, 0x1AAA, 0x0000, 0x1187, 0x0080, 0x1041, 0x01EE, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x169B, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x171C, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+       {0x1186, 0x0000, 0x1555, 0x0000, 0x1AAA, 0x0000, 0x1515, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x1493, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x15D8, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x124A, 0x0080, 0x0041, 0x010D, 0x7C01, 0x7DE9, 0xFFFF, 0x9000, 0x0100, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x175D, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x1619, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x17DF, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x8800, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x17E0, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x9000, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x1820, 0x0000, 0x0083, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x13D0, 0x0080, 0x0042, 0x002C, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x128B, 0x0080, 0x0041, 0x00DB, 0x7C01, 0x7DE9, 0xFFFF, 0x9000, 0x0100, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x1820, 0x0000, 0x00C3, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x134E, 0x0080, 0x0041, 0x005E, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x1515, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x175D, 0x0000, 0x00C4, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1145, 0x0000, 0x1452, 0x0080, 0x18E3, 0x0000, 0x11C7, 0x0000, 0x1041, 0x01BC, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1209, 0x0080, 0x0041, 0x013F, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x1100, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1556, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x8000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x179F, 0x0080, 0x00C4, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x15D8, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1105, 0x0080, 0x1041, 0x01E8, 0x6401, 0x65E9, 0xFFFF, 0x9800, 0x1100, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1820, 0x0000, 0x00C4, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x1493, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x138E, 0x0000, 0x0042, 0x005E, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x17E0, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x9000, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x165A, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x175D, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x1187, 0x0080, 0x1041, 0x01EE, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x175E, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x179E, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x134E, 0x0080, 0x0041, 0x005E, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x165A, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x16DC, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x169A, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x11C7, 0x0000, 0x1041, 0x01BC, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x169B, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+       {0x1104, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x171D, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x16DB, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1146, 0x0080, 0x1041, 0x0184, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x171C, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1451, 0x0000, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x171D, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x175D, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1452, 0x0080, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x15D8, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1104, 0x0000, 0x1041, 0x01E8, 0x5801, 0x59E9, 0xFFFF, 0x9900, 0x0900, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x179F, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1515, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x17DF, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x8800, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1659, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1555, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x8000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x14D3, 0x0000, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1820, 0x0000, 0x0083, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1556, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x8000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1187, 0x0080, 0x1041, 0x01EE, 0x7C01, 0x7DE9, 0xFFFF, 0x9900, 0x8100, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1452, 0x0080, 0x0082, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x169B, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1514, 0x0000, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x17E0, 0x0080, 0x00C4, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x9000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x1515, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x10C3, 0x0000, 0x128B, 0x0080, 0x1555, 0x0000, 0x16DC, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1493, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x15D8, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x171D, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1618, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x175D, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x14D4, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1619, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x179E, 0x0000, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x179F, 0x0080, 0x00C3, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1515, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x13D0, 0x0080, 0x0042, 0x002C, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x169A, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x128B, 0x0080, 0x0041, 0x00DB, 0x7C01, 0x7DE9, 0xFFFF, 0x9000, 0x0100, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x169B, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1820, 0x0000, 0x00C3, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1556, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x8000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x16DB, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1411, 0x0080, 0x0042, 0x002C, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x171C, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1597, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x8000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1451, 0x0000, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x171D, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x1800, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x12CC, 0x0080, 0x0041, 0x00A9, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x9000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x175D, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1452, 0x0080, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x15D8, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1900, 0x0100, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x175E, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1492, 0x0000, 0x0042, 0x0013, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x179F, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0800, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1619, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1493, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x17DF, 0x0000, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x8800, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x130D, 0x0080, 0x0041, 0x0090, 0x7C01, 0x7DE9, 0xFFFF, 0x1100, 0x9000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x17E0, 0x0080, 0x0083, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x9000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x14D3, 0x0000, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x165A, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1000, 0x9000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x1820, 0x0000, 0x0083, 0x00FA, 0x7DE9, 0x7DE8, 0xFFFF, 0x0900, 0x9000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x14D4, 0x0080, 0x0042, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x0900, 0x1000, },
+       {0x1082, 0x0000, 0x11C7, 0x0000, 0x138E, 0x0000, 0x169B, 0x0080, 0x0082, 0x00FA, 0x7C01, 0x7DE9, 0xFFFF, 0x1800, 0x0100, },
+};
+
+static const u16 cmt_vals_in[][13] = {
+       {0x1082, 0x0000, 0x5104, 0x0000, 0x11C7, 0x0000, 0x1041, 0x02BC, 0x7C01, 0xFFE9, 0x9900, 0x9908, 0x8100},
+       {0x1104, 0x0000, 0x9208, 0x0000, 0x138E, 0x0000, 0x1041, 0x015E, 0x7C01, 0xFFE9, 0x0100, 0x0908, 0x1000},
+};
+
+static const u32 cmt_addrs_out[][15] = {
+       {0x420, 0x424, 0x428, 0x42C, 0x430, 0x434, 0x450, 0x454, 0x458, 0x460, 0x464, 0x468, 0x4A0, 0x538, 0x53C},
+       {0x620, 0x624, 0x628, 0x62C, 0x630, 0x634, 0x650, 0x654, 0x658, 0x660, 0x664, 0x668, 0x6A0, 0x738, 0x73C},
+};
+
+static const u32 cmt_addrs_in[][13] = {
+       {0x020, 0x024, 0x028, 0x02C, 0x050, 0x054, 0x058, 0x060, 0x064, 0x068, 0x0A0, 0x138, 0x13C},
+       {0x220, 0x224, 0x228, 0x22C, 0x250, 0x254, 0x258, 0x260, 0x264, 0x268, 0x2A0, 0x338, 0x33C},
+};
+
+static const u32 cmt_freq[] = {
+       25000, 25510, 26020, 26530, 26983, 27551, 28000, 28570,
+       29046, 29522, 30000, 30476, 30952, 31546, 32000, 32539,
+       33035, 33571, 33928, 34522, 35000, 35428, 36000, 36571,
+       36904, 37500, 38093, 38571, 39047, 39453, 40000, 40476,
+       40952, 41494, 41964, 42857, 43535, 44047, 44444, 45000,
+       45535, 46029, 46428, 46823, 47617, 48214, 48571, 49107,
+       49523, 50000, 50476, 50892, 51428, 52380, 53333, 53967,
+       54285, 55238, 55555, 55952, 57142, 58095, 58571, 59047,
+       59521, 60000, 60316, 60952, 61428, 61904, 62500, 63092,
+       63491, 64282, 65078, 65476, 66071, 66664, 67142, 67854,
+       68571, 69044, 69642, 70000, 71425, 72616, 73214, 73808,
+       74285, 75000, 75714, 76187, 76785, 77142, 78570, 80000,
+       80357, 80951, 81428, 82142, 82857, 83332, 83928, 84285,
+       85713, 87142, 87500, 88094, 88571, 89285, 90000, 90475,
+       91071, 91428, 92856, 94642,
+};
+
+static size_t freq_srch(u32 key, const u32 *array, size_t size)
+{
+       int l = 0;
+       int r = size - 1;
+       int m = 0;
+
+       while (l <= r) {
+               m = (l + r) / 2;
+               if (array[m] < key)
+                       l = m + 1;
+               else if (array[m] > key)
+                       r = m - 1;
+               else
+                       return m;
+       }
+
+       if (r < 0 || l > size - 1)
+               return m;
+       else
+               return (abs(key - array[l]) < abs(key - array[r])) ? l : r;
+}
+
+u32 mgb4_cmt_set_vout_freq(struct mgb4_vout_dev *voutdev, unsigned int freq)
+{
+       struct mgb4_regs *video = &voutdev->mgbdev->video;
+       const struct mgb4_vout_regs *regs = &voutdev->config->regs;
+       const u16 *reg_set;
+       const u32 *addr;
+       u32 config;
+       size_t i, index;
+
+       index = freq_srch(freq, cmt_freq, ARRAY_SIZE(cmt_freq));
+       addr = cmt_addrs_out[voutdev->config->id];
+       reg_set = cmt_vals_out[index];
+
+       config = mgb4_read_reg(video, regs->config);
+
+       mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3));
+
+       for (i = 0; i < ARRAY_SIZE(cmt_addrs_out[0]); i++)
+               mgb4_write_reg(&voutdev->mgbdev->cmt, addr[i], reg_set[i]);
+
+       mgb4_mask_reg(video, regs->config, 0x100, 0x100);
+       mgb4_mask_reg(video, regs->config, 0x100, 0x0);
+
+       mgb4_write_reg(video, regs->config, config & ~0x1);
+
+       return cmt_freq[index];
+}
+
+void mgb4_cmt_set_vin_freq_range(struct mgb4_vin_dev *vindev,
+                                unsigned int freq_range)
+{
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+       const struct mgb4_vin_regs *regs = &vindev->config->regs;
+       const u16 *reg_set;
+       const u32 *addr;
+       u32 config;
+       size_t i;
+
+       addr = cmt_addrs_in[vindev->config->id];
+       reg_set = cmt_vals_in[freq_range];
+
+       config = mgb4_read_reg(video, regs->config);
+
+       mgb4_write_reg(video, regs->config, 0x1 | (config & ~0x3));
+
+       for (i = 0; i < ARRAY_SIZE(cmt_addrs_in[0]); i++)
+               mgb4_write_reg(&vindev->mgbdev->cmt, addr[i], reg_set[i]);
+
+       mgb4_mask_reg(video, regs->config, 0x1000, 0x1000);
+       mgb4_mask_reg(video, regs->config, 0x1000, 0x0);
+
+       mgb4_write_reg(video, regs->config, config & ~0x1);
+}
diff --git a/drivers/media/pci/mgb4/mgb4_cmt.h b/drivers/media/pci/mgb4/mgb4_cmt.h
new file mode 100644 (file)
index 0000000..b15df56
--- /dev/null
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_CMT_H__
+#define __MGB4_CMT_H__
+
+#include "mgb4_vout.h"
+#include "mgb4_vin.h"
+
+u32 mgb4_cmt_set_vout_freq(struct mgb4_vout_dev *voutdev, unsigned int freq);
+void mgb4_cmt_set_vin_freq_range(struct mgb4_vin_dev *vindev,
+                                unsigned int freq_range);
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_core.c b/drivers/media/pci/mgb4/mgb4_core.c
new file mode 100644 (file)
index 0000000..3efb33f
--- /dev/null
@@ -0,0 +1,686 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This is the driver for the MGB4 video grabber card by Digiteq Automotive.
+ *
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This is the main driver module. The DMA, I2C and SPI sub-drivers are
+ * initialized here and the input/output v4l2 devices are created.
+ *
+ * The mgb4 card uses different expansion modules for different video sources
+ * (GMSL and FPDL3 for now) so in probe() we detect the module type based on
+ * what we see on the I2C bus and check if it matches the FPGA bitstream (there
+ * are different bitstreams for different expansion modules). When no expansion
+ * module is present, we still let the driver initialize to allow flashing of
+ * the FPGA firmware using the SPI FLASH device. No v4l2 video devices are
+ * created in this case.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/dma/amd_xdma.h>
+#include <linux/platform_data/amd_xdma.h>
+#include <linux/spi/xilinx_spi.h>
+#include <linux/mtd/mtd.h>
+#include <linux/hwmon.h>
+#include <linux/debugfs.h>
+#include "mgb4_dma.h"
+#include "mgb4_i2c.h"
+#include "mgb4_sysfs.h"
+#include "mgb4_vout.h"
+#include "mgb4_vin.h"
+#include "mgb4_trigger.h"
+#include "mgb4_core.h"
+
+#define MGB4_USER_IRQS 16
+
+ATTRIBUTE_GROUPS(mgb4_pci);
+
+static int flashid;
+
+static struct xdma_chan_info h2c_chan_info = {
+       .dir = DMA_MEM_TO_DEV,
+};
+
+static struct xdma_chan_info c2h_chan_info = {
+       .dir = DMA_DEV_TO_MEM,
+};
+
+static struct xspi_platform_data spi_platform_data = {
+       .num_chipselect = 1,
+       .bits_per_word = 8
+};
+
+static const struct i2c_board_info extender_info = {
+       I2C_BOARD_INFO("extender", 0x21)
+};
+
+#if IS_REACHABLE(CONFIG_HWMON)
+static umode_t temp_is_visible(const void *data, enum hwmon_sensor_types type,
+                              u32 attr, int channel)
+{
+       if (type == hwmon_temp &&
+           (attr == hwmon_temp_input || attr == hwmon_temp_label))
+               return 0444;
+       else
+               return 0;
+}
+
+static int temp_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+                    int channel, long *val)
+{
+       struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+       u32 val10, raw;
+
+       if (type != hwmon_temp || attr != hwmon_temp_input)
+               return -EOPNOTSUPP;
+
+       raw = mgb4_read_reg(&mgbdev->video, 0xD0);
+       /* register value -> Celsius degrees formula given by Xilinx */
+       val10 = ((((raw >> 20) & 0xFFF) * 503975) - 1118822400) / 409600;
+       *val = val10 * 100;
+
+       return 0;
+}
+
+static int temp_read_string(struct device *dev, enum hwmon_sensor_types type,
+                           u32 attr, int channel, const char **str)
+{
+       if (type != hwmon_temp || attr != hwmon_temp_label)
+               return -EOPNOTSUPP;
+
+       *str = "FPGA Temperature";
+
+       return 0;
+}
+
+static const struct hwmon_ops temp_ops = {
+       .is_visible = temp_is_visible,
+       .read = temp_read,
+       .read_string = temp_read_string
+};
+
+static const struct hwmon_channel_info *temp_channel_info[] = {
+       HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_LABEL),
+       NULL
+};
+
+static const struct hwmon_chip_info temp_chip_info = {
+       .ops = &temp_ops,
+       .info = temp_channel_info,
+};
+#endif
+
+static int match_i2c_adap(struct device *dev, void *data)
+{
+       return i2c_verify_adapter(dev) ? 1 : 0;
+}
+
+static struct i2c_adapter *get_i2c_adap(struct platform_device *pdev)
+{
+       struct device *dev;
+
+       mutex_lock(&pdev->dev.mutex);
+       dev = device_find_child(&pdev->dev, NULL, match_i2c_adap);
+       mutex_unlock(&pdev->dev.mutex);
+
+       return dev ? to_i2c_adapter(dev) : NULL;
+}
+
+static int match_spi_adap(struct device *dev, void *data)
+{
+       return to_spi_device(dev) ? 1 : 0;
+}
+
+static struct spi_master *get_spi_adap(struct platform_device *pdev)
+{
+       struct device *dev;
+
+       mutex_lock(&pdev->dev.mutex);
+       dev = device_find_child(&pdev->dev, NULL, match_spi_adap);
+       mutex_unlock(&pdev->dev.mutex);
+
+       return dev ? container_of(dev, struct spi_master, dev) : NULL;
+}
+
+static int init_spi(struct mgb4_dev *mgbdev)
+{
+       struct resource spi_resources[] = {
+               {
+                       .start  = 0x400,
+                       .end    = 0x47f,
+                       .flags  = IORESOURCE_MEM,
+                       .name   = "io-memory",
+               },
+               {
+                       .start  = 14,
+                       .end    = 14,
+                       .flags  = IORESOURCE_IRQ,
+                       .name   = "irq",
+               },
+       };
+       struct spi_board_info spi_info = {
+               .max_speed_hz = 10000000,
+               .modalias = "m25p80",
+               .chip_select = 0,
+               .mode = SPI_MODE_3,
+       };
+       struct pci_dev *pdev = mgbdev->pdev;
+       struct device *dev = &pdev->dev;
+       struct spi_master *master;
+       struct spi_device *spi_dev;
+       u32 irq;
+       int rv, id;
+       resource_size_t mapbase = pci_resource_start(pdev, MGB4_MGB4_BAR_ID);
+
+       request_module("platform:xilinx_spi");
+
+       irq = xdma_get_user_irq(mgbdev->xdev, 14);
+       xdma_enable_user_irq(mgbdev->xdev, irq);
+
+       spi_resources[0].parent = &pdev->resource[MGB4_MGB4_BAR_ID];
+       spi_resources[0].start += mapbase;
+       spi_resources[0].end += mapbase;
+       spi_resources[1].start = irq;
+       spi_resources[1].end = irq;
+
+       id = pci_dev_id(pdev);
+       mgbdev->spi_pdev = platform_device_register_resndata(dev, "xilinx_spi",
+                                                            id, spi_resources,
+                                                            ARRAY_SIZE(spi_resources),
+                                                            &spi_platform_data,
+                                                            sizeof(spi_platform_data));
+       if (IS_ERR(mgbdev->spi_pdev)) {
+               dev_err(dev, "failed to register SPI device\n");
+               return PTR_ERR(mgbdev->spi_pdev);
+       }
+
+       master = get_spi_adap(mgbdev->spi_pdev);
+       if (!master) {
+               dev_err(dev, "failed to get SPI adapter\n");
+               rv = -EINVAL;
+               goto err_pdev;
+       }
+
+       snprintf(mgbdev->fw_part_name, sizeof(mgbdev->fw_part_name),
+                "mgb4-fw.%d", flashid);
+       mgbdev->partitions[0].name = mgbdev->fw_part_name;
+       mgbdev->partitions[0].size = 0x400000;
+       mgbdev->partitions[0].offset = 0x400000;
+       mgbdev->partitions[0].mask_flags = 0;
+
+       snprintf(mgbdev->data_part_name, sizeof(mgbdev->data_part_name),
+                "mgb4-data.%d", flashid);
+       mgbdev->partitions[1].name = mgbdev->data_part_name;
+       mgbdev->partitions[1].size = 0x10000;
+       mgbdev->partitions[1].offset = 0xFF0000;
+       mgbdev->partitions[1].mask_flags = MTD_CAP_NORFLASH;
+
+       snprintf(mgbdev->flash_name, sizeof(mgbdev->flash_name),
+                "mgb4-flash.%d", flashid);
+       mgbdev->flash_data.name = mgbdev->flash_name;
+       mgbdev->flash_data.parts = mgbdev->partitions;
+       mgbdev->flash_data.nr_parts = ARRAY_SIZE(mgbdev->partitions);
+       mgbdev->flash_data.type = "spi-nor";
+
+       spi_info.platform_data = &mgbdev->flash_data;
+
+       spi_dev = spi_new_device(master, &spi_info);
+       put_device(&master->dev);
+       if (!spi_dev) {
+               dev_err(dev, "failed to create MTD device\n");
+               rv = -EINVAL;
+               goto err_pdev;
+       }
+
+       return 0;
+
+err_pdev:
+       platform_device_unregister(mgbdev->spi_pdev);
+
+       return rv;
+}
+
+static void free_spi(struct mgb4_dev *mgbdev)
+{
+       platform_device_unregister(mgbdev->spi_pdev);
+}
+
+static int init_i2c(struct mgb4_dev *mgbdev)
+{
+       struct resource i2c_resources[] = {
+               {
+                       .start  = 0x200,
+                       .end    = 0x3ff,
+                       .flags  = IORESOURCE_MEM,
+                       .name   = "io-memory",
+               },
+               {
+                       .start  = 15,
+                       .end    = 15,
+                       .flags  = IORESOURCE_IRQ,
+                       .name   = "irq",
+               },
+       };
+       struct pci_dev *pdev = mgbdev->pdev;
+       struct device *dev = &pdev->dev;
+       char clk_name[16];
+       u32 irq;
+       int rv, id;
+       resource_size_t mapbase = pci_resource_start(pdev, MGB4_MGB4_BAR_ID);
+
+       request_module("platform:xiic-i2c");
+
+       irq = xdma_get_user_irq(mgbdev->xdev, 15);
+       xdma_enable_user_irq(mgbdev->xdev, irq);
+
+       i2c_resources[0].parent = &pdev->resource[MGB4_MGB4_BAR_ID];
+       i2c_resources[0].start += mapbase;
+       i2c_resources[0].end += mapbase;
+       i2c_resources[1].start = irq;
+       i2c_resources[1].end = irq;
+
+       id = pci_dev_id(pdev);
+
+       /* create dummy clock required by the xiic-i2c adapter */
+       snprintf(clk_name, sizeof(clk_name), "xiic-i2c.%d", id);
+       mgbdev->i2c_clk = clk_hw_register_fixed_rate(NULL, clk_name, NULL,
+                                                    0, 125000000);
+       if (IS_ERR(mgbdev->i2c_clk)) {
+               dev_err(dev, "failed to register I2C clock\n");
+               return PTR_ERR(mgbdev->i2c_clk);
+       }
+       mgbdev->i2c_cl = clkdev_hw_create(mgbdev->i2c_clk, NULL, "xiic-i2c.%d",
+                                         id);
+       if (!mgbdev->i2c_cl) {
+               dev_err(dev, "failed to register I2C clockdev\n");
+               rv = -ENOMEM;
+               goto err_clk;
+       }
+
+       mgbdev->i2c_pdev = platform_device_register_resndata(dev, "xiic-i2c",
+                                                            id, i2c_resources,
+                                                            ARRAY_SIZE(i2c_resources),
+                                                            NULL, 0);
+       if (IS_ERR(mgbdev->i2c_pdev)) {
+               dev_err(dev, "failed to register I2C device\n");
+               rv = PTR_ERR(mgbdev->i2c_pdev);
+               goto err_clkdev;
+       }
+
+       mgbdev->i2c_adap = get_i2c_adap(mgbdev->i2c_pdev);
+       if (!mgbdev->i2c_adap) {
+               dev_err(dev, "failed to get I2C adapter\n");
+               rv = -EINVAL;
+               goto err_pdev;
+       }
+
+       mutex_init(&mgbdev->i2c_lock);
+
+       return 0;
+
+err_pdev:
+       platform_device_unregister(mgbdev->i2c_pdev);
+err_clkdev:
+       clkdev_drop(mgbdev->i2c_cl);
+err_clk:
+       clk_hw_unregister(mgbdev->i2c_clk);
+
+       return rv;
+}
+
+/* Tear down the I2C stack created by init_i2c().
+ *
+ * Order matters: the adapter reference (taken via get_i2c_adap()) must be
+ * dropped before the xiic-i2c platform device providing it is unregistered;
+ * the clock lookup and the dummy clock are removed last.
+ */
+static void free_i2c(struct mgb4_dev *mgbdev)
+{
+       put_device(&mgbdev->i2c_adap->dev);
+       platform_device_unregister(mgbdev->i2c_pdev);
+       clkdev_drop(mgbdev->i2c_cl);
+       clk_hw_unregister(mgbdev->i2c_clk);
+}
+
+/*
+ * Read the card serial number from the "data" MTD partition.
+ *
+ * Returns 0 on success or a negative error code. mgbdev->serial_number is
+ * left at 0 whenever the number cannot be read.
+ */
+static int get_serial_number(struct mgb4_dev *mgbdev)
+{
+       struct device *dev = &mgbdev->pdev->dev;
+       struct mtd_info *flash;
+       size_t nread;
+       int err;
+
+       mgbdev->serial_number = 0;
+
+       flash = get_mtd_device_nm(mgbdev->data_part_name);
+       if (IS_ERR(flash)) {
+               dev_warn(dev, "failed to get data MTD device\n");
+               return -ENOENT;
+       }
+
+       err = mtd_read(flash, 0, sizeof(mgbdev->serial_number), &nread,
+                      (u_char *)&mgbdev->serial_number);
+       put_mtd_device(flash);
+
+       if (err < 0 || nread != sizeof(mgbdev->serial_number)) {
+               dev_warn(dev, "error reading MTD device\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/*
+ * Detect the extension module type (FPDL3 or GMSL) mounted on the card and
+ * verify it matches the FPGA firmware flavor.
+ *
+ * Returns 0 on success; a negative error code when the module cannot be
+ * read, is of an unknown type, or does not match the firmware.
+ */
+static int get_module_version(struct mgb4_dev *mgbdev)
+{
+       struct device *dev = &mgbdev->pdev->dev;
+       struct mgb4_i2c_client extender;
+       s32 version;
+       u32 fw_version;
+       int rv;
+
+       /* The module version is provided by an I2C I/O extender (reg 0x00) */
+       rv = mgb4_i2c_init(&extender, mgbdev->i2c_adap, &extender_info, 8);
+       if (rv < 0) {
+               dev_err(dev, "failed to create extender I2C device\n");
+               return rv;
+       }
+       version = mgb4_i2c_read_byte(&extender, 0x00);
+       mgb4_i2c_free(&extender);
+       if (version < 0) {
+               dev_err(dev, "error reading module version\n");
+               return -EIO;
+       }
+
+       /* The extender reports the version bits inverted */
+       mgbdev->module_version = ~((u32)version) & 0xff;
+       if (!(MGB4_IS_FPDL3(mgbdev) || MGB4_IS_GMSL(mgbdev))) {
+               dev_err(dev, "unknown module type\n");
+               return -EINVAL;
+       }
+       /* FPGA firmware type (reg 0xC4, top byte) must match the module type */
+       fw_version = mgb4_read_reg(&mgbdev->video, 0xC4);
+       if (fw_version >> 24 != mgbdev->module_version >> 4) {
+               dev_err(dev, "module/firmware type mismatch\n");
+               return -EINVAL;
+       }
+
+       dev_info(dev, "%s module detected\n",
+                MGB4_IS_FPDL3(mgbdev) ? "FPDL3" : "GMSL");
+
+       return 0;
+}
+
+/*
+ * Translate a BAR-relative register resource into an absolute bus address
+ * and ioremap it via mgb4_regs_map(). Returns 0 or a negative error code.
+ */
+static int map_regs(struct pci_dev *pdev, struct resource *res,
+                   struct mgb4_regs *regs)
+{
+       resource_size_t base = pci_resource_start(pdev, MGB4_MGB4_BAR_ID);
+       int err;
+
+       res->start += base;
+       res->end += base;
+
+       err = mgb4_regs_map(res, regs);
+       if (err < 0)
+               dev_err(&pdev->dev, "failed to map %s registers\n", res->name);
+
+       return err;
+}
+
+/*
+ * Create the "xdma" platform device driving the card's PCIe DMA engine and
+ * demultiplexing its MSI-X interrupts.
+ *
+ * Resource 0 is the XDMA register BAR, resource 1 the contiguous MSI-X
+ * vector range (DMA channel IRQs followed by the user IRQs). The slave map
+ * names the c2h (card-to-host) and h2c (host-to-card) channels so they can
+ * later be requested by name in mgb4_dma_channel_init().
+ *
+ * Returns 0 on success or a negative error code.
+ */
+static int init_xdma(struct mgb4_dev *mgbdev)
+{
+       /*
+        * Zero-initialize the platform data: it is copied wholesale
+        * (sizeof(data)) into the platform device below, so any field or
+        * padding left unset would hand uninitialized stack bytes to the
+        * xdma driver.
+        */
+       struct xdma_platdata data = { };
+       struct resource res[2] = { 0 };
+       struct dma_slave_map *map;
+       struct pci_dev *pdev = mgbdev->pdev;
+       struct device *dev = &pdev->dev;
+       int i;
+
+       res[0].start = pci_resource_start(pdev, MGB4_XDMA_BAR_ID);
+       res[0].end = pci_resource_end(pdev, MGB4_XDMA_BAR_ID);
+       res[0].flags = IORESOURCE_MEM;
+       res[0].parent = &pdev->resource[MGB4_XDMA_BAR_ID];
+       res[1].start = pci_irq_vector(pdev, 0);
+       res[1].end = res[1].start + MGB4_VIN_DEVICES + MGB4_VOUT_DEVICES
+                    + MGB4_USER_IRQS - 1;
+       res[1].flags = IORESOURCE_IRQ;
+
+       data.max_dma_channels = MGB4_VIN_DEVICES + MGB4_VOUT_DEVICES;
+       data.device_map = mgbdev->slave_map;
+       data.device_map_cnt = MGB4_VIN_DEVICES + MGB4_VOUT_DEVICES;
+
+       for (i = 0; i < MGB4_VIN_DEVICES; i++) {
+               sprintf(mgbdev->channel_names[i], "c2h%d", i);
+               map = &data.device_map[i];
+               map->slave = mgbdev->channel_names[i];
+               map->devname = dev_name(dev);
+               map->param = XDMA_FILTER_PARAM(&c2h_chan_info);
+       }
+       for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
+               sprintf(mgbdev->channel_names[i + MGB4_VIN_DEVICES], "h2c%d", i);
+               map = &data.device_map[i + MGB4_VIN_DEVICES];
+               map->slave = mgbdev->channel_names[i + MGB4_VIN_DEVICES];
+               map->devname = dev_name(dev);
+               map->param = XDMA_FILTER_PARAM(&h2c_chan_info);
+       }
+
+       mgbdev->xdev = platform_device_register_resndata(dev, "xdma",
+                                                        PLATFORM_DEVID_AUTO, res,
+                                                        2, &data, sizeof(data));
+       if (IS_ERR(mgbdev->xdev)) {
+               dev_err(dev, "failed to register XDMA device\n");
+               return PTR_ERR(mgbdev->xdev);
+       }
+
+       return 0;
+}
+
+/* Unregister the XDMA platform device created by init_xdma() */
+static void free_xdma(struct mgb4_dev *mgbdev)
+{
+       platform_device_unregister(mgbdev->xdev);
+}
+
+static int mgb4_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+       int i, rv;
+       struct mgb4_dev *mgbdev;
+       struct resource video = {
+               .start  = 0x0,
+               .end    = 0x100,
+               .flags  = IORESOURCE_MEM,
+               .name   = "mgb4-video",
+       };
+       struct resource cmt = {
+               .start  = 0x1000,
+               .end    = 0x1800,
+               .flags  = IORESOURCE_MEM,
+               .name   = "mgb4-cmt",
+       };
+       int irqs = pci_msix_vec_count(pdev);
+
+       mgbdev = kzalloc(sizeof(*mgbdev), GFP_KERNEL);
+       if (!mgbdev)
+               return -ENOMEM;
+
+       mgbdev->pdev = pdev;
+       pci_set_drvdata(pdev, mgbdev);
+
+       /* PCIe related stuff */
+       rv = pci_enable_device(pdev);
+       if (rv) {
+               dev_err(&pdev->dev, "error enabling PCI device\n");
+               goto err_mgbdev;
+       }
+
+       rv = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
+       if (rv)
+               dev_warn(&pdev->dev, "error enabling PCIe relaxed ordering\n");
+       rv = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_EXT_TAG);
+       if (rv)
+               dev_warn(&pdev->dev, "error enabling PCIe extended tag field\n");
+       rv = pcie_set_readrq(pdev, 512);
+       if (rv)
+               dev_warn(&pdev->dev, "error setting PCIe max. memory read size\n");
+       pci_set_master(pdev);
+
+       rv = pci_alloc_irq_vectors(pdev, irqs, irqs, PCI_IRQ_MSIX);
+       if (rv < 0) {
+               dev_err(&pdev->dev, "error allocating MSI-X IRQs\n");
+               goto err_enable_pci;
+       }
+
+       rv = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+       if (rv) {
+               dev_err(&pdev->dev, "error setting DMA mask\n");
+               goto err_enable_pci;
+       }
+
+       /* DMA + IRQ engine */
+       rv = init_xdma(mgbdev);
+       if (rv)
+               goto err_alloc_irq;
+       rv = mgb4_dma_channel_init(mgbdev);
+       if (rv)
+               goto err_dma_chan;
+
+       /* mgb4 video registers */
+       rv = map_regs(pdev, &video, &mgbdev->video);
+       if (rv < 0)
+               goto err_dma_chan;
+       /* mgb4 cmt registers */
+       rv = map_regs(pdev, &cmt, &mgbdev->cmt);
+       if (rv < 0)
+               goto err_video_regs;
+
+       /* SPI FLASH */
+       rv = init_spi(mgbdev);
+       if (rv < 0)
+               goto err_cmt_regs;
+
+       /* I2C controller */
+       rv = init_i2c(mgbdev);
+       if (rv < 0)
+               goto err_spi;
+
+       /* PCI card related sysfs attributes */
+       rv = device_add_groups(&pdev->dev, mgb4_pci_groups);
+       if (rv < 0)
+               goto err_i2c;
+
+#if IS_REACHABLE(CONFIG_HWMON)
+       /* HWmon (card temperature) */
+       mgbdev->hwmon_dev = hwmon_device_register_with_info(&pdev->dev, "mgb4",
+                                                           mgbdev,
+                                                           &temp_chip_info,
+                                                           NULL);
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+       mgbdev->debugfs = debugfs_create_dir(dev_name(&pdev->dev), NULL);
+#endif
+
+       /* Get card serial number. On systems without MTD flash support we may
+        * get an error thus ignore the return value. An invalid serial number
+        * should not break anything...
+        */
+       if (get_serial_number(mgbdev) < 0)
+               dev_warn(&pdev->dev, "error reading card serial number\n");
+
+       /* Get module type. If no valid module is found, skip the video device
+        * creation part but do not exit with error to allow flashing the card.
+        */
+       rv = get_module_version(mgbdev);
+       if (rv < 0)
+               goto exit;
+
+       /* Video input v4l2 devices */
+       for (i = 0; i < MGB4_VIN_DEVICES; i++)
+               mgbdev->vin[i] = mgb4_vin_create(mgbdev, i);
+
+       /* Video output v4l2 devices */
+       for (i = 0; i < MGB4_VOUT_DEVICES; i++)
+               mgbdev->vout[i] = mgb4_vout_create(mgbdev, i);
+
+       /* Triggers */
+       mgbdev->indio_dev = mgb4_trigger_create(mgbdev);
+
+exit:
+       flashid++;
+
+       return 0;
+
+err_i2c:
+       free_i2c(mgbdev);
+err_spi:
+       free_spi(mgbdev);
+err_cmt_regs:
+       mgb4_regs_free(&mgbdev->cmt);
+err_video_regs:
+       mgb4_regs_free(&mgbdev->video);
+err_dma_chan:
+       mgb4_dma_channel_free(mgbdev);
+       free_xdma(mgbdev);
+err_alloc_irq:
+       pci_disable_msix(pdev);
+err_enable_pci:
+       pci_disable_device(pdev);
+err_mgbdev:
+       kfree(mgbdev);
+
+       return rv;
+}
+
+/* Driver teardown - undoes mgb4_probe().
+ *
+ * NOTE(review): teardown is not strictly in reverse probe order (SPI before
+ * I2C, video regs before cmt); presumably harmless as these resources are
+ * independent - confirm.
+ */
+static void mgb4_remove(struct pci_dev *pdev)
+{
+       struct mgb4_dev *mgbdev = pci_get_drvdata(pdev);
+       int i;
+
+#ifdef CONFIG_DEBUG_FS
+       debugfs_remove_recursive(mgbdev->debugfs);
+#endif
+#if IS_REACHABLE(CONFIG_HWMON)
+       hwmon_device_unregister(mgbdev->hwmon_dev);
+#endif
+
+       /* V4L2/IIO devices exist only when a valid module was detected */
+       if (mgbdev->indio_dev)
+               mgb4_trigger_free(mgbdev->indio_dev);
+
+       for (i = 0; i < MGB4_VOUT_DEVICES; i++)
+               if (mgbdev->vout[i])
+                       mgb4_vout_free(mgbdev->vout[i]);
+       for (i = 0; i < MGB4_VIN_DEVICES; i++)
+               if (mgbdev->vin[i])
+                       mgb4_vin_free(mgbdev->vin[i]);
+
+       device_remove_groups(&mgbdev->pdev->dev, mgb4_pci_groups);
+       free_spi(mgbdev);
+       free_i2c(mgbdev);
+       mgb4_regs_free(&mgbdev->video);
+       mgb4_regs_free(&mgbdev->cmt);
+
+       mgb4_dma_channel_free(mgbdev);
+       free_xdma(mgbdev);
+
+       pci_disable_msix(mgbdev->pdev);
+       pci_disable_device(mgbdev->pdev);
+
+       kfree(mgbdev);
+}
+
+/* Digiteq Automotive (vendor 0x1ed8), MGB4 grabber card (device 0x0101) */
+static const struct pci_device_id mgb4_pci_ids[] = {
+       { PCI_DEVICE(0x1ed8, 0x0101), },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, mgb4_pci_ids);
+
+static struct pci_driver mgb4_pci_driver = {
+       .name = KBUILD_MODNAME,
+       .id_table = mgb4_pci_ids,
+       .probe = mgb4_probe,
+       .remove = mgb4_remove,
+};
+
+module_pci_driver(mgb4_pci_driver);
+
+MODULE_AUTHOR("Digiteq Automotive s.r.o.");
+MODULE_DESCRIPTION("Digiteq Automotive MGB4 Driver");
+MODULE_LICENSE("GPL");
+/* Make sure the sub-device drivers load before this module */
+MODULE_SOFTDEP("pre: platform:xiic-i2c platform:xilinx_spi spi-nor");
diff --git a/drivers/media/pci/mgb4/mgb4_core.h b/drivers/media/pci/mgb4/mgb4_core.h
new file mode 100644 (file)
index 0000000..2a946e4
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_CORE_H__
+#define __MGB4_CORE_H__
+
+#include <linux/spi/flash.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mutex.h>
+#include <linux/dmaengine.h>
+#include "mgb4_regs.h"
+
+#define MGB4_VIN_DEVICES  2
+#define MGB4_VOUT_DEVICES 2
+
+/* PCI BARs: BAR0 = mgb4 video/cmt registers, BAR1 = XDMA engine */
+#define MGB4_MGB4_BAR_ID  0
+#define MGB4_XDMA_BAR_ID  1
+
+/* The module type is encoded in the upper nibble of module_version */
+#define MGB4_IS_GMSL(mgbdev) \
+       ((mgbdev)->module_version >> 4 == 2)
+#define MGB4_IS_FPDL3(mgbdev) \
+       ((mgbdev)->module_version >> 4 == 1)
+
+/* One XDMA channel plus the completion signalled by its transfer callback */
+struct mgb4_dma_channel {
+       struct dma_chan *chan;
+       struct completion req_compl;
+};
+
+/* Global per-card state */
+struct mgb4_dev {
+       struct pci_dev *pdev;
+       struct platform_device *xdev;   /* XDMA engine platform device */
+       struct mgb4_vin_dev *vin[MGB4_VIN_DEVICES];
+       struct mgb4_vout_dev *vout[MGB4_VOUT_DEVICES];
+
+       /* DMA channels and the name-based slave map used to request them */
+       struct mgb4_dma_channel c2h_chan[MGB4_VIN_DEVICES];
+       struct mgb4_dma_channel h2c_chan[MGB4_VOUT_DEVICES];
+       struct dma_slave_map slave_map[MGB4_VIN_DEVICES + MGB4_VOUT_DEVICES];
+
+       /* BAR0 register spaces */
+       struct mgb4_regs video;
+       struct mgb4_regs cmt;
+
+       /* I2C bus (xiic-i2c platform device) towards the extension module */
+       struct clk_hw *i2c_clk;
+       struct clk_lookup *i2c_cl;
+       struct platform_device *i2c_pdev;
+       struct i2c_adapter *i2c_adap;
+       struct mutex i2c_lock; /* I2C bus access lock */
+
+       /* SPI NOR flash and its partition/channel naming buffers */
+       struct platform_device *spi_pdev;
+       struct flash_platform_data flash_data;
+       struct mtd_partition partitions[2];
+       char flash_name[16];
+       char fw_part_name[16];
+       char data_part_name[16];
+       char channel_names[MGB4_VIN_DEVICES + MGB4_VOUT_DEVICES][16];
+
+       struct iio_dev *indio_dev;      /* trigger device */
+#if IS_REACHABLE(CONFIG_HWMON)
+       struct device *hwmon_dev;
+#endif
+
+       unsigned long io_reconfig;
+
+       u8 module_version;      /* inverted I2C extender reg 0x00 */
+       u32 serial_number;      /* read from the data MTD partition */
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *debugfs;
+#endif
+};
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_dma.c b/drivers/media/pci/mgb4/mgb4_dma.c
new file mode 100644 (file)
index 0000000..cae888e
--- /dev/null
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This module handles the DMA transfers. A standard dmaengine API as provided
+ * by the XDMA module is used.
+ */
+
+#include <linux/pci.h>
+#include <linux/dma-direction.h>
+#include "mgb4_core.h"
+#include "mgb4_dma.h"
+
+/* DMA completion callback - wakes up the waiting mgb4_dma_transfer() */
+static void chan_irq(void *param)
+{
+       struct mgb4_dma_channel *chan = param;
+
+       complete(&chan->req_compl);
+}
+
+/*
+ * Synchronously transfer one scatter-gather list between host memory and
+ * the card.
+ *
+ * @channel: index into the h2c (write) resp. c2h (read) channel array
+ * @write:   true = host-to-card, false = card-to-host
+ * @paddr:   card-side address of the transfer
+ * @sgt:     scatter-gather list of the host buffer; assumed to be already
+ *           DMA-mapped (nents is passed to the prep call) - TODO confirm
+ *           against the callers
+ *
+ * Blocks until the completion callback fires or a 10 s timeout expires.
+ * Returns 0 on success or a negative error code.
+ */
+int mgb4_dma_transfer(struct mgb4_dev *mgbdev, u32 channel, bool write,
+                     u64 paddr, struct sg_table *sgt)
+{
+       struct dma_slave_config cfg;
+       struct mgb4_dma_channel *chan;
+       struct dma_async_tx_descriptor *tx;
+       struct pci_dev *pdev = mgbdev->pdev;
+       int ret;
+
+       memset(&cfg, 0, sizeof(cfg));
+
+       if (write) {
+               cfg.direction = DMA_MEM_TO_DEV;
+               cfg.dst_addr = paddr;
+               cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+               chan = &mgbdev->h2c_chan[channel];
+       } else {
+               cfg.direction = DMA_DEV_TO_MEM;
+               cfg.src_addr = paddr;
+               cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+               chan = &mgbdev->c2h_chan[channel];
+       }
+
+       ret = dmaengine_slave_config(chan->chan, &cfg);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to config dma: %d\n", ret);
+               return ret;
+       }
+
+       tx = dmaengine_prep_slave_sg(chan->chan, sgt->sgl, sgt->nents,
+                                    cfg.direction, 0);
+       if (!tx) {
+               dev_err(&pdev->dev, "failed to prep slave sg\n");
+               return -EIO;
+       }
+
+       /* chan_irq() completes req_compl when the transfer finishes */
+       tx->callback = chan_irq;
+       tx->callback_param = chan;
+
+       ret = dma_submit_error(dmaengine_submit(tx));
+       if (ret) {
+               dev_err(&pdev->dev, "failed to submit sg\n");
+               return -EIO;
+       }
+
+       dma_async_issue_pending(chan->chan);
+
+       /* On timeout the descriptor must be killed before returning */
+       if (!wait_for_completion_timeout(&chan->req_compl,
+                                        msecs_to_jiffies(10000))) {
+               dev_err(&pdev->dev, "dma timeout\n");
+               dmaengine_terminate_sync(chan->chan);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/*
+ * Request all c2h/h2c dmaengine channels by the names registered by the
+ * XDMA slave map (see init_xdma()).
+ *
+ * On failure the pointer of the failed slot is reset to NULL and an error
+ * is returned; channels acquired so far are NOT released here - callers
+ * clean up with mgb4_dma_channel_free(), which skips NULL entries.
+ */
+int mgb4_dma_channel_init(struct mgb4_dev *mgbdev)
+{
+       int i, ret;
+       char name[16];
+       struct pci_dev *pdev = mgbdev->pdev;
+
+       for (i = 0; i < MGB4_VIN_DEVICES; i++) {
+               sprintf(name, "c2h%d", i);
+               mgbdev->c2h_chan[i].chan = dma_request_chan(&pdev->dev, name);
+               if (IS_ERR(mgbdev->c2h_chan[i].chan)) {
+                       dev_err(&pdev->dev, "failed to initialize %s", name);
+                       ret = PTR_ERR(mgbdev->c2h_chan[i].chan);
+                       mgbdev->c2h_chan[i].chan = NULL;
+                       return ret;
+               }
+               init_completion(&mgbdev->c2h_chan[i].req_compl);
+       }
+       for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
+               sprintf(name, "h2c%d", i);
+               mgbdev->h2c_chan[i].chan = dma_request_chan(&pdev->dev, name);
+               if (IS_ERR(mgbdev->h2c_chan[i].chan)) {
+                       dev_err(&pdev->dev, "failed to initialize %s", name);
+                       ret = PTR_ERR(mgbdev->h2c_chan[i].chan);
+                       mgbdev->h2c_chan[i].chan = NULL;
+                       return ret;
+               }
+               init_completion(&mgbdev->h2c_chan[i].req_compl);
+       }
+
+       return 0;
+}
+
+/*
+ * Release every DMA channel previously acquired by mgb4_dma_channel_init().
+ * NULL slots (never acquired, or reset after a failed request) are skipped,
+ * so this is safe to call after a partially failed init.
+ */
+void mgb4_dma_channel_free(struct mgb4_dev *mgbdev)
+{
+       struct mgb4_dma_channel *ch;
+
+       for (ch = mgbdev->c2h_chan; ch < mgbdev->c2h_chan + MGB4_VIN_DEVICES;
+            ch++)
+               if (ch->chan)
+                       dma_release_channel(ch->chan);
+       for (ch = mgbdev->h2c_chan; ch < mgbdev->h2c_chan + MGB4_VOUT_DEVICES;
+            ch++)
+               if (ch->chan)
+                       dma_release_channel(ch->chan);
+}
diff --git a/drivers/media/pci/mgb4/mgb4_dma.h b/drivers/media/pci/mgb4/mgb4_dma.h
new file mode 100644 (file)
index 0000000..4ebc2b1
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_DMA_H__
+#define __MGB4_DMA_H__
+
+#include "mgb4_core.h"
+
+/* Acquire/release the card's c2h and h2c dmaengine channels */
+int mgb4_dma_channel_init(struct mgb4_dev *mgbdev);
+void mgb4_dma_channel_free(struct mgb4_dev *mgbdev);
+
+/* Synchronous SG transfer; write=true means host-to-card. Returns 0/-errno. */
+int mgb4_dma_transfer(struct mgb4_dev *mgbdev, u32 channel, bool write,
+                     u64 paddr, struct sg_table *sgt);
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_i2c.c b/drivers/media/pci/mgb4/mgb4_i2c.c
new file mode 100644 (file)
index 0000000..2697b67
--- /dev/null
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * The i2c module unifies the I2C access to the serializes/deserializes. The I2C
+ * chips on the GMSL module use 16b addressing, the FPDL3 chips use standard
+ * 8b addressing.
+ */
+
+#include "mgb4_i2c.h"
+
+/*
+ * Read @len bytes from a device using 16-bit register addressing: a 2-byte
+ * address write followed by a repeated-start read. Returns 0 on success or
+ * a negative error code.
+ */
+static int read_r16(struct i2c_client *client, u16 reg, u8 *val, int len)
+{
+       u8 addr[2] = { reg >> 8, reg & 0xff };
+       struct i2c_msg xfer[2] = {
+               {
+                       .addr = client->addr,
+                       .flags = 0,
+                       .len = 2,
+                       .buf = addr,
+               }, {
+                       .addr = client->addr,
+                       .flags = I2C_M_RD,
+                       .len = len,
+                       .buf = val,
+               }
+       };
+       int ret;
+
+       ret = i2c_transfer(client->adapter, xfer, 2);
+       if (ret < 0)
+               return ret;
+
+       return ret == 2 ? 0 : -EREMOTEIO;
+}
+
+/*
+ * Write @len bytes (at most 2) to a device using 16-bit register
+ * addressing: the address and the payload go out in one message.
+ * Returns 0 on success or a negative error code.
+ */
+static int write_r16(struct i2c_client *client, u16 reg, const u8 *val, int len)
+{
+       u8 buf[4];
+       struct i2c_msg msg = {
+               .addr = client->addr,
+               .flags = 0,
+               .len = 2 + len,
+               .buf = buf,
+       };
+       int ret;
+
+       if (2 + len > sizeof(buf))
+               return -EINVAL;
+
+       buf[0] = reg >> 8;
+       buf[1] = reg & 0xff;
+       memcpy(&buf[2], val, len);
+
+       ret = i2c_transfer(client->adapter, &msg, 1);
+       if (ret < 0)
+               return ret;
+
+       return ret == 1 ? 0 : -EREMOTEIO;
+}
+
+/*
+ * Create an I2C client device on @adap described by @info.
+ *
+ * @addr_size: register address width in bits - 8 selects plain SMBus
+ * access, any other value selects the 16-bit addressing helpers.
+ * Returns 0 or a negative error code; on error, client->client holds the
+ * ERR_PTR value.
+ */
+int mgb4_i2c_init(struct mgb4_i2c_client *client, struct i2c_adapter *adap,
+                 struct i2c_board_info const *info, int addr_size)
+{
+       client->client = i2c_new_client_device(adap, info);
+       if (IS_ERR(client->client))
+               return PTR_ERR(client->client);
+
+       client->addr_size = addr_size;
+
+       return 0;
+}
+
+/* Destroy an I2C client created by mgb4_i2c_init() */
+void mgb4_i2c_free(struct mgb4_i2c_client *client)
+{
+       i2c_unregister_device(client->client);
+}
+
+/*
+ * Read one byte from register @reg, honoring the client's address width.
+ * Returns the byte value (0-255) or a negative error code.
+ */
+s32 mgb4_i2c_read_byte(struct mgb4_i2c_client *client, u16 reg)
+{
+       u8 data;
+       int err;
+
+       if (client->addr_size == 8)
+               return i2c_smbus_read_byte_data(client->client, reg);
+
+       err = read_r16(client->client, reg, &data, 1);
+       return err < 0 ? err : (s32)data;
+}
+
+/*
+ * Write one byte to register @reg, honoring the client's address width.
+ * Returns 0 or a negative error code.
+ */
+s32 mgb4_i2c_write_byte(struct mgb4_i2c_client *client, u16 reg, u8 val)
+{
+       if (client->addr_size != 8)
+               return write_r16(client->client, reg, &val, 1);
+
+       return i2c_smbus_write_byte_data(client->client, reg, val);
+}
+
+/*
+ * Read-modify-write: update only the bits of @reg selected by @mask with
+ * the corresponding bits of @val. A mask of 0xFF degenerates to a plain
+ * write (no read needed). Returns 0 or a negative error code.
+ */
+s32 mgb4_i2c_mask_byte(struct mgb4_i2c_client *client, u16 reg, u8 mask, u8 val)
+{
+       if (mask != 0xFF) {
+               s32 old = mgb4_i2c_read_byte(client, reg);
+
+               if (old < 0)
+                       return old;
+               val |= (u8)old & ~mask;
+       }
+
+       return mgb4_i2c_write_byte(client, reg, val);
+}
+
+/*
+ * Apply a table of register/mask/value triplets. Stops at the first
+ * failure and returns its error code; returns 0 when all writes succeed.
+ */
+int mgb4_i2c_configure(struct mgb4_i2c_client *client,
+                      const struct mgb4_i2c_kv *values, size_t count)
+{
+       const struct mgb4_i2c_kv *kv;
+       const struct mgb4_i2c_kv *end = values + count;
+
+       for (kv = values; kv < end; kv++) {
+               s32 err = mgb4_i2c_mask_byte(client, kv->reg, kv->mask,
+                                            kv->val);
+
+               if (err < 0)
+                       return err;
+       }
+
+       return 0;
+}
diff --git a/drivers/media/pci/mgb4/mgb4_i2c.h b/drivers/media/pci/mgb4/mgb4_i2c.h
new file mode 100644 (file)
index 0000000..fac6a16
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_I2C_H__
+#define __MGB4_I2C_H__
+
+#include <linux/i2c.h>
+
+/* I2C client handle carrying the register address width of the device */
+struct mgb4_i2c_client {
+       struct i2c_client *client;
+       int addr_size;  /* register address size in bits (8 = SMBus) */
+};
+
+/* One register update: only the bits selected by @mask are set from @val */
+struct mgb4_i2c_kv {
+       u16 reg;
+       u8 mask;
+       u8 val;
+};
+
+/* Create/destroy a client; addr_size is the register address width in bits */
+int mgb4_i2c_init(struct mgb4_i2c_client *client, struct i2c_adapter *adap,
+                 struct i2c_board_info const *info, int addr_size);
+void mgb4_i2c_free(struct mgb4_i2c_client *client);
+
+/* Single-byte accessors; read returns the byte or a negative error code */
+s32 mgb4_i2c_read_byte(struct mgb4_i2c_client *client, u16 reg);
+s32 mgb4_i2c_write_byte(struct mgb4_i2c_client *client, u16 reg, u8 val);
+s32 mgb4_i2c_mask_byte(struct mgb4_i2c_client *client, u16 reg, u8 mask,
+                      u8 val);
+
+/* Apply a table of masked register writes; stops at the first failure */
+int mgb4_i2c_configure(struct mgb4_i2c_client *client,
+                      const struct mgb4_i2c_kv *values, size_t count);
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_io.h b/drivers/media/pci/mgb4/mgb4_io.h
new file mode 100644 (file)
index 0000000..8698db1
--- /dev/null
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_IO_H__
+#define __MGB4_IO_H__
+
+#include <media/v4l2-dev.h>
+
+/* Default video timing; the period presumably derives from a 125 MHz clock
+ * at a 60 Hz default rate - TODO confirm against the timing registers
+ */
+#define MGB4_DEFAULT_WIDTH     1280
+#define MGB4_DEFAULT_HEIGHT    640
+#define MGB4_DEFAULT_PERIOD    (125000000 / 60)
+
+/* Register access error indication */
+#define MGB4_ERR_NO_REG        0xFFFFFFFE
+/* Frame buffer addresses greater than 0xFFFFFFFA indicate HW errors */
+#define MGB4_ERR_QUEUE_TIMEOUT 0xFFFFFFFD
+#define MGB4_ERR_QUEUE_EMPTY   0xFFFFFFFC
+#define MGB4_ERR_QUEUE_FULL    0xFFFFFFFB
+
+/* vb2 buffer wrapper linking the buffer into the driver's queue list */
+struct mgb4_frame_buffer {
+       struct vb2_v4l2_buffer vb;
+       struct list_head list;
+};
+
+/* Recover the wrapping mgb4_frame_buffer from its embedded vb2 buffer */
+static inline struct mgb4_frame_buffer *to_frame_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+       return container_of(vbuf, struct mgb4_frame_buffer, vb);
+}
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_regs.c b/drivers/media/pci/mgb4/mgb4_regs.c
new file mode 100644 (file)
index 0000000..53d4e45
--- /dev/null
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#include <linux/ioport.h>
+#include "mgb4_regs.h"
+
+/*
+ * Reserve and ioremap the register range described by @res.
+ *
+ * NOTE(review): mapsize is computed as end - start, i.e. res->end is
+ * treated as exclusive here - unlike the usual inclusive struct resource
+ * convention; the in-driver callers define their resources accordingly.
+ *
+ * Returns 0 on success, -EINVAL on failure.
+ */
+int mgb4_regs_map(struct resource *res, struct mgb4_regs *regs)
+{
+       regs->mapbase = res->start;
+       regs->mapsize = res->end - res->start;
+
+       if (!request_mem_region(regs->mapbase, regs->mapsize, res->name))
+               return -EINVAL;
+       regs->membase = ioremap(regs->mapbase, regs->mapsize);
+       if (!regs->membase) {
+               release_mem_region(regs->mapbase, regs->mapsize);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Undo mgb4_regs_map(): unmap and release the register range */
+void mgb4_regs_free(struct mgb4_regs *regs)
+{
+       iounmap(regs->membase);
+       release_mem_region(regs->mapbase, regs->mapsize);
+}
diff --git a/drivers/media/pci/mgb4/mgb4_regs.h b/drivers/media/pci/mgb4/mgb4_regs.h
new file mode 100644 (file)
index 0000000..c451808
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_REGS_H__
+#define __MGB4_REGS_H__
+
+#include <linux/io.h>
+
+/* One mapped register window (see mgb4_regs_map()) */
+struct mgb4_regs {
+       resource_size_t mapbase;
+       resource_size_t mapsize;
+       void __iomem *membase;
+};
+
+/* 32-bit register accessors relative to the mapped window */
+#define mgb4_write_reg(regs, offset, val) \
+       iowrite32(val, (regs)->membase + (offset))
+#define  mgb4_read_reg(regs, offset) \
+       ioread32((regs)->membase + (offset))
+
+/* Read-modify-write: update only the bits of @reg selected by @mask */
+static inline void mgb4_mask_reg(struct mgb4_regs *regs, u32 reg, u32 mask,
+                                u32 val)
+{
+       u32 ret = mgb4_read_reg(regs, reg);
+
+       val |= ret & ~mask;
+       mgb4_write_reg(regs, reg, val);
+}
+
+int mgb4_regs_map(struct resource *res, struct mgb4_regs *regs);
+void mgb4_regs_free(struct mgb4_regs *regs);
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs.h b/drivers/media/pci/mgb4/mgb4_sysfs.h
new file mode 100644 (file)
index 0000000..017d82c
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_SYSFS_H__
+#define __MGB4_SYSFS_H__
+
+#include <linux/sysfs.h>
+
+/* Attribute arrays defined in the mgb4_sysfs_*.c files */
+extern struct attribute *mgb4_pci_attrs[];
+extern struct attribute *mgb4_fpdl3_in_attrs[];
+extern struct attribute *mgb4_gmsl_in_attrs[];
+extern struct attribute *mgb4_fpdl3_out_attrs[];
+extern struct attribute *mgb4_gmsl_out_attrs[];
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs_in.c b/drivers/media/pci/mgb4/mgb4_sysfs_in.c
new file mode 100644 (file)
index 0000000..0ba66a2
--- /dev/null
@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This module handles all the sysfs info/configuration that is related to the
+ * v4l2 input devices.
+ */
+
+#include <linux/device.h>
+#include "mgb4_core.h"
+#include "mgb4_i2c.h"
+#include "mgb4_vin.h"
+#include "mgb4_cmt.h"
+#include "mgb4_sysfs.h"
+
+/* Common for both FPDL3 and GMSL */
+
+/* sysfs read: fixed input ID taken from the FPGA configuration */
+/* NOTE(review): kernel sysfs convention now prefers sysfs_emit() over sprintf() */
+static ssize_t input_id_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+
+       return sprintf(buf, "%d\n", vindev->config->id);
+}
+
+/*
+ * sysfs read: OLDI lane width ("0" = single, "1" = dual).
+ *
+ * The setting lives in two places - a deserializer I2C register and FPGA
+ * config register bit 9 - so both are read and cross-checked; a mismatch
+ * means the hardware is in an inconsistent state and -EINVAL is returned.
+ * Register address/mask values differ between GMSL and FPDL3 deserializers.
+ */
+static ssize_t oldi_lane_width_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       struct mgb4_dev *mgbdev = vindev->mgbdev;
+       u16 i2c_reg;
+       u8 i2c_mask, i2c_single_val, i2c_dual_val;
+       u32 config;
+       int ret;
+
+       i2c_reg = MGB4_IS_GMSL(mgbdev) ? 0x1CE : 0x49;
+       i2c_mask = MGB4_IS_GMSL(mgbdev) ? 0x0E : 0x03;
+       i2c_single_val = MGB4_IS_GMSL(mgbdev) ? 0x00 : 0x02;
+       i2c_dual_val = MGB4_IS_GMSL(mgbdev) ? 0x0E : 0x00;
+
+       mutex_lock(&mgbdev->i2c_lock);
+       ret = mgb4_i2c_read_byte(&vindev->deser, i2c_reg);
+       mutex_unlock(&mgbdev->i2c_lock);
+       if (ret < 0)
+               return -EIO;
+
+       config = mgb4_read_reg(&mgbdev->video, vindev->config->regs.config);
+
+       if (((config & (1U << 9)) && ((ret & i2c_mask) != i2c_dual_val)) ||
+           (!(config & (1U << 9)) && ((ret & i2c_mask) != i2c_single_val))) {
+               dev_err(dev, "I2C/FPGA register value mismatch\n");
+               return -EINVAL;
+       }
+
+       return sprintf(buf, "%s\n", config & (1U << 9) ? "1" : "0");
+}
+
+/*
+ * OLDI lane width change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+/*
+ * sysfs write: OLDI lane width (0 = single, 1 = dual). Updates the
+ * deserializer via I2C and mirrors the setting into FPGA config bit 9;
+ * on GMSL the input link is additionally reset to re-train with the new
+ * width. Returns count on success, -EINVAL/-EIO on failure.
+ */
+static ssize_t oldi_lane_width_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       struct mgb4_dev *mgbdev = vindev->mgbdev;
+       u32 fpga_data;
+       u16 i2c_reg;
+       u8 i2c_mask, i2c_data;
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       switch (val) {
+       case 0: /* single */
+               fpga_data = 0;
+               i2c_data = MGB4_IS_GMSL(mgbdev) ? 0x00 : 0x02;
+               break;
+       case 1: /* dual */
+               fpga_data = 1U << 9;
+               i2c_data = MGB4_IS_GMSL(mgbdev) ? 0x0E : 0x00;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       i2c_reg = MGB4_IS_GMSL(mgbdev) ? 0x1CE : 0x49;
+       i2c_mask = MGB4_IS_GMSL(mgbdev) ? 0x0E : 0x03;
+
+       mutex_lock(&mgbdev->i2c_lock);
+       ret = mgb4_i2c_mask_byte(&vindev->deser, i2c_reg, i2c_mask, i2c_data);
+       mutex_unlock(&mgbdev->i2c_lock);
+       if (ret < 0)
+               return -EIO;
+       mgb4_mask_reg(&mgbdev->video, vindev->config->regs.config, 1U << 9,
+                     fpga_data);
+       if (MGB4_IS_GMSL(mgbdev)) {
+               /* reset input link */
+               mutex_lock(&mgbdev->i2c_lock);
+               ret = mgb4_i2c_mask_byte(&vindev->deser, 0x10, 1U << 5, 1U << 5);
+               mutex_unlock(&mgbdev->i2c_lock);
+               if (ret < 0)
+                       return -EIO;
+       }
+
+       return count;
+}
+
+/*
+ * sysfs read: input color mapping. FPGA config bit 8 set means OLDI/JEIDA,
+ * so the printed value is inverted: "0" = OLDI/JEIDA, "1" = SPWG/VESA.
+ */
+static ssize_t color_mapping_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 config = mgb4_read_reg(&vindev->mgbdev->video,
+         vindev->config->regs.config);
+
+       return sprintf(buf, "%s\n", config & (1U << 8) ? "0" : "1");
+}
+
+/*
+ * Color mapping change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+/* sysfs write: 0 = OLDI/JEIDA (sets config bit 8), 1 = SPWG/VESA (clears it) */
+static ssize_t color_mapping_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 fpga_data;
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       switch (val) {
+       case 0: /* OLDI/JEIDA */
+               fpga_data = (1U << 8);
+               break;
+       case 1: /* SPWG/VESA */
+               fpga_data = 0;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       mgb4_mask_reg(&vindev->mgbdev->video, vindev->config->regs.config,
+                     1U << 8, fpga_data);
+
+       return count;
+}
+
+/* sysfs read: input link lock status (FPGA status register bit 2) */
+static ssize_t link_status_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 status = mgb4_read_reg(&vindev->mgbdev->video,
+                                  vindev->config->regs.status);
+
+       return sprintf(buf, "%s\n", status & (1U << 2) ? "1" : "0");
+}
+
+/*
+ * sysfs read: "1" only when all of status bit 14, bit 2 (link) and
+ * bits 9-10 are set, i.e. a valid video stream is being received.
+ */
+static ssize_t stream_status_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 status = mgb4_read_reg(&vindev->mgbdev->video,
+                                  vindev->config->regs.status);
+
+       return sprintf(buf, "%s\n", ((status & (1 << 14)) &&
+                      (status & (1 << 2)) && (status & (3 << 9))) ? "1" : "0");
+}
+
+/* sysfs read: detected input width (upper 16 bits of the resolution register) */
+static ssize_t video_width_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 config = mgb4_read_reg(&vindev->mgbdev->video,
+         vindev->config->regs.resolution);
+
+       return sprintf(buf, "%u\n", config >> 16);
+}
+
+/* sysfs read: detected input height (lower 16 bits of the resolution register) */
+static ssize_t video_height_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 config = mgb4_read_reg(&vindev->mgbdev->video,
+         vindev->config->regs.resolution);
+
+       return sprintf(buf, "%u\n", config & 0xFFFF);
+}
+
+/*
+ * sysfs read: HSYNC polarity. Status bit 11 = sync info valid, bit 12 =
+ * active high. Printed values: 0 = active low, 1 = active high, 2 = N/A.
+ */
+static ssize_t hsync_status_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 status = mgb4_read_reg(&vindev->mgbdev->video,
+                                  vindev->config->regs.status);
+       u32 res;
+
+       if (!(status & (1U << 11)))
+               res = 0x02; // not available
+       else if (status & (1U << 12))
+               res = 0x01; // active high
+       else
+               res = 0x00; // active low
+
+       return sprintf(buf, "%u\n", res);
+}
+
+/*
+ * sysfs read: VSYNC polarity. Status bit 11 = sync info valid, bit 13 =
+ * active high. Printed values: 0 = active low, 1 = active high, 2 = N/A.
+ */
+static ssize_t vsync_status_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 status = mgb4_read_reg(&vindev->mgbdev->video,
+                                  vindev->config->regs.status);
+       u32 res;
+
+       if (!(status & (1U << 11)))
+               res = 0x02; // not available
+       else if (status & (1U << 13))
+               res = 0x01; // active high
+       else
+               res = 0x00; // active low
+
+       return sprintf(buf, "%u\n", res);
+}
+
+/* sysfs read: HSYNC gap length (upper 16 bits of the sync register) */
+static ssize_t hsync_gap_length_show(struct device *dev,
+                                    struct device_attribute *attr,
+                                    char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 sync = mgb4_read_reg(&vindev->mgbdev->video,
+                                vindev->config->regs.sync);
+
+       return sprintf(buf, "%u\n", sync >> 16);
+}
+
+/*
+ * HSYNC gap length change is expected to be called on live streams. Video
+ * device locking/queue check is not needed.
+ */
+/* sysfs write: HSYNC gap length, range 0-65535 (upper half of "sync" reg) */
+static ssize_t hsync_gap_length_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 0xFFFF)
+               return -EINVAL;
+
+       mgb4_mask_reg(&vindev->mgbdev->video, vindev->config->regs.sync,
+                     0xFFFF0000, val << 16);
+
+       return count;
+}
+
+/* sysfs read: VSYNC gap length (lower 16 bits of the sync register) */
+static ssize_t vsync_gap_length_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 sync = mgb4_read_reg(&vindev->mgbdev->video,
+                                vindev->config->regs.sync);
+
+       return sprintf(buf, "%u\n", sync & 0xFFFF);
+}
+
+/*
+ * VSYNC gap length change is expected to be called on live streams. Video
+ * device locking/queue check is not needed.
+ */
+/* sysfs write: VSYNC gap length, range 0-65535 (lower half of "sync" reg) */
+static ssize_t vsync_gap_length_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 0xFFFF)
+               return -EINVAL;
+
+       mgb4_mask_reg(&vindev->mgbdev->video, vindev->config->regs.sync, 0xFFFF,
+                     val);
+
+       return count;
+}
+
+/* sysfs read: measured input pixel clock frequency (raw pclk register value) */
+static ssize_t pclk_frequency_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 freq = mgb4_read_reg(&vindev->mgbdev->video,
+                                vindev->config->regs.pclk);
+
+       return sprintf(buf, "%u\n", freq);
+}
+
+/* sysfs read: HSYNC width (bits 16-23 of the "signal" register) */
+static ssize_t hsync_width_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+                               vindev->config->regs.signal);
+
+       return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
+}
+
+/* sysfs read: VSYNC width (bits 16-23 of the "signal2" register) */
+static ssize_t vsync_width_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+                               vindev->config->regs.signal2);
+
+       return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
+}
+
+/* sysfs read: horizontal back porch (bits 8-15 of the "signal" register) */
+static ssize_t hback_porch_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+                               vindev->config->regs.signal);
+
+       return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
+}
+
+/* sysfs read: horizontal front porch (bits 0-7 of the "signal" register) */
+static ssize_t hfront_porch_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+                               vindev->config->regs.signal);
+
+       return sprintf(buf, "%u\n", (sig & 0x000000FF));
+}
+
+/* sysfs read: vertical back porch (bits 8-15 of the "signal2" register) */
+static ssize_t vback_porch_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+                               vindev->config->regs.signal2);
+
+       return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
+}
+
+/* sysfs read: vertical front porch (bits 0-7 of the "signal2" register) */
+static ssize_t vfront_porch_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&vindev->mgbdev->video,
+                               vindev->config->regs.signal2);
+
+       return sprintf(buf, "%u\n", (sig & 0x000000FF));
+}
+
+/* sysfs read: currently configured pixel clock frequency range (0 or 1) */
+static ssize_t frequency_range_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+
+       return sprintf(buf, "%d\n", vindev->freq_range);
+}
+
+/*
+ * sysfs write: pixel clock frequency range (0 or 1). Reconfigures the CMT,
+ * which must not happen mid-stream - hence the video device lock and the
+ * vb2 busy check (-EBUSY while buffers are queued).
+ */
+static ssize_t frequency_range_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 1)
+               return -EINVAL;
+
+       mutex_lock(vindev->vdev.lock);
+       if (vb2_is_busy(vindev->vdev.queue)) {
+               mutex_unlock(vindev->vdev.lock);
+               return -EBUSY;
+       }
+
+       mgb4_cmt_set_vin_freq_range(vindev, val);
+       vindev->freq_range = val;
+
+       mutex_unlock(vindev->vdev.lock);
+
+       return count;
+}
+
+/* FPDL3 only */
+
+/*
+ * sysfs read (FPDL3 only): FPD-Link input width from deserializer register
+ * 0x34, bits 3-4. Printed: 0 = auto, 1 = single, 2 = dual; any other bit
+ * combination is unexpected and yields -EINVAL.
+ */
+static ssize_t fpdl3_input_width_show(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       s32 ret;
+
+       mutex_lock(&vindev->mgbdev->i2c_lock);
+       ret = mgb4_i2c_read_byte(&vindev->deser, 0x34);
+       mutex_unlock(&vindev->mgbdev->i2c_lock);
+       if (ret < 0)
+               return -EIO;
+
+       switch ((u8)ret & 0x18) {
+       case 0:
+               return sprintf(buf, "0\n");
+       case 0x10:
+               return sprintf(buf, "1\n");
+       case 0x08:
+               return sprintf(buf, "2\n");
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * FPD-Link width change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+/* sysfs write (FPDL3 only): 0 = auto, 1 = single, 2 = dual link width */
+static ssize_t fpdl3_input_width_store(struct device *dev,
+                                      struct device_attribute *attr,
+                                      const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       u8 i2c_data;
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       switch (val) {
+       case 0: /* auto */
+               i2c_data = 0x00;
+               break;
+       case 1: /* single */
+               i2c_data = 0x10;
+               break;
+       case 2: /* dual */
+               i2c_data = 0x08;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       mutex_lock(&vindev->mgbdev->i2c_lock);
+       ret = mgb4_i2c_mask_byte(&vindev->deser, 0x34, 0x18, i2c_data);
+       mutex_unlock(&vindev->mgbdev->i2c_lock);
+       if (ret < 0)
+               return -EIO;
+
+       return count;
+}
+
+/* GMSL only */
+
+/*
+ * sysfs read (GMSL only): link speed mode decoded from three deserializer
+ * registers (0x01, 0x300, 0x03). All three must agree on one of the four
+ * known patterns (0 = 12Gb/s ... 3 = 1.5Gb/s); otherwise -EINVAL.
+ */
+static ssize_t gmsl_mode_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       s32 r1, r300, r3;
+
+       mutex_lock(&vindev->mgbdev->i2c_lock);
+       r1 = mgb4_i2c_read_byte(&vindev->deser, 0x01);
+       r300 = mgb4_i2c_read_byte(&vindev->deser, 0x300);
+       r3 = mgb4_i2c_read_byte(&vindev->deser, 0x03);
+       mutex_unlock(&vindev->mgbdev->i2c_lock);
+       if (r1 < 0 || r300 < 0 || r3 < 0)
+               return -EIO;
+
+       if ((r1 & 0x03) == 0x03 && (r300 & 0x0C) == 0x0C && (r3 & 0xC0) == 0xC0)
+               return sprintf(buf, "0\n");
+       else if ((r1 & 0x03) == 0x02 && (r300 & 0x0C) == 0x08 && (r3 & 0xC0) == 0x00)
+               return sprintf(buf, "1\n");
+       else if ((r1 & 0x03) == 0x01 && (r300 & 0x0C) == 0x04 && (r3 & 0xC0) == 0x00)
+               return sprintf(buf, "2\n");
+       else if ((r1 & 0x03) == 0x00 && (r300 & 0x0C) == 0x00 && (r3 & 0xC0) == 0x00)
+               return sprintf(buf, "3\n");
+       else
+               return -EINVAL;
+}
+
+/*
+ * GMSL mode change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+/*
+ * sysfs write (GMSL only): select link speed (0 = 12Gb/s, 1 = 6Gb/s,
+ * 2 = 3Gb/s, 3 = 1.5Gb/s). Writes the matching register triple and then the
+ * "reset" sequence to re-train the link. The two configure() results are
+ * OR-ed together; a negative error code survives the OR, so any failure
+ * still reports -EIO.
+ */
+static ssize_t gmsl_mode_store(struct device *dev,
+                              struct device_attribute *attr, const char *buf,
+                              size_t count)
+{
+       static const struct mgb4_i2c_kv G12[] = {
+               {0x01, 0x03, 0x03}, {0x300, 0x0C, 0x0C}, {0x03, 0xC0, 0xC0}};
+       static const struct mgb4_i2c_kv G6[] = {
+               {0x01, 0x03, 0x02}, {0x300, 0x0C, 0x08}, {0x03, 0xC0, 0x00}};
+       static const struct mgb4_i2c_kv G3[] = {
+               {0x01, 0x03, 0x01}, {0x300, 0x0C, 0x04}, {0x03, 0xC0, 0x00}};
+       static const struct mgb4_i2c_kv G1[] = {
+               {0x01, 0x03, 0x00}, {0x300, 0x0C, 0x00}, {0x03, 0xC0, 0x00}};
+       static const struct mgb4_i2c_kv reset[] = {
+               {0x10, 1U << 5, 1U << 5}, {0x300, 1U << 6, 1U << 6}};
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       const struct mgb4_i2c_kv *values;
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       switch (val) {
+       case 0: /* 12Gb/s */
+               values = G12;
+               break;
+       case 1: /* 6Gb/s */
+               values = G6;
+               break;
+       case 2: /* 3Gb/s */
+               values = G3;
+               break;
+       case 3: /* 1.5Gb/s */
+               values = G1;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       mutex_lock(&vindev->mgbdev->i2c_lock);
+       ret = mgb4_i2c_configure(&vindev->deser, values, 3);
+       ret |= mgb4_i2c_configure(&vindev->deser, reset, 2);
+       mutex_unlock(&vindev->mgbdev->i2c_lock);
+       if (ret < 0)
+               return -EIO;
+
+       return count;
+}
+
+/* sysfs read (GMSL only): stream ID (deserializer reg 0xA0, bits 0-1) */
+static ssize_t gmsl_stream_id_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       s32 ret;
+
+       mutex_lock(&vindev->mgbdev->i2c_lock);
+       ret = mgb4_i2c_read_byte(&vindev->deser, 0xA0);
+       mutex_unlock(&vindev->mgbdev->i2c_lock);
+       if (ret < 0)
+               return -EIO;
+
+       return sprintf(buf, "%d\n", ret & 0x03);
+}
+
+/*
+ * sysfs write (GMSL only): stream ID 0-3. Unlike the other GMSL settings
+ * this must not change mid-stream, so the video device lock is taken and
+ * -EBUSY is returned while the vb2 queue is in use.
+ */
+static ssize_t gmsl_stream_id_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 3)
+               return -EINVAL;
+
+       mutex_lock(vindev->vdev.lock);
+       if (vb2_is_busy(vindev->vdev.queue)) {
+               mutex_unlock(vindev->vdev.lock);
+               return -EBUSY;
+       }
+
+       mutex_lock(&vindev->mgbdev->i2c_lock);
+       ret = mgb4_i2c_mask_byte(&vindev->deser, 0xA0, 0x03, (u8)val);
+       mutex_unlock(&vindev->mgbdev->i2c_lock);
+
+       mutex_unlock(vindev->vdev.lock);
+
+       return (ret < 0) ? -EIO : count;
+}
+
+/*
+ * sysfs read (GMSL only): FEC state decoded from deserializer regs 0x3E0
+ * and 0x308. Both must agree ("0" = disabled, "1" = enabled); a mixed
+ * state returns -EINVAL.
+ */
+static ssize_t gmsl_fec_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       s32 r3e0, r308;
+
+       mutex_lock(&vindev->mgbdev->i2c_lock);
+       r3e0 = mgb4_i2c_read_byte(&vindev->deser, 0x3E0);
+       r308 = mgb4_i2c_read_byte(&vindev->deser, 0x308);
+       mutex_unlock(&vindev->mgbdev->i2c_lock);
+       if (r3e0 < 0 || r308 < 0)
+               return -EIO;
+
+       if ((r3e0 & 0x07) == 0x00 && (r308 & 0x01) == 0x00)
+               return sprintf(buf, "0\n");
+       else if ((r3e0 & 0x07) == 0x07 && (r308 & 0x01) == 0x01)
+               return sprintf(buf, "1\n");
+       else
+               return -EINVAL;
+}
+
+/*
+ * GMSL FEC change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+/*
+ * sysfs write (GMSL only): 0 = FEC disabled, 1 = FEC enabled. Applies the
+ * register pair and then the link reset sequence; errors from the two
+ * configure() calls are OR-ed (negative result survives) -> -EIO on failure.
+ */
+static ssize_t gmsl_fec_store(struct device *dev, struct device_attribute *attr,
+                             const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vin_dev *vindev = video_get_drvdata(vdev);
+       static const struct mgb4_i2c_kv enable[] = {
+               {0x3E0, 0x07, 0x07}, {0x308, 0x01, 0x01}};
+       static const struct mgb4_i2c_kv disable[] = {
+               {0x3E0, 0x07, 0x00}, {0x308, 0x01, 0x00}};
+       static const struct mgb4_i2c_kv reset[] = {
+               {0x10, 1U << 5, 1U << 5}, {0x300, 1U << 6, 1U << 6}};
+       const struct mgb4_i2c_kv *values;
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       switch (val) {
+       case 0: /* disabled */
+               values = disable;
+               break;
+       case 1: /* enabled */
+               values = enable;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       mutex_lock(&vindev->mgbdev->i2c_lock);
+       ret = mgb4_i2c_configure(&vindev->deser, values, 2);
+       ret |= mgb4_i2c_configure(&vindev->deser, reset, 2);
+       mutex_unlock(&vindev->mgbdev->i2c_lock);
+       if (ret < 0)
+               return -EIO;
+
+       return count;
+}
+
+/* Attribute objects backing the *_show/*_store handlers above */
+static DEVICE_ATTR_RO(input_id);
+static DEVICE_ATTR_RW(oldi_lane_width);
+static DEVICE_ATTR_RW(color_mapping);
+static DEVICE_ATTR_RO(link_status);
+static DEVICE_ATTR_RO(stream_status);
+static DEVICE_ATTR_RO(video_width);
+static DEVICE_ATTR_RO(video_height);
+static DEVICE_ATTR_RO(hsync_status);
+static DEVICE_ATTR_RO(vsync_status);
+static DEVICE_ATTR_RW(hsync_gap_length);
+static DEVICE_ATTR_RW(vsync_gap_length);
+static DEVICE_ATTR_RO(pclk_frequency);
+static DEVICE_ATTR_RO(hsync_width);
+static DEVICE_ATTR_RO(vsync_width);
+static DEVICE_ATTR_RO(hback_porch);
+static DEVICE_ATTR_RO(hfront_porch);
+static DEVICE_ATTR_RO(vback_porch);
+static DEVICE_ATTR_RO(vfront_porch);
+static DEVICE_ATTR_RW(frequency_range);
+
+/* FPDL3-specific attribute */
+static DEVICE_ATTR_RW(fpdl3_input_width);
+
+/* GMSL-specific attributes */
+static DEVICE_ATTR_RW(gmsl_mode);
+static DEVICE_ATTR_RW(gmsl_stream_id);
+static DEVICE_ATTR_RW(gmsl_fec);
+
+/* sysfs attributes exposed on FPDL3 input video devices (NULL-terminated) */
+struct attribute *mgb4_fpdl3_in_attrs[] = {
+       &dev_attr_input_id.attr,
+       &dev_attr_link_status.attr,
+       &dev_attr_stream_status.attr,
+       &dev_attr_video_width.attr,
+       &dev_attr_video_height.attr,
+       &dev_attr_hsync_status.attr,
+       &dev_attr_vsync_status.attr,
+       &dev_attr_oldi_lane_width.attr,
+       &dev_attr_color_mapping.attr,
+       &dev_attr_hsync_gap_length.attr,
+       &dev_attr_vsync_gap_length.attr,
+       &dev_attr_pclk_frequency.attr,
+       &dev_attr_hsync_width.attr,
+       &dev_attr_vsync_width.attr,
+       &dev_attr_hback_porch.attr,
+       &dev_attr_hfront_porch.attr,
+       &dev_attr_vback_porch.attr,
+       &dev_attr_vfront_porch.attr,
+       &dev_attr_frequency_range.attr,
+       &dev_attr_fpdl3_input_width.attr,
+       NULL
+};
+
+/* sysfs attributes exposed on GMSL input video devices (NULL-terminated) */
+struct attribute *mgb4_gmsl_in_attrs[] = {
+       &dev_attr_input_id.attr,
+       &dev_attr_link_status.attr,
+       &dev_attr_stream_status.attr,
+       &dev_attr_video_width.attr,
+       &dev_attr_video_height.attr,
+       &dev_attr_hsync_status.attr,
+       &dev_attr_vsync_status.attr,
+       &dev_attr_oldi_lane_width.attr,
+       &dev_attr_color_mapping.attr,
+       &dev_attr_hsync_gap_length.attr,
+       &dev_attr_vsync_gap_length.attr,
+       &dev_attr_pclk_frequency.attr,
+       &dev_attr_hsync_width.attr,
+       &dev_attr_vsync_width.attr,
+       &dev_attr_hback_porch.attr,
+       &dev_attr_hfront_porch.attr,
+       &dev_attr_vback_porch.attr,
+       &dev_attr_vfront_porch.attr,
+       &dev_attr_frequency_range.attr,
+       &dev_attr_gmsl_mode.attr,
+       &dev_attr_gmsl_stream_id.attr,
+       &dev_attr_gmsl_fec.attr,
+       NULL
+};
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs_out.c b/drivers/media/pci/mgb4/mgb4_sysfs_out.c
new file mode 100644 (file)
index 0000000..9f6e81c
--- /dev/null
@@ -0,0 +1,740 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This module handles all the sysfs info/configuration that is related to the
+ * v4l2 output devices.
+ */
+
+#include <linux/device.h>
+#include <linux/nospec.h>
+#include "mgb4_core.h"
+#include "mgb4_i2c.h"
+#include "mgb4_vout.h"
+#include "mgb4_vin.h"
+#include "mgb4_cmt.h"
+#include "mgb4_sysfs.h"
+
+/*
+ * Count how many outputs currently use the given input as their video
+ * source. Bits 3:2 of an output's config register hold the selected source
+ * id, which is compared against the input's id here.
+ */
+static int loopin_cnt(struct mgb4_vin_dev *vindev)
+{
+       struct mgb4_vout_dev *voutdev;
+       u32 config;
+       int i, cnt = 0;
+
+       for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
+               voutdev = vindev->mgbdev->vout[i];
+               if (!voutdev)
+                       continue;
+
+               config = mgb4_read_reg(&voutdev->mgbdev->video,
+                                      voutdev->config->regs.config);
+               if ((config & 0xc) >> 2 == vindev->config->id)
+                       cnt++;
+       }
+
+       return cnt;
+}
+
+/*
+ * Check whether the video device's vb2 queue is busy, taking the device
+ * lock for the duration of the check.
+ */
+static bool is_busy(struct video_device *dev)
+{
+       bool ret;
+
+       mutex_lock(dev->lock);
+       ret = vb2_is_busy(dev->queue);
+       mutex_unlock(dev->lock);
+
+       return ret;
+}
+
+/* Common for both FPDL3 and GMSL */
+
+/* Report the static id of the output (from the output's configuration) */
+static ssize_t output_id_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+
+       return sprintf(buf, "%d\n", voutdev->config->id);
+}
+
+/* Report the currently selected video source (bits 3:2 of the config reg) */
+static ssize_t video_source_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+         voutdev->config->regs.config);
+
+       return sprintf(buf, "%u\n", (config & 0xc) >> 2);
+}
+
+/*
+ * Video source change may affect the buffer queue of ANY video input/output on
+ * the card thus if any of the inputs/outputs is in use, we do not allow
+ * the change.
+ *
+ * As we do not want to lock all the video devices at the same time, a two-stage
+ * locking strategy is used. In addition to the video device locking there is
+ * a global (PCI device) variable "io_reconfig" atomically checked/set when
+ * the reconfiguration is running. All the video devices check the variable in
+ * their queue_setup() functions and do not allow to start the queue when
+ * the reconfiguration has started.
+ */
+static ssize_t video_source_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       struct mgb4_dev *mgbdev = voutdev->mgbdev;
+       struct mgb4_vin_dev *loopin_new = NULL, *loopin_old = NULL;
+       unsigned long val;
+       ssize_t ret;
+       u32 config;
+       int i;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       /* Source ids occupy 2 bits of the config register, so 0-3 only */
+       if (val > 3)
+               return -EINVAL;
+
+       /* Claim the global reconfiguration flag (see comment above) */
+       if (test_and_set_bit(0, &mgbdev->io_reconfig))
+               return -EBUSY;
+
+       ret = -EBUSY;
+       for (i = 0; i < MGB4_VIN_DEVICES; i++)
+               if (mgbdev->vin[i] && is_busy(&mgbdev->vin[i]->vdev))
+                       goto end;
+       for (i = 0; i < MGB4_VOUT_DEVICES; i++)
+               if (mgbdev->vout[i] && is_busy(&mgbdev->vout[i]->vdev))
+                       goto end;
+
+       config = mgb4_read_reg(&mgbdev->video, voutdev->config->regs.config);
+
+       /*
+        * Source ids below MGB4_VIN_DEVICES select an input device
+        * (loopback); resolve the previously and newly selected inputs.
+        */
+       if (((config & 0xc) >> 2) < MGB4_VIN_DEVICES)
+               loopin_old = mgbdev->vin[(config & 0xc) >> 2];
+       if (val < MGB4_VIN_DEVICES) {
+               val = array_index_nospec(val, MGB4_VIN_DEVICES);
+               loopin_new = mgbdev->vin[val];
+       }
+       /*
+        * Bit 1 of the input config register appears to be the loopback
+        * enable flag: clear it when the old input loses its last consumer,
+        * set it on the newly selected input.
+        */
+       if (loopin_old && loopin_cnt(loopin_old) == 1)
+               mgb4_mask_reg(&mgbdev->video, loopin_old->config->regs.config,
+                             0x2, 0x0);
+       if (loopin_new)
+               mgb4_mask_reg(&mgbdev->video, loopin_new->config->regs.config,
+                             0x2, 0x2);
+
+       /* NOTE(review): bit 1 of the output config register is cleared only
+        * for val == id + MGB4_VIN_DEVICES — presumably the "own" generator
+        * source; confirm against the HW register documentation.
+        */
+       if (val == voutdev->config->id + MGB4_VIN_DEVICES)
+               mgb4_write_reg(&mgbdev->video, voutdev->config->regs.config,
+                              config & ~(1 << 1));
+       else
+               mgb4_write_reg(&mgbdev->video, voutdev->config->regs.config,
+                              config | (1U << 1));
+
+       /* Finally program the new source id into bits 3:2 */
+       mgb4_mask_reg(&mgbdev->video, voutdev->config->regs.config, 0xc,
+                     val << 2);
+
+       ret = count;
+end:
+       clear_bit(0, &mgbdev->io_reconfig);
+
+       return ret;
+}
+
+/* Display width is stored in the upper 16 bits of the resolution register */
+static ssize_t display_width_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+         voutdev->config->regs.resolution);
+
+       return sprintf(buf, "%u\n", config >> 16);
+}
+
+/*
+ * Set the display width (upper 16 bits of the resolution register).
+ * Rejected with -EBUSY while the output's buffer queue is in use.
+ */
+static ssize_t display_width_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 0xFFFF)
+               return -EINVAL;
+
+       mutex_lock(voutdev->vdev.lock);
+       if (vb2_is_busy(voutdev->vdev.queue)) {
+               mutex_unlock(voutdev->vdev.lock);
+               return -EBUSY;
+       }
+
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.resolution,
+                     0xFFFF0000, val << 16);
+
+       mutex_unlock(voutdev->vdev.lock);
+
+       return count;
+}
+
+/* Display height is stored in the lower 16 bits of the resolution register */
+static ssize_t display_height_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+         voutdev->config->regs.resolution);
+
+       return sprintf(buf, "%u\n", config & 0xFFFF);
+}
+
+/*
+ * Set the display height (lower 16 bits of the resolution register).
+ * Rejected with -EBUSY while the output's buffer queue is in use.
+ */
+static ssize_t display_height_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 0xFFFF)
+               return -EINVAL;
+
+       mutex_lock(voutdev->vdev.lock);
+       if (vb2_is_busy(voutdev->vdev.queue)) {
+               mutex_unlock(voutdev->vdev.lock);
+               return -EBUSY;
+       }
+
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.resolution,
+                     0xFFFF, val);
+
+       mutex_unlock(voutdev->vdev.lock);
+
+       return count;
+}
+
+/*
+ * Report the output frame rate, derived from the frame period register
+ * (period presumably counted in 125 MHz clock cycles — hence the
+ * 125000000 / period conversion).
+ */
+static ssize_t frame_rate_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 period = mgb4_read_reg(&voutdev->mgbdev->video,
+                                  voutdev->config->regs.frame_period);
+
+       return sprintf(buf, "%u\n", 125000000 / period);
+}
+
+/*
+ * Frame rate change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t frame_rate_store(struct device *dev,
+                               struct device_attribute *attr, const char *buf,
+                               size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       /* Reject 0: it would cause a division by zero below */
+       if (!val)
+               return -EINVAL;
+
+       mgb4_write_reg(&voutdev->mgbdev->video,
+                      voutdev->config->regs.frame_period, 125000000 / val);
+
+       return count;
+}
+
+/* HSYNC width lives in bits 23:16 of the hsync register */
+static ssize_t hsync_width_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+                               voutdev->config->regs.hsync);
+
+       return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
+}
+
+/*
+ * HSYNC width change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t hsync_width_store(struct device *dev,
+                                struct device_attribute *attr, const char *buf,
+                                size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 0xFF)
+               return -EINVAL;
+
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
+                     0x00FF0000, val << 16);
+
+       return count;
+}
+
+/* VSYNC width lives in bits 23:16 of the vsync register */
+static ssize_t vsync_width_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+                               voutdev->config->regs.vsync);
+
+       return sprintf(buf, "%u\n", (sig & 0x00FF0000) >> 16);
+}
+
+/*
+ * VSYNC width change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t vsync_width_store(struct device *dev,
+                                struct device_attribute *attr, const char *buf,
+                                size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 0xFF)
+               return -EINVAL;
+
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
+                     0x00FF0000, val << 16);
+
+       return count;
+}
+
+/* Horizontal back porch lives in bits 15:8 of the hsync register */
+static ssize_t hback_porch_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+                               voutdev->config->regs.hsync);
+
+       return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
+}
+
+/*
+ * hback porch change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t hback_porch_store(struct device *dev,
+                                struct device_attribute *attr, const char *buf,
+                                size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 0xFF)
+               return -EINVAL;
+
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
+                     0x0000FF00, val << 8);
+
+       return count;
+}
+
+/* Vertical back porch lives in bits 15:8 of the vsync register */
+static ssize_t vback_porch_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+                               voutdev->config->regs.vsync);
+
+       return sprintf(buf, "%u\n", (sig & 0x0000FF00) >> 8);
+}
+
+/*
+ * vback porch change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t vback_porch_store(struct device *dev,
+                                struct device_attribute *attr, const char *buf,
+                                size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 0xFF)
+               return -EINVAL;
+
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
+                     0x0000FF00, val << 8);
+
+       return count;
+}
+
+/* Horizontal front porch lives in bits 7:0 of the hsync register */
+static ssize_t hfront_porch_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+                               voutdev->config->regs.hsync);
+
+       return sprintf(buf, "%u\n", (sig & 0x000000FF));
+}
+
+/*
+ * hfront porch change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t hfront_porch_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 0xFF)
+               return -EINVAL;
+
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
+                     0x000000FF, val);
+
+       return count;
+}
+
+/* Vertical front porch lives in bits 7:0 of the vsync register */
+static ssize_t vfront_porch_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 sig = mgb4_read_reg(&voutdev->mgbdev->video,
+                               voutdev->config->regs.vsync);
+
+       return sprintf(buf, "%u\n", (sig & 0x000000FF));
+}
+
+/*
+ * vfront porch change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t vfront_porch_store(struct device *dev,
+                                 struct device_attribute *attr, const char *buf,
+                                 size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 0xFF)
+               return -EINVAL;
+
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
+                     0x000000FF, val);
+
+       return count;
+}
+
+/* FPDL3 only */
+
+/* HSYNC polarity is bit 31 of the hsync register */
+static ssize_t hsync_polarity_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+         voutdev->config->regs.hsync);
+
+       return sprintf(buf, "%u\n", (config & (1U << 31)) >> 31);
+}
+
+/*
+ * HSYNC polarity change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t hsync_polarity_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 1)
+               return -EINVAL;
+
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.hsync,
+                     (1U << 31), val << 31);
+
+       return count;
+}
+
+/* VSYNC polarity is bit 31 of the vsync register */
+static ssize_t vsync_polarity_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+         voutdev->config->regs.vsync);
+
+       return sprintf(buf, "%u\n", (config & (1U << 31)) >> 31);
+}
+
+/*
+ * VSYNC polarity change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t vsync_polarity_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 1)
+               return -EINVAL;
+
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
+                     (1U << 31), val << 31);
+
+       return count;
+}
+
+/* DE (data enable) polarity is bit 30 of the vsync register */
+static ssize_t de_polarity_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u32 config = mgb4_read_reg(&voutdev->mgbdev->video,
+         voutdev->config->regs.vsync);
+
+       return sprintf(buf, "%u\n", (config & (1U << 30)) >> 30);
+}
+
+/*
+ * DE polarity change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t de_polarity_store(struct device *dev,
+                                struct device_attribute *attr, const char *buf,
+                                size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+       if (val > 1)
+               return -EINVAL;
+
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.vsync,
+                     (1U << 30), val << 30);
+
+       return count;
+}
+
+/*
+ * Report the FPD-Link output width from the serializer (I2C register 0x5B,
+ * bits 1:0): HW value 0 = auto, 1 = single, 3 = dual (reported as 2).
+ */
+static ssize_t fpdl3_output_width_show(struct device *dev,
+                                      struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       s32 ret;
+
+       mutex_lock(&voutdev->mgbdev->i2c_lock);
+       ret = mgb4_i2c_read_byte(&voutdev->ser, 0x5B);
+       mutex_unlock(&voutdev->mgbdev->i2c_lock);
+       if (ret < 0)
+               return -EIO;
+
+       switch ((u8)ret & 0x03) {
+       case 0:
+               return sprintf(buf, "0\n");
+       case 1:
+               return sprintf(buf, "1\n");
+       case 3:
+               return sprintf(buf, "2\n");
+       default:
+               return -EINVAL;
+       }
+}
+
+/*
+ * FPD-Link width change is expected to be called on live streams. Video device
+ * locking/queue check is not needed.
+ */
+static ssize_t fpdl3_output_width_store(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       u8 i2c_data;
+       unsigned long val;
+       int ret;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       /* Map the sysfs value onto the serializer register encoding */
+       switch (val) {
+       case 0: /* auto */
+               i2c_data = 0x00;
+               break;
+       case 1: /* single */
+               i2c_data = 0x01;
+               break;
+       case 2: /* dual */
+               i2c_data = 0x03;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       mutex_lock(&voutdev->mgbdev->i2c_lock);
+       ret = mgb4_i2c_mask_byte(&voutdev->ser, 0x5B, 0x03, i2c_data);
+       mutex_unlock(&voutdev->mgbdev->i2c_lock);
+       if (ret < 0)
+               return -EIO;
+
+       return count;
+}
+
+/* Report the cached pixel clock frequency (set by pclk_frequency_store) */
+static ssize_t pclk_frequency_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+
+       return sprintf(buf, "%u\n", voutdev->freq);
+}
+
+/*
+ * Set the pixel clock frequency. Rejected with -EBUSY while the output's
+ * buffer queue is in use. Frequencies above 50000 are generated at half
+ * rate with a "double pixel" mode enabled (dp below) — the CMT is
+ * programmed with val/2 and the achieved frequency is doubled back.
+ */
+static ssize_t pclk_frequency_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
+{
+       struct video_device *vdev = to_video_device(dev);
+       struct mgb4_vout_dev *voutdev = video_get_drvdata(vdev);
+       unsigned long val;
+       int ret;
+       unsigned int dp;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       mutex_lock(voutdev->vdev.lock);
+       if (vb2_is_busy(voutdev->vdev.queue)) {
+               mutex_unlock(voutdev->vdev.lock);
+               return -EBUSY;
+       }
+
+       dp = (val > 50000) ? 1 : 0;
+       voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, val >> dp) << dp;
+
+       /* Mirror the double-pixel flag into the FPGA (config bit 4) and the
+        * serializer (I2C reg 0x4F bit 6, inverted) */
+       mgb4_mask_reg(&voutdev->mgbdev->video, voutdev->config->regs.config,
+                     0x10, dp << 4);
+       mutex_lock(&voutdev->mgbdev->i2c_lock);
+       ret = mgb4_i2c_mask_byte(&voutdev->ser, 0x4F, 1 << 6, ((~dp) & 1) << 6);
+       mutex_unlock(&voutdev->mgbdev->i2c_lock);
+
+       mutex_unlock(voutdev->vdev.lock);
+
+       return (ret < 0) ? -EIO : count;
+}
+
+static DEVICE_ATTR_RO(output_id);
+static DEVICE_ATTR_RW(video_source);
+static DEVICE_ATTR_RW(display_width);
+static DEVICE_ATTR_RW(display_height);
+static DEVICE_ATTR_RW(frame_rate);
+static DEVICE_ATTR_RW(hsync_polarity);
+static DEVICE_ATTR_RW(vsync_polarity);
+static DEVICE_ATTR_RW(de_polarity);
+static DEVICE_ATTR_RW(pclk_frequency);
+static DEVICE_ATTR_RW(hsync_width);
+static DEVICE_ATTR_RW(vsync_width);
+static DEVICE_ATTR_RW(hback_porch);
+static DEVICE_ATTR_RW(hfront_porch);
+static DEVICE_ATTR_RW(vback_porch);
+static DEVICE_ATTR_RW(vfront_porch);
+
+static DEVICE_ATTR_RW(fpdl3_output_width);
+
+/* Sysfs attributes exposed by FPDL3 video output devices */
+struct attribute *mgb4_fpdl3_out_attrs[] = {
+       &dev_attr_output_id.attr,
+       &dev_attr_video_source.attr,
+       &dev_attr_display_width.attr,
+       &dev_attr_display_height.attr,
+       &dev_attr_frame_rate.attr,
+       &dev_attr_hsync_polarity.attr,
+       &dev_attr_vsync_polarity.attr,
+       &dev_attr_de_polarity.attr,
+       &dev_attr_pclk_frequency.attr,
+       &dev_attr_hsync_width.attr,
+       &dev_attr_vsync_width.attr,
+       &dev_attr_hback_porch.attr,
+       &dev_attr_hfront_porch.attr,
+       &dev_attr_vback_porch.attr,
+       &dev_attr_vfront_porch.attr,
+       &dev_attr_fpdl3_output_width.attr,
+       NULL
+};
+
+/* Sysfs attributes exposed by GMSL video output devices (common set only) */
+struct attribute *mgb4_gmsl_out_attrs[] = {
+       &dev_attr_output_id.attr,
+       &dev_attr_video_source.attr,
+       &dev_attr_display_width.attr,
+       &dev_attr_display_height.attr,
+       &dev_attr_frame_rate.attr,
+       NULL
+};
diff --git a/drivers/media/pci/mgb4/mgb4_sysfs_pci.c b/drivers/media/pci/mgb4/mgb4_sysfs_pci.c
new file mode 100644 (file)
index 0000000..d26935f
--- /dev/null
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This module handles all the sysfs info/configuration that is related to the
+ * PCI card device.
+ */
+
+#include <linux/device.h>
+#include "mgb4_core.h"
+#include "mgb4_sysfs.h"
+
+/* Module version is the low nibble of the module_version byte */
+static ssize_t module_version_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%u\n", mgbdev->module_version & 0x0F);
+}
+
+/* Module type is the high nibble of the module_version byte */
+static ssize_t module_type_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%u\n", mgbdev->module_version >> 4);
+}
+
+/* FW version is the low 16 bits of FPGA register 0xC4 */
+static ssize_t fw_version_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+       u32 config = mgb4_read_reg(&mgbdev->video, 0xC4);
+
+       return sprintf(buf, "%u\n", config & 0xFFFF);
+}
+
+/* FW type is the top 8 bits of FPGA register 0xC4 */
+static ssize_t fw_type_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+       u32 config = mgb4_read_reg(&mgbdev->video, 0xC4);
+
+       return sprintf(buf, "%u\n", config >> 24);
+}
+
+/* Serial number printed as four dotted byte groups, MSB first */
+static ssize_t serial_number_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct mgb4_dev *mgbdev = dev_get_drvdata(dev);
+       u32 sn = mgbdev->serial_number;
+
+       return sprintf(buf, "%03d-%03d-%03d-%03d\n", sn >> 24, (sn >> 16) & 0xFF,
+         (sn >> 8) & 0xFF, sn & 0xFF);
+}
+
+static DEVICE_ATTR_RO(module_version);
+static DEVICE_ATTR_RO(module_type);
+static DEVICE_ATTR_RO(fw_version);
+static DEVICE_ATTR_RO(fw_type);
+static DEVICE_ATTR_RO(serial_number);
+
+/* Sysfs attributes exposed on the PCI card device itself */
+struct attribute *mgb4_pci_attrs[] = {
+       &dev_attr_module_type.attr,
+       &dev_attr_module_version.attr,
+       &dev_attr_fw_type.attr,
+       &dev_attr_fw_version.attr,
+       &dev_attr_serial_number.attr,
+       NULL
+};
diff --git a/drivers/media/pci/mgb4/mgb4_trigger.c b/drivers/media/pci/mgb4/mgb4_trigger.c
new file mode 100644 (file)
index 0000000..923650d
--- /dev/null
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This module handles the IIO trigger device. The card has two signal inputs
+ * for event triggers that can be used to record events related to the video
+ * stream. A standard linux IIO device with triggered buffer capability is
+ * created and configured that can be used to fetch the events with the same
+ * clock source as the video frames.
+ */
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/pci.h>
+#include <linux/dma/amd_xdma.h>
+#include "mgb4_core.h"
+#include "mgb4_trigger.h"
+
+/* Per-IIO-device private state */
+struct trigger_data {
+       struct mgb4_dev *mgbdev; /* owning card */
+       struct iio_trigger *trig; /* the data-ready trigger */
+};
+
+/*
+ * Direct (non-buffered) read of the trigger event register (0xA0).
+ * Refused with -EBUSY while the triggered buffer is active.
+ */
+static int trigger_read_raw(struct iio_dev *indio_dev,
+                           struct iio_chan_spec const *chan, int *val,
+                           int *val2, long mask)
+{
+       struct trigger_data *st = iio_priv(indio_dev);
+
+       switch (mask) {
+       case IIO_CHAN_INFO_RAW:
+               if (iio_buffer_enabled(indio_dev))
+                       return -EBUSY;
+               *val = mgb4_read_reg(&st->mgbdev->video, 0xA0);
+
+               return IIO_VAL_INT;
+       }
+
+       return -EINVAL;
+}
+
+/* Enable/disable the XDMA user IRQ (index 11) backing the trigger */
+static int trigger_set_state(struct iio_trigger *trig, bool state)
+{
+       struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+       struct trigger_data *st = iio_priv(indio_dev);
+       int irq = xdma_get_user_irq(st->mgbdev->xdev, 11);
+
+       if (state)
+               xdma_enable_user_irq(st->mgbdev->xdev, irq);
+       else
+               xdma_disable_user_irq(st->mgbdev->xdev, irq);
+
+       return 0;
+}
+
+static const struct iio_trigger_ops trigger_ops = {
+       .set_trigger_state = &trigger_set_state,
+};
+
+static const struct iio_info trigger_info = {
+       .read_raw         = trigger_read_raw,
+};
+
+/* A single unsigned 32-bit activity channel at scan index _si */
+#define TRIGGER_CHANNEL(_si) {                    \
+       .type = IIO_ACTIVITY,                         \
+       .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+       .scan_index = _si,                            \
+       .scan_type = {                                \
+               .sign = 'u',                              \
+               .realbits = 32,                           \
+               .storagebits = 32,                        \
+               .shift = 0,                               \
+               .endianness = IIO_CPU                     \
+       },                                            \
+}
+
+static const struct iio_chan_spec trigger_channels[] = {
+       TRIGGER_CHANNEL(0),
+       IIO_CHAN_SOFT_TIMESTAMP(1),
+};
+
+/*
+ * Triggered-buffer bottom half: read the event register (0xA0), write the
+ * value back (presumably write-to-clear of the pending events — confirm
+ * against HW docs), push the sample + timestamp to the IIO buffer and ack
+ * the interrupt via bit 11 of register 0xB4.
+ */
+static irqreturn_t trigger_handler(int irq, void *p)
+{
+       struct iio_poll_func *pf = p;
+       struct iio_dev *indio_dev = pf->indio_dev;
+       struct trigger_data *st = iio_priv(indio_dev);
+       struct {
+               u32 data;
+               s64 ts __aligned(8); /* timestamp must be 8-byte aligned */
+       } scan;
+
+       scan.data = mgb4_read_reg(&st->mgbdev->video, 0xA0);
+       mgb4_write_reg(&st->mgbdev->video, 0xA0, scan.data);
+
+       iio_push_to_buffers_with_timestamp(indio_dev, &scan, pf->timestamp);
+       iio_trigger_notify_done(indio_dev->trig);
+
+       mgb4_write_reg(&st->mgbdev->video, 0xB4, 1U << 11);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Allocate and register the data-ready trigger and hook it to the given
+ * IRQ. On success the device's default trigger is set (with a reference
+ * taken). Returns 0 or a negative error, unwinding on failure.
+ */
+static int probe_trigger(struct iio_dev *indio_dev, int irq)
+{
+       int ret;
+       struct trigger_data *st = iio_priv(indio_dev);
+
+       st->trig = iio_trigger_alloc(&st->mgbdev->pdev->dev, "%s-dev%d",
+                                    indio_dev->name, iio_device_id(indio_dev));
+       if (!st->trig)
+               return -ENOMEM;
+
+       ret = request_irq(irq, &iio_trigger_generic_data_rdy_poll, 0,
+                         "mgb4-trigger", st->trig);
+       if (ret)
+               goto error_free_trig;
+
+       st->trig->ops = &trigger_ops;
+       iio_trigger_set_drvdata(st->trig, indio_dev);
+       ret = iio_trigger_register(st->trig);
+       if (ret)
+               goto error_free_irq;
+
+       indio_dev->trig = iio_trigger_get(st->trig);
+
+       return 0;
+
+error_free_irq:
+       free_irq(irq, st->trig);
+error_free_trig:
+       iio_trigger_free(st->trig);
+
+       return ret;
+}
+
+/* Reverse of probe_trigger(): unregister, release the IRQ, free the trigger */
+static void remove_trigger(struct iio_dev *indio_dev, int irq)
+{
+       struct trigger_data *st = iio_priv(indio_dev);
+
+       iio_trigger_unregister(st->trig);
+       free_irq(irq, st->trig);
+       iio_trigger_free(st->trig);
+}
+
+/*
+ * Create and register the card's IIO trigger device: allocate the iio_dev,
+ * set up the trigger on XDMA user IRQ 11, attach a triggered buffer and
+ * register the device. Returns the iio_dev, or NULL on any failure
+ * (everything allocated so far is unwound).
+ */
+struct iio_dev *mgb4_trigger_create(struct mgb4_dev *mgbdev)
+{
+       struct iio_dev *indio_dev;
+       struct trigger_data *data;
+       struct pci_dev *pdev = mgbdev->pdev;
+       struct device *dev = &pdev->dev;
+       int rv, irq;
+
+       indio_dev = iio_device_alloc(dev, sizeof(*data));
+       if (!indio_dev)
+               return NULL;
+
+       indio_dev->info = &trigger_info;
+       indio_dev->name = "mgb4";
+       indio_dev->modes = INDIO_DIRECT_MODE;
+       indio_dev->channels = trigger_channels;
+       indio_dev->num_channels = ARRAY_SIZE(trigger_channels);
+
+       data = iio_priv(indio_dev);
+       data->mgbdev = mgbdev;
+
+       irq = xdma_get_user_irq(mgbdev->xdev, 11);
+       rv = probe_trigger(indio_dev, irq);
+       if (rv < 0) {
+               dev_err(dev, "iio triggered setup failed\n");
+               goto error_alloc;
+       }
+       rv = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+                                       trigger_handler, NULL);
+       if (rv < 0) {
+               dev_err(dev, "iio triggered buffer setup failed\n");
+               goto error_trigger;
+       }
+       rv = iio_device_register(indio_dev);
+       if (rv < 0) {
+               dev_err(dev, "iio device register failed\n");
+               goto error_buffer;
+       }
+
+       return indio_dev;
+
+error_buffer:
+       iio_triggered_buffer_cleanup(indio_dev);
+error_trigger:
+       remove_trigger(indio_dev, irq);
+error_alloc:
+       iio_device_free(indio_dev);
+
+       return NULL;
+}
+
+/* Tear down everything created by mgb4_trigger_create(), in reverse order */
+void mgb4_trigger_free(struct iio_dev *indio_dev)
+{
+       struct trigger_data *st = iio_priv(indio_dev);
+
+       iio_device_unregister(indio_dev);
+       iio_triggered_buffer_cleanup(indio_dev);
+       remove_trigger(indio_dev, xdma_get_user_irq(st->mgbdev->xdev, 11));
+       iio_device_free(indio_dev);
+}
diff --git a/drivers/media/pci/mgb4/mgb4_trigger.h b/drivers/media/pci/mgb4/mgb4_trigger.h
new file mode 100644 (file)
index 0000000..6c25bc4
--- /dev/null
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2022 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+struct iio_dev *mgb4_trigger_create(struct mgb4_dev *mgbdev);
+void mgb4_trigger_free(struct iio_dev *indio_dev);
diff --git a/drivers/media/pci/mgb4/mgb4_vin.c b/drivers/media/pci/mgb4/mgb4_vin.c
new file mode 100644 (file)
index 0000000..d72b07b
--- /dev/null
@@ -0,0 +1,939 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This is the v4l2 input device module. It initializes the signal deserializers
+ * and creates the v4l2 video devices. The input signal can change at any time
+ * which is handled by the "timings" callbacks and an IRQ based watcher, that
+ * emits the V4L2_EVENT_SOURCE_CHANGE event in case of a signal source change.
+ *
+ * When the device is in loopback mode (a direct, in HW, in->out frame passing
+ * mode) the card's frame queue must be running regardless of whether a v4l2
+ * stream is running and the output parameters like frame buffers padding must
+ * be in sync with the input parameters.
+ */
+
+#include <linux/pci.h>
+#include <linux/workqueue.h>
+#include <linux/align.h>
+#include <linux/dma/amd_xdma.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-sg.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include "mgb4_core.h"
+#include "mgb4_dma.h"
+#include "mgb4_sysfs.h"
+#include "mgb4_io.h"
+#include "mgb4_vout.h"
+#include "mgb4_vin.h"
+
+ATTRIBUTE_GROUPS(mgb4_fpdl3_in);
+ATTRIBUTE_GROUPS(mgb4_gmsl_in);
+
+/*
+ * Per-input static configuration:
+ * {id, dma_channel, vin_irq, err_irq, FPGA register offsets}
+ * (field order matches struct mgb4_vin_config).
+ */
+static const struct mgb4_vin_config vin_cfg[] = {
+       {0, 0, 0, 6, {0x10, 0x00, 0x04, 0x08, 0x1C, 0x14, 0x18, 0x20, 0x24, 0x28}},
+       {1, 1, 1, 7, {0x40, 0x30, 0x34, 0x38, 0x4C, 0x44, 0x48, 0x50, 0x54, 0x58}}
+};
+
+static const struct i2c_board_info fpdl3_deser_info[] = {
+       {I2C_BOARD_INFO("deserializer1", 0x38)},
+       {I2C_BOARD_INFO("deserializer2", 0x36)},
+};
+
+static const struct i2c_board_info gmsl_deser_info[] = {
+       {I2C_BOARD_INFO("deserializer1", 0x4C)},
+       {I2C_BOARD_INFO("deserializer2", 0x2A)},
+};
+
+static const struct mgb4_i2c_kv fpdl3_i2c[] = {
+       {0x06, 0xFF, 0x04}, {0x07, 0xFF, 0x01}, {0x45, 0xFF, 0xE8},
+       {0x49, 0xFF, 0x00}, {0x34, 0xFF, 0x00}, {0x23, 0xFF, 0x00}
+};
+
+static const struct mgb4_i2c_kv gmsl_i2c[] = {
+       {0x01, 0x03, 0x03}, {0x300, 0x0C, 0x0C}, {0x03, 0xC0, 0xC0},
+       {0x1CE, 0x0E, 0x0E}, {0x11, 0x05, 0x00}, {0x05, 0xC0, 0x40},
+       {0x307, 0x0F, 0x00}, {0xA0, 0x03, 0x00}, {0x3E0, 0x07, 0x07},
+       {0x308, 0x01, 0x01}, {0x10, 0x20, 0x20}, {0x300, 0x40, 0x40}
+};
+
+/* DV timing limits advertised to user space (VIDIOC_DV_TIMINGS_CAP) */
+static const struct v4l2_dv_timings_cap video_timings_cap = {
+       .type = V4L2_DV_BT_656_1120,
+       .bt = {
+               .min_width = 320,
+               .max_width = 4096,
+               .min_height = 240,
+               .max_height = 2160,
+               .min_pixelclock = 1843200, /* 320 x 240 x 24Hz */
+               .max_pixelclock = 530841600, /* 4096 x 2160 x 60Hz */
+               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+                       V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
+               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+                       V4L2_DV_BT_CAP_CUSTOM,
+       },
+};
+
+/*
+ * Returns the video output connected with the given video input if the input
+ * is in loopback mode.
+ */
+/*
+ * Returns the video output connected with the given video input if the input
+ * is in loopback mode.
+ *
+ * @i: index of the output (0 .. MGB4_VOUT_DEVICES - 1) to check.
+ * Returns NULL when output @i does not exist or is not sourced from this
+ * input. Bits [3:2] of the output CONFIG register select the source input.
+ */
+static struct mgb4_vout_dev *loopback_dev(struct mgb4_vin_dev *vindev, int i)
+{
+       struct mgb4_vout_dev *voutdev;
+       u32 config;
+
+       voutdev = vindev->mgbdev->vout[i];
+       if (!voutdev)
+               return NULL;
+
+       config = mgb4_read_reg(&voutdev->mgbdev->video,
+                              voutdev->config->regs.config);
+       if ((config & 0xc) >> 2 == vindev->config->id)
+               return voutdev;
+
+       return NULL;
+}
+
+/*
+ * Check, whether the loopback mode - a HW INPUT->OUTPUT transmission - is
+ * enabled on the given input.
+ */
+/*
+ * Check, whether the loopback mode - a HW INPUT->OUTPUT transmission - is
+ * enabled on the given input.
+ *
+ * Returns 1 when at least one output is sourced from this input, 0 otherwise.
+ */
+static int loopback_active(struct mgb4_vin_dev *vindev)
+{
+       int i;
+
+       for (i = 0; i < MGB4_VOUT_DEVICES; i++)
+               if (loopback_dev(vindev, i))
+                       return 1;
+
+       return 0;
+}
+
+/*
+ * Set the output frame buffer padding of all outputs connected with the given
+ * input when the video input is set to loopback mode. The paddings must be
+ * the same for the loopback to work properly.
+ */
+/*
+ * Set the output frame buffer padding of all outputs connected with the given
+ * input when the video input is set to loopback mode. The paddings must be
+ * the same for the loopback to work properly.
+ *
+ * @padding: padding value (in pixels) written to each connected output's
+ * PADDING register.
+ */
+static void set_loopback_padding(struct mgb4_vin_dev *vindev, u32 padding)
+{
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+       struct mgb4_vout_dev *voutdev;
+       int i;
+
+       for (i = 0; i < MGB4_VOUT_DEVICES; i++) {
+               voutdev = loopback_dev(vindev, i);
+               if (voutdev)
+                       mgb4_write_reg(video, voutdev->config->regs.padding,
+                                      padding);
+       }
+}
+
+/*
+ * Read the currently detected input signal timings from the FPGA registers
+ * and convert them into a v4l2_dv_timings structure.
+ *
+ * Returns 0 on success, -ENOLCK when the input PLL is not locked
+ * (STATUS bit 2 clear), -ENOLINK when no signal is present
+ * (STATUS bits 9-10 clear).
+ */
+static int get_timings(struct mgb4_vin_dev *vindev,
+                      struct v4l2_dv_timings *timings)
+{
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+       const struct mgb4_vin_regs *regs = &vindev->config->regs;
+
+       u32 status = mgb4_read_reg(video, regs->status);
+       u32 pclk = mgb4_read_reg(video, regs->pclk);
+       u32 signal = mgb4_read_reg(video, regs->signal);
+       u32 signal2 = mgb4_read_reg(video, regs->signal2);
+       u32 resolution = mgb4_read_reg(video, regs->resolution);
+
+       if (!(status & (1U << 2)))
+               return -ENOLCK;
+       if (!(status & (3 << 9)))
+               return -ENOLINK;
+
+       memset(timings, 0, sizeof(*timings));
+       timings->type = V4L2_DV_BT_656_1120;
+       /* RESOLUTION packs width in the high and height in the low 16 bits */
+       timings->bt.width = resolution >> 16;
+       timings->bt.height = resolution & 0xFFFF;
+       if (status & (1U << 12))
+               timings->bt.polarities |= V4L2_DV_HSYNC_POS_POL;
+       if (status & (1U << 13))
+               timings->bt.polarities |= V4L2_DV_VSYNC_POS_POL;
+       /* PCLK register is in kHz, v4l2 wants Hz */
+       timings->bt.pixelclock = pclk * 1000;
+       timings->bt.hsync = (signal & 0x00FF0000) >> 16;
+       timings->bt.vsync = (signal2 & 0x00FF0000) >> 16;
+       timings->bt.hbackporch = (signal & 0x0000FF00) >> 8;
+       timings->bt.hfrontporch = signal & 0x000000FF;
+       timings->bt.vbackporch = (signal2 & 0x0000FF00) >> 8;
+       timings->bt.vfrontporch = signal2 & 0x000000FF;
+
+       return 0;
+}
+
+/*
+ * Complete all queued frame buffers with the given vb2 state (used to
+ * flush the queue, e.g. with VB2_BUF_STATE_ERROR on stream stop).
+ */
+static void return_all_buffers(struct mgb4_vin_dev *vindev,
+                              enum vb2_buffer_state state)
+{
+       struct mgb4_frame_buffer *buf, *node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&vindev->qlock, flags);
+       list_for_each_entry_safe(buf, node, &vindev->buf_list, list) {
+               vb2_buffer_done(&buf->vb.vb2_buf, state);
+               list_del(&buf->list);
+       }
+       spin_unlock_irqrestore(&vindev->qlock, flags);
+}
+
+/*
+ * vb2 .queue_setup: report the number of planes and the per-plane size.
+ * Frames are single-plane, 4 bytes per pixel, with the configured
+ * per-line pixel padding included.
+ */
+static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
+                      unsigned int *nplanes, unsigned int sizes[],
+                      struct device *alloc_devs[])
+{
+       struct mgb4_vin_dev *vindev = vb2_get_drv_priv(q);
+       unsigned int size = (vindev->timings.bt.width + vindev->padding)
+        * vindev->timings.bt.height * 4;
+
+       /*
+        * If I/O reconfiguration is in process, do not allow to start
+        * the queue. See video_source_store() in mgb4_sysfs_out.c for
+        * details.
+        */
+       if (test_bit(0, &vindev->mgbdev->io_reconfig))
+               return -EBUSY;
+
+       if (!size)
+               return -EINVAL;
+       if (*nplanes)
+               return sizes[0] < size ? -EINVAL : 0;
+       *nplanes = 1;
+       sizes[0] = size;
+
+       return 0;
+}
+
+/* vb2 .buf_init: initialize the buffer's queue list linkage */
+static int buffer_init(struct vb2_buffer *vb)
+{
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct mgb4_frame_buffer *buf = to_frame_buffer(vbuf);
+
+       INIT_LIST_HEAD(&buf->list);
+
+       return 0;
+}
+
+/*
+ * vb2 .buf_prepare: verify the buffer is large enough for the current
+ * frame geometry (width + padding, 4 bytes/pixel) and set the payload.
+ */
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+       struct mgb4_vin_dev *vindev = vb2_get_drv_priv(vb->vb2_queue);
+       struct device *dev = &vindev->mgbdev->pdev->dev;
+       unsigned int size = (vindev->timings.bt.width + vindev->padding)
+        * vindev->timings.bt.height * 4;
+
+       if (vb2_plane_size(vb, 0) < size) {
+               dev_err(dev, "buffer too small (%lu < %u)\n",
+                       vb2_plane_size(vb, 0), size);
+               return -EINVAL;
+       }
+
+       vb2_set_plane_payload(vb, 0, size);
+
+       return 0;
+}
+
+/* vb2 .buf_queue: append the buffer to the driver's pending frame list */
+static void buffer_queue(struct vb2_buffer *vb)
+{
+       struct mgb4_vin_dev *vindev = vb2_get_drv_priv(vb->vb2_queue);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct mgb4_frame_buffer *buf = to_frame_buffer(vbuf);
+       unsigned long flags;
+
+       spin_lock_irqsave(&vindev->qlock, flags);
+       list_add_tail(&buf->list, &vindev->buf_list);
+       spin_unlock_irqrestore(&vindev->qlock, flags);
+}
+
+/*
+ * vb2 .stop_streaming: disable the frame IRQ, stop the HW frame queue
+ * (unless loopback keeps it running), flush pending DMA work and return
+ * all queued buffers with an error state.
+ */
+static void stop_streaming(struct vb2_queue *vq)
+{
+       struct mgb4_vin_dev *vindev = vb2_get_drv_priv(vq);
+       const struct mgb4_vin_config *config = vindev->config;
+       int irq = xdma_get_user_irq(vindev->mgbdev->xdev, config->vin_irq);
+
+       xdma_disable_user_irq(vindev->mgbdev->xdev, irq);
+
+       /*
+        * In loopback mode, the HW frame queue must be left running for
+        * the IN->OUT transmission to work!
+        */
+       if (!loopback_active(vindev))
+               mgb4_mask_reg(&vindev->mgbdev->video, config->regs.config, 0x2,
+                             0x0);
+
+       cancel_work_sync(&vindev->dma_work);
+       return_all_buffers(vindev, VB2_BUF_STATE_ERROR);
+}
+
+/*
+ * vb2 .start_streaming: reset the frame sequence counter, start the HW
+ * frame queue (bit 1 of CONFIG, unless loopback already runs it) and
+ * enable the frame IRQ.
+ */
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+       struct mgb4_vin_dev *vindev = vb2_get_drv_priv(vq);
+       const struct mgb4_vin_config *config = vindev->config;
+       int irq = xdma_get_user_irq(vindev->mgbdev->xdev, config->vin_irq);
+
+       vindev->sequence = 0;
+
+       /*
+        * In loopback mode, the HW frame queue is already running.
+        */
+       if (!loopback_active(vindev))
+               mgb4_mask_reg(&vindev->mgbdev->video, config->regs.config, 0x2,
+                             0x2);
+
+       xdma_enable_user_irq(vindev->mgbdev->xdev, irq);
+
+       return 0;
+}
+
+static const struct vb2_ops queue_ops = {
+       .queue_setup = queue_setup,
+       .buf_init = buffer_init,
+       .buf_prepare = buffer_prepare,
+       .buf_queue = buffer_queue,
+       .start_streaming = start_streaming,
+       .stop_streaming = stop_streaming,
+       .wait_prepare = vb2_ops_wait_prepare,
+       .wait_finish = vb2_ops_wait_finish
+};
+
+/*
+ * v4l2 file open: on the first (singular) open, refresh the cached input
+ * timings from the hardware and sync the loopback outputs' padding.
+ */
+static int fh_open(struct file *file)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+       int rv;
+
+       mutex_lock(&vindev->lock);
+
+       rv = v4l2_fh_open(file);
+       if (rv)
+               goto out;
+
+       if (!v4l2_fh_is_singular_file(file))
+               goto out;
+
+       /*
+        * NOTE(review): get_timings() failure (no lock/signal) is ignored
+        * here, leaving the previously cached timings in place — confirm
+        * this is intentional.
+        */
+       get_timings(vindev, &vindev->timings);
+       set_loopback_padding(vindev, vindev->padding);
+
+out:
+       mutex_unlock(&vindev->lock);
+       return rv;
+}
+
+/*
+ * v4l2 file release: on last close, reset the loopback outputs' padding
+ * to 0 before releasing the vb2 file handle.
+ */
+static int fh_release(struct file *file)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+       int rv;
+
+       mutex_lock(&vindev->lock);
+
+       if (v4l2_fh_is_singular_file(file))
+               set_loopback_padding(vindev, 0);
+
+       rv = _vb2_fop_release(file, NULL);
+
+       mutex_unlock(&vindev->lock);
+
+       return rv;
+}
+
+static const struct v4l2_file_operations video_fops = {
+       .owner = THIS_MODULE,
+       .open = fh_open,
+       .release = fh_release,
+       .unlocked_ioctl = video_ioctl2,
+       .read = vb2_fop_read,
+       .mmap = vb2_fop_mmap,
+       .poll = vb2_fop_poll,
+};
+
+/* VIDIOC_QUERYCAP: report driver and card identification strings */
+static int vidioc_querycap(struct file *file, void *priv,
+                          struct v4l2_capability *cap)
+{
+       strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+       strscpy(cap->card, "MGB4 PCIe Card", sizeof(cap->card));
+
+       return 0;
+}
+
+/* VIDIOC_ENUM_FMT: a single pixel format (ABGR32) is supported */
+static int vidioc_enum_fmt(struct file *file, void *priv,
+                          struct v4l2_fmtdesc *f)
+{
+       if (f->index != 0)
+               return -EINVAL;
+
+       f->pixelformat = V4L2_PIX_FMT_ABGR32;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_ENUM_FRAMEINTERVALS: report a continuous interval range of
+ * 1/60 s .. 1 s for the current frame size and the ABGR32 format.
+ */
+static int vidioc_enum_frameintervals(struct file *file, void *priv,
+                                     struct v4l2_frmivalenum *ival)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+       if (ival->index != 0)
+               return -EINVAL;
+       if (ival->pixel_format != V4L2_PIX_FMT_ABGR32)
+               return -EINVAL;
+       if (ival->width != vindev->timings.bt.width ||
+           ival->height != vindev->timings.bt.height)
+               return -EINVAL;
+
+       ival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
+       ival->stepwise.min.denominator = 60;
+       ival->stepwise.min.numerator = 1;
+       ival->stepwise.max.denominator = 1;
+       ival->stepwise.max.numerator = 1;
+       ival->stepwise.step = ival->stepwise.max;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_G_FMT: the format is fully derived from the current input
+ * timings and the configured line padding (4 bytes per pixel).
+ */
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+       f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+       f->fmt.pix.width = vindev->timings.bt.width;
+       f->fmt.pix.height = vindev->timings.bt.height;
+       f->fmt.pix.field = V4L2_FIELD_NONE;
+       f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+       f->fmt.pix.bytesperline = (f->fmt.pix.width + vindev->padding) * 4;
+       f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_TRY_FMT: width/height/format are fixed by the input signal;
+ * only bytesperline is adjustable — it is clamped to at least width * 4
+ * and aligned down to a multiple of 4 (whole pixels of padding).
+ */
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+       f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+       f->fmt.pix.width = vindev->timings.bt.width;
+       f->fmt.pix.height = vindev->timings.bt.height;
+       f->fmt.pix.field = V4L2_FIELD_NONE;
+       f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+       f->fmt.pix.bytesperline = max(f->fmt.pix.width * 4,
+                                     ALIGN_DOWN(f->fmt.pix.bytesperline, 4));
+       f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_S_FMT: apply the (tried) format by programming the per-line
+ * pixel padding into the FPGA and propagating it to loopback outputs.
+ * Rejected with -EBUSY while the queue is streaming.
+ */
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+
+       if (vb2_is_busy(&vindev->queue))
+               return -EBUSY;
+
+       vidioc_try_fmt(file, priv, f);
+
+       /* padding is stored in pixels, bytesperline is in bytes */
+       vindev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width * 4)) / 4;
+       mgb4_write_reg(video, vindev->config->regs.padding, vindev->padding);
+       set_loopback_padding(vindev, vindev->padding);
+
+       return 0;
+}
+
+/*
+ * VIDIOC_ENUMINPUT: single camera-type input with DV timings capability;
+ * the live status flags are derived from the FPGA STATUS register
+ * (bit 2: sync/PLL lock, bits 9-10: signal presence — same bits as
+ * checked in get_timings()).
+ */
+static int vidioc_enum_input(struct file *file, void *priv,
+                            struct v4l2_input *i)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+       u32 status;
+
+       if (i->index != 0)
+               return -EINVAL;
+
+       strscpy(i->name, "MGB4", sizeof(i->name));
+       i->type = V4L2_INPUT_TYPE_CAMERA;
+       i->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+       i->status = 0;
+
+       status = mgb4_read_reg(video, vindev->config->regs.status);
+       if (!(status & (1U << 2)))
+               i->status |= V4L2_IN_ST_NO_SYNC;
+       if (!(status & (3 << 9)))
+               i->status |= V4L2_IN_ST_NO_SIGNAL;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_ENUM_FRAMESIZES: a single discrete size — the currently
+ * detected input resolution.
+ */
+static int vidioc_enum_framesizes(struct file *file, void *fh,
+                                 struct v4l2_frmsizeenum *fsize)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+       if (fsize->index != 0 || fsize->pixel_format != V4L2_PIX_FMT_ABGR32)
+               return -EINVAL;
+
+       fsize->discrete.width = vindev->timings.bt.width;
+       fsize->discrete.height = vindev->timings.bt.height;
+       fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+
+       return 0;
+}
+
+/* VIDIOC_S_INPUT: only input 0 exists */
+static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
+{
+       return (i == 0) ? 0 : -EINVAL;
+}
+
+/* VIDIOC_G_INPUT: the current (and only) input is always 0 */
+static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
+{
+       *i = 0;
+       return 0;
+}
+
+static int vidioc_parm(struct file *file, void *priv,
+                      struct v4l2_streamparm *parm)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+       const struct mgb4_vin_regs *regs = &vindev->config->regs;
+       struct v4l2_fract timeperframe = {
+               .numerator = mgb4_read_reg(video, regs->frame_period),
+               .denominator = 125000000,
+       };
+
+       if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               return -EINVAL;
+
+       parm->parm.capture.readbuffers = 2;
+       parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+       parm->parm.capture.timeperframe = timeperframe;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_S_DV_TIMINGS: validate the requested geometry against the
+ * advertised caps and cache it. A no-op when the resolution is unchanged;
+ * rejected with -EBUSY while streaming.
+ */
+static int vidioc_s_dv_timings(struct file *file, void *fh,
+                              struct v4l2_dv_timings *timings)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+       if (timings->bt.width < video_timings_cap.bt.min_width ||
+           timings->bt.width > video_timings_cap.bt.max_width ||
+           timings->bt.height < video_timings_cap.bt.min_height ||
+           timings->bt.height > video_timings_cap.bt.max_height)
+               return -EINVAL;
+       if (timings->bt.width == vindev->timings.bt.width &&
+           timings->bt.height == vindev->timings.bt.height)
+               return 0;
+       if (vb2_is_busy(&vindev->queue))
+               return -EBUSY;
+
+       vindev->timings = *timings;
+
+       return 0;
+}
+
+/* VIDIOC_G_DV_TIMINGS: return the cached (active) timings */
+static int vidioc_g_dv_timings(struct file *file, void *fh,
+                              struct v4l2_dv_timings *timings)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+       *timings = vindev->timings;
+
+       return 0;
+}
+
+/* VIDIOC_QUERY_DV_TIMINGS: read the timings currently detected by the HW */
+static int vidioc_query_dv_timings(struct file *file, void *fh,
+                                  struct v4l2_dv_timings *timings)
+{
+       struct mgb4_vin_dev *vindev = video_drvdata(file);
+
+       return get_timings(vindev, timings);
+}
+
+/* VIDIOC_ENUM_DV_TIMINGS: enumerate standard timings within our caps */
+static int vidioc_enum_dv_timings(struct file *file, void *fh,
+                                 struct v4l2_enum_dv_timings *timings)
+{
+       return v4l2_enum_dv_timings_cap(timings, &video_timings_cap, NULL, NULL);
+}
+
+/* VIDIOC_DV_TIMINGS_CAP: return the static timing capabilities */
+static int vidioc_dv_timings_cap(struct file *file, void *fh,
+                                struct v4l2_dv_timings_cap *cap)
+{
+       *cap = video_timings_cap;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_SUBSCRIBE_EVENT: handle SOURCE_CHANGE ourselves, delegate
+ * everything else to the control framework.
+ */
+static int vidioc_subscribe_event(struct v4l2_fh *fh,
+                                 const struct v4l2_event_subscription *sub)
+{
+       switch (sub->type) {
+       case V4L2_EVENT_SOURCE_CHANGE:
+               return v4l2_src_change_event_subscribe(fh, sub);
+       }
+
+       return v4l2_ctrl_subscribe_event(fh, sub);
+}
+
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+       .vidioc_querycap = vidioc_querycap,
+       .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
+       .vidioc_try_fmt_vid_cap = vidioc_try_fmt,
+       .vidioc_s_fmt_vid_cap = vidioc_s_fmt,
+       .vidioc_g_fmt_vid_cap = vidioc_g_fmt,
+       .vidioc_enum_framesizes = vidioc_enum_framesizes,
+       .vidioc_enum_frameintervals = vidioc_enum_frameintervals,
+       .vidioc_enum_input = vidioc_enum_input,
+       .vidioc_g_input = vidioc_g_input,
+       .vidioc_s_input = vidioc_s_input,
+       .vidioc_reqbufs = vb2_ioctl_reqbufs,
+       .vidioc_create_bufs = vb2_ioctl_create_bufs,
+       .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+       .vidioc_querybuf = vb2_ioctl_querybuf,
+       .vidioc_qbuf = vb2_ioctl_qbuf,
+       .vidioc_dqbuf = vb2_ioctl_dqbuf,
+       .vidioc_expbuf = vb2_ioctl_expbuf,
+       .vidioc_streamon = vb2_ioctl_streamon,
+       .vidioc_streamoff = vb2_ioctl_streamoff,
+       .vidioc_g_parm = vidioc_parm,
+       .vidioc_s_parm = vidioc_parm,
+       .vidioc_dv_timings_cap = vidioc_dv_timings_cap,
+       .vidioc_enum_dv_timings = vidioc_enum_dv_timings,
+       .vidioc_g_dv_timings = vidioc_g_dv_timings,
+       .vidioc_s_dv_timings = vidioc_s_dv_timings,
+       .vidioc_query_dv_timings = vidioc_query_dv_timings,
+       .vidioc_subscribe_event = vidioc_subscribe_event,
+       .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * Frame DMA work handler (scheduled from vin_handler()): pop the oldest
+ * pending vb2 buffer, read the captured frame's address from the FPGA
+ * and DMA it into the buffer's scatter-gather list. The buffer is
+ * completed as DONE on success or ERROR on a queue/DMA failure.
+ */
+static void dma_transfer(struct work_struct *work)
+{
+       struct mgb4_vin_dev *vindev = container_of(work, struct mgb4_vin_dev,
+                                                  dma_work);
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+       struct device *dev = &vindev->mgbdev->pdev->dev;
+       struct mgb4_frame_buffer *buf = NULL;
+       unsigned long flags;
+       u32 addr;
+       int rv;
+
+       spin_lock_irqsave(&vindev->qlock, flags);
+       if (!list_empty(&vindev->buf_list)) {
+               buf = list_first_entry(&vindev->buf_list,
+                                      struct mgb4_frame_buffer, list);
+               list_del_init(vindev->buf_list.next);
+       }
+       spin_unlock_irqrestore(&vindev->qlock, flags);
+
+       /* No buffer queued by user space — drop the frame */
+       if (!buf)
+               return;
+
+       /* ADDRESS values at/above MGB4_ERR_QUEUE_FULL encode error states */
+       addr = mgb4_read_reg(video, vindev->config->regs.address);
+       if (addr >= MGB4_ERR_QUEUE_FULL) {
+               dev_dbg(dev, "frame queue error (%d)\n", (int)addr);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+               return;
+       }
+
+       rv = mgb4_dma_transfer(vindev->mgbdev, vindev->config->dma_channel,
+                              false, addr,
+                              vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0));
+       if (rv < 0) {
+               dev_warn(dev, "DMA transfer error\n");
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+       } else {
+               buf->vb.vb2_buf.timestamp = ktime_get_ns();
+               buf->vb.sequence = vindev->sequence++;
+               buf->vb.field = V4L2_FIELD_NONE;
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+       }
+}
+
+/*
+ * Signal-change work handler (scheduled from err_handler()): compare the
+ * HW-reported resolution with the cached timings; on a change, emit a
+ * V4L2_EVENT_SOURCE_CHANGE event and put a running queue into the error
+ * state so user space restarts streaming with the new geometry.
+ */
+static void signal_change(struct work_struct *work)
+{
+       struct mgb4_vin_dev *vindev = container_of(work, struct mgb4_vin_dev,
+                                                  err_work);
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+       struct v4l2_bt_timings *timings = &vindev->timings.bt;
+       struct device *dev = &vindev->mgbdev->pdev->dev;
+
+       u32 resolution = mgb4_read_reg(video, vindev->config->regs.resolution);
+       u32 width = resolution >> 16;
+       u32 height = resolution & 0xFFFF;
+
+       if (timings->width != width || timings->height != height) {
+               static const struct v4l2_event ev = {
+                       .type = V4L2_EVENT_SOURCE_CHANGE,
+                       .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+               };
+
+               v4l2_event_queue(&vindev->vdev, &ev);
+
+               if (vb2_is_streaming(&vindev->queue))
+                       vb2_queue_error(&vindev->queue);
+       }
+
+       dev_dbg(dev, "stream changed to %ux%u\n", width, height);
+}
+
+/*
+ * Frame IRQ handler: defer the DMA transfer to the workqueue and
+ * acknowledge the interrupt (register 0xB4 appears to be the IRQ ack
+ * register, one bit per user IRQ line — TODO confirm against HW docs).
+ */
+static irqreturn_t vin_handler(int irq, void *ctx)
+{
+       struct mgb4_vin_dev *vindev = (struct mgb4_vin_dev *)ctx;
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+
+       schedule_work(&vindev->dma_work);
+
+       mgb4_write_reg(video, 0xB4, 1U << vindev->config->vin_irq);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Error/signal-change IRQ handler: defer the handling to the workqueue
+ * and acknowledge the interrupt (same 0xB4 ack register as above).
+ */
+static irqreturn_t err_handler(int irq, void *ctx)
+{
+       struct mgb4_vin_dev *vindev = (struct mgb4_vin_dev *)ctx;
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+
+       schedule_work(&vindev->err_work);
+
+       mgb4_write_reg(video, 0xB4, 1U << vindev->config->err_irq);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Create the deserializer I2C client for input @id and apply the
+ * module-type specific register defaults (GMSL uses 16-bit register
+ * addresses, FPDL3 8-bit ones). On configuration failure the I2C client
+ * is freed again. Returns 0 on success or a negative errno.
+ */
+static int deser_init(struct mgb4_vin_dev *vindev, int id)
+{
+       int rv, addr_size;
+       size_t values_count;
+       const struct mgb4_i2c_kv *values;
+       const struct i2c_board_info *info;
+       struct device *dev = &vindev->mgbdev->pdev->dev;
+
+       if (MGB4_IS_GMSL(vindev->mgbdev)) {
+               info = &gmsl_deser_info[id];
+               addr_size = 16;
+               values = gmsl_i2c;
+               values_count = ARRAY_SIZE(gmsl_i2c);
+       } else {
+               info = &fpdl3_deser_info[id];
+               addr_size = 8;
+               values = fpdl3_i2c;
+               values_count = ARRAY_SIZE(fpdl3_i2c);
+       }
+
+       rv = mgb4_i2c_init(&vindev->deser, vindev->mgbdev->i2c_adap, info,
+                          addr_size);
+       if (rv < 0) {
+               dev_err(dev, "failed to create deserializer\n");
+               return rv;
+       }
+       rv = mgb4_i2c_configure(&vindev->deser, values, values_count);
+       if (rv < 0) {
+               dev_err(dev, "failed to configure deserializer\n");
+               goto err_i2c_dev;
+       }
+
+       return 0;
+
+err_i2c_dev:
+       mgb4_i2c_free(&vindev->deser);
+
+       return rv;
+}
+
+/*
+ * Program the input's FPGA registers with their default values
+ * (magic values per HW documentation — presumably reset config,
+ * default sync generator settings, zero padding; TODO confirm).
+ */
+static void fpga_init(struct mgb4_vin_dev *vindev)
+{
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+       const struct mgb4_vin_regs *regs = &vindev->config->regs;
+
+       mgb4_write_reg(video, regs->config, 0x00000001);
+       mgb4_write_reg(video, regs->sync, 0x03E80002);
+       mgb4_write_reg(video, regs->padding, 0x00000000);
+       mgb4_write_reg(video, regs->config, 1U << 9);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void debugfs_init(struct mgb4_vin_dev *vindev)
+{
+       struct mgb4_regs *video = &vindev->mgbdev->video;
+
+       vindev->debugfs = debugfs_create_dir(vindev->vdev.name,
+                                            vindev->mgbdev->debugfs);
+       if (!vindev->debugfs)
+               return;
+
+       vindev->regs[0].name = "CONFIG";
+       vindev->regs[0].offset = vindev->config->regs.config;
+       vindev->regs[1].name = "STATUS";
+       vindev->regs[1].offset = vindev->config->regs.status;
+       vindev->regs[2].name = "RESOLUTION";
+       vindev->regs[2].offset = vindev->config->regs.resolution;
+       vindev->regs[3].name = "FRAME_PERIOD";
+       vindev->regs[3].offset = vindev->config->regs.frame_period;
+       vindev->regs[4].name = "HS_VS_GENER_SETTINGS";
+       vindev->regs[4].offset = vindev->config->regs.sync;
+       vindev->regs[5].name = "PCLK_FREQUENCY";
+       vindev->regs[5].offset = vindev->config->regs.pclk;
+       vindev->regs[6].name = "VIDEO_PARAMS_1";
+       vindev->regs[6].offset = vindev->config->regs.signal;
+       vindev->regs[7].name = "VIDEO_PARAMS_2";
+       vindev->regs[7].offset = vindev->config->regs.signal2;
+       vindev->regs[8].name = "PADDING_PIXELS";
+       vindev->regs[8].offset = vindev->config->regs.padding;
+
+       vindev->regset.base = video->membase;
+       vindev->regset.regs = vindev->regs;
+       vindev->regset.nregs = ARRAY_SIZE(vindev->regs);
+
+       debugfs_create_regset32("registers", 0444, vindev->debugfs,
+                               &vindev->regset);
+}
+#endif
+
+/*
+ * Create and register video input device @id (0 or 1).
+ *
+ * Sets up, in order: buffer queue state, deferred work items, the frame
+ * and error IRQ handlers, FPGA and deserializer defaults, the v4l2
+ * device, the vb2 queue, the video device, module sysfs attributes and
+ * (optionally) debugfs. Returns the new device, or NULL on failure with
+ * everything unwound via the error labels in reverse order.
+ */
+struct mgb4_vin_dev *mgb4_vin_create(struct mgb4_dev *mgbdev, int id)
+{
+       int rv;
+       const struct attribute_group **groups;
+       struct mgb4_vin_dev *vindev;
+       struct pci_dev *pdev = mgbdev->pdev;
+       struct device *dev = &pdev->dev;
+       int vin_irq, err_irq;
+
+       vindev = kzalloc(sizeof(*vindev), GFP_KERNEL);
+       if (!vindev)
+               return NULL;
+
+       vindev->mgbdev = mgbdev;
+       vindev->config = &vin_cfg[id];
+
+       /* Frame queue*/
+       INIT_LIST_HEAD(&vindev->buf_list);
+       spin_lock_init(&vindev->qlock);
+
+       /* Work queues */
+       INIT_WORK(&vindev->dma_work, dma_transfer);
+       INIT_WORK(&vindev->err_work, signal_change);
+
+       /* IRQ callback */
+       vin_irq = xdma_get_user_irq(mgbdev->xdev, vindev->config->vin_irq);
+       rv = request_irq(vin_irq, vin_handler, 0, "mgb4-vin", vindev);
+       if (rv) {
+               dev_err(dev, "failed to register vin irq handler\n");
+               goto err_alloc;
+       }
+       /* Error IRQ callback */
+       err_irq = xdma_get_user_irq(mgbdev->xdev, vindev->config->err_irq);
+       rv = request_irq(err_irq, err_handler, 0, "mgb4-err", vindev);
+       if (rv) {
+               dev_err(dev, "failed to register err irq handler\n");
+               goto err_vin_irq;
+       }
+
+       /* Set the FPGA registers default values */
+       fpga_init(vindev);
+
+       /* Set the deserializer default values */
+       rv = deser_init(vindev, id);
+       if (rv)
+               goto err_err_irq;
+
+       /* V4L2 stuff init */
+       rv = v4l2_device_register(dev, &vindev->v4l2dev);
+       if (rv) {
+               dev_err(dev, "failed to register v4l2 device\n");
+               goto err_err_irq;
+       }
+
+       mutex_init(&vindev->lock);
+
+       vindev->queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       vindev->queue.io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+       vindev->queue.buf_struct_size = sizeof(struct mgb4_frame_buffer);
+       vindev->queue.ops = &queue_ops;
+       vindev->queue.mem_ops = &vb2_dma_sg_memops;
+       vindev->queue.gfp_flags = GFP_DMA32;
+       vindev->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+       vindev->queue.min_buffers_needed = 2;
+       vindev->queue.drv_priv = vindev;
+       vindev->queue.lock = &vindev->lock;
+       vindev->queue.dev = dev;
+       rv = vb2_queue_init(&vindev->queue);
+       if (rv) {
+               dev_err(dev, "failed to initialize vb2 queue\n");
+               goto err_v4l2_dev;
+       }
+
+       snprintf(vindev->vdev.name, sizeof(vindev->vdev.name), "mgb4-in%d",
+                id + 1);
+       vindev->vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE
+         | V4L2_CAP_STREAMING;
+       vindev->vdev.fops = &video_fops;
+       vindev->vdev.ioctl_ops = &video_ioctl_ops;
+       vindev->vdev.release = video_device_release_empty;
+       vindev->vdev.v4l2_dev = &vindev->v4l2dev;
+       vindev->vdev.lock = &vindev->lock;
+       vindev->vdev.queue = &vindev->queue;
+       video_set_drvdata(&vindev->vdev, vindev);
+
+       /* Enable the video signal change watcher */
+       xdma_enable_user_irq(vindev->mgbdev->xdev, err_irq);
+
+       /* Register the video device */
+       rv = video_register_device(&vindev->vdev, VFL_TYPE_VIDEO, -1);
+       if (rv) {
+               dev_err(dev, "failed to register video device\n");
+               goto err_v4l2_dev;
+       }
+
+       /* Module sysfs attributes */
+       groups = MGB4_IS_GMSL(mgbdev)
+         ? mgb4_gmsl_in_groups : mgb4_fpdl3_in_groups;
+       rv = device_add_groups(&vindev->vdev.dev, groups);
+       if (rv) {
+               dev_err(dev, "failed to create sysfs attributes\n");
+               goto err_video_dev;
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       debugfs_init(vindev);
+#endif
+
+       return vindev;
+
+err_video_dev:
+       video_unregister_device(&vindev->vdev);
+err_v4l2_dev:
+       v4l2_device_unregister(&vindev->v4l2dev);
+err_err_irq:
+       free_irq(err_irq, vindev);
+err_vin_irq:
+       free_irq(vin_irq, vindev);
+err_alloc:
+       kfree(vindev);
+
+       return NULL;
+}
+
+/*
+ * Tear down a video input device created by mgb4_vin_create():
+ * disable/free the IRQs first so no new work is scheduled, then remove
+ * debugfs and sysfs entries, free the deserializer client and unregister
+ * the video and v4l2 devices before releasing the structure.
+ */
+void mgb4_vin_free(struct mgb4_vin_dev *vindev)
+{
+       const struct attribute_group **groups;
+       int vin_irq = xdma_get_user_irq(vindev->mgbdev->xdev,
+                                       vindev->config->vin_irq);
+       int err_irq = xdma_get_user_irq(vindev->mgbdev->xdev,
+                                       vindev->config->err_irq);
+
+       xdma_disable_user_irq(vindev->mgbdev->xdev, err_irq);
+
+       free_irq(vin_irq, vindev);
+       free_irq(err_irq, vindev);
+
+#ifdef CONFIG_DEBUG_FS
+       debugfs_remove_recursive(vindev->debugfs);
+#endif
+
+       groups = MGB4_IS_GMSL(vindev->mgbdev)
+         ? mgb4_gmsl_in_groups : mgb4_fpdl3_in_groups;
+       device_remove_groups(&vindev->vdev.dev, groups);
+
+       mgb4_i2c_free(&vindev->deser);
+       video_unregister_device(&vindev->vdev);
+       v4l2_device_unregister(&vindev->v4l2dev);
+
+       kfree(vindev);
+}
diff --git a/drivers/media/pci/mgb4/mgb4_vin.h b/drivers/media/pci/mgb4/mgb4_vin.h
new file mode 100644 (file)
index 0000000..0249b40
--- /dev/null
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_VIN_H__
+#define __MGB4_VIN_H__
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include <linux/debugfs.h>
+#include "mgb4_i2c.h"
+
+/* Register offsets of one video input core in the FPGA address space */
+struct mgb4_vin_regs {
+       u32 address;
+       u32 config;
+       u32 status;
+       u32 resolution;
+       u32 frame_period;
+       u32 sync;
+       u32 pclk;
+       u32 signal;
+       u32 signal2;
+       u32 padding;
+};
+
+/*
+ * Static per-input description: device id, XDMA channel number, the two
+ * user IRQ indices (frame DMA and error) and the input's register map.
+ */
+struct mgb4_vin_config {
+       int id;
+       int dma_channel;
+       int vin_irq;
+       int err_irq;
+       struct mgb4_vin_regs regs;
+};
+
+/* Runtime state of one video input (V4L2 capture) device */
+struct mgb4_vin_dev {
+       struct mgb4_dev *mgbdev;
+       struct v4l2_device v4l2dev;
+       struct video_device vdev;
+       struct vb2_queue queue;
+       struct mutex lock; /* vdev lock */
+
+       spinlock_t qlock; /* video buffer queue lock */
+       struct list_head buf_list;
+       struct work_struct dma_work, err_work;
+
+       unsigned int sequence;
+
+       struct v4l2_dv_timings timings;
+       u32 freq_range;
+       u32 padding;
+
+       struct mgb4_i2c_client deser;
+
+       const struct mgb4_vin_config *config;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *debugfs;
+       struct debugfs_regset32 regset;
+       struct debugfs_reg32 regs[9]; /* named subset of mgb4_vin_regs */
+#endif
+};
+
+struct mgb4_vin_dev *mgb4_vin_create(struct mgb4_dev *mgbdev, int id);
+void mgb4_vin_free(struct mgb4_vin_dev *vindev);
+
+#endif
diff --git a/drivers/media/pci/mgb4/mgb4_vout.c b/drivers/media/pci/mgb4/mgb4_vout.c
new file mode 100644 (file)
index 0000000..857fc7b
--- /dev/null
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ *
+ * This is the v4l2 output device module. It initializes the signal serializers
+ * and creates the v4l2 video devices.
+ *
+ * When the device is in loopback mode (a direct, in HW, in->out frame passing
+ * mode) we disable the v4l2 output by returning EBUSY in the open() syscall.
+ */
+
+#include <linux/pci.h>
+#include <linux/align.h>
+#include <linux/dma/amd_xdma.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-sg.h>
+#include "mgb4_core.h"
+#include "mgb4_dma.h"
+#include "mgb4_sysfs.h"
+#include "mgb4_io.h"
+#include "mgb4_cmt.h"
+#include "mgb4_vout.h"
+
+/* sysfs attribute groups for the two module flavours (FPDL3 vs GMSL) */
+ATTRIBUTE_GROUPS(mgb4_fpdl3_out);
+ATTRIBUTE_GROUPS(mgb4_gmsl_out);
+
+/*
+ * Static configuration of the two outputs: {id, dma_channel, irq,
+ * register offsets}; offset order must match struct mgb4_vout_regs.
+ */
+static const struct mgb4_vout_config vout_cfg[] = {
+       {0, 0, 8, {0x78, 0x60, 0x64, 0x68, 0x74, 0x6C, 0x70, 0x7c}},
+       {1, 1, 9, {0x98, 0x80, 0x84, 0x88, 0x94, 0x8c, 0x90, 0x9c}}
+};
+
+/* I2C addresses of the two FPD-Link III serializers */
+static const struct i2c_board_info fpdl3_ser_info[] = {
+       {I2C_BOARD_INFO("serializer1", 0x14)},
+       {I2C_BOARD_INFO("serializer2", 0x16)},
+};
+
+/*
+ * FPDL3 serializer init sequence as {reg, mask, value} triples.
+ * NOTE(review): vendor-provided values; not verifiable from this file.
+ */
+static const struct mgb4_i2c_kv fpdl3_i2c[] = {
+       {0x05, 0xFF, 0x04}, {0x06, 0xFF, 0x01}, {0xC2, 0xFF, 0x80}
+};
+
+/*
+ * Hand every buffer on the pending queue back to vb2 in @state
+ * (VB2_BUF_STATE_ERROR when stopping, VB2_BUF_STATE_QUEUED when
+ * streaming could not be started). The list is shared with the DMA
+ * worker, hence the IRQ-safe spinlock.
+ */
+static void return_all_buffers(struct mgb4_vout_dev *voutdev,
+                              enum vb2_buffer_state state)
+{
+       struct mgb4_frame_buffer *buf, *node;
+       unsigned long flags;
+
+       spin_lock_irqsave(&voutdev->qlock, flags);
+       list_for_each_entry_safe(buf, node, &voutdev->buf_list, list) {
+               vb2_buffer_done(&buf->vb.vb2_buf, state);
+               list_del(&buf->list);
+       }
+       spin_unlock_irqrestore(&voutdev->qlock, flags);
+}
+
+/*
+ * vb2 .queue_setup: validate/announce the plane count and size for the
+ * single-planar ABGR32 output frames.
+ */
+static int queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
+                      unsigned int *nplanes, unsigned int sizes[],
+                      struct device *alloc_devs[])
+{
+       struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(q);
+       unsigned int size;
+
+       /*
+        * If I/O reconfiguration is in process, do not allow to start
+        * the queue. See video_source_store() in mgb4_sysfs_out.c for
+        * details.
+        */
+       if (test_bit(0, &voutdev->mgbdev->io_reconfig))
+               return -EBUSY;
+
+       /* 4 bytes per pixel (ABGR32); padding is counted in pixels */
+       size = (voutdev->width + voutdev->padding) * voutdev->height * 4;
+
+       /* If the caller proposed planes, only check they are big enough */
+       if (*nplanes)
+               return sizes[0] < size ? -EINVAL : 0;
+       *nplanes = 1;
+       sizes[0] = size;
+
+       return 0;
+}
+
+/* vb2 .buf_init: initialize the driver-private queue list head */
+static int buffer_init(struct vb2_buffer *vb)
+{
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct mgb4_frame_buffer *buf = to_frame_buffer(vbuf);
+
+       INIT_LIST_HEAD(&buf->list);
+
+       return 0;
+}
+
+/*
+ * vb2 .buf_prepare: verify the buffer can hold a full frame at the
+ * current resolution/padding and set the payload size accordingly.
+ */
+static int buffer_prepare(struct vb2_buffer *vb)
+{
+       struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(vb->vb2_queue);
+       struct device *dev = &voutdev->mgbdev->pdev->dev;
+       unsigned int size;
+
+       /* Same frame-size formula as in queue_setup() */
+       size = (voutdev->width + voutdev->padding) * voutdev->height * 4;
+
+       if (vb2_plane_size(vb, 0) < size) {
+               dev_err(dev, "buffer too small (%lu < %u)\n",
+                       vb2_plane_size(vb, 0), size);
+               return -EINVAL;
+       }
+
+       vb2_set_plane_payload(vb, 0, size);
+
+       return 0;
+}
+
+/*
+ * vb2 .buf_queue: append the buffer to the pending list consumed by the
+ * DMA worker. NOTE(review): the local is named "vindev" although this
+ * is the *output* device — cosmetic, consider renaming to voutdev.
+ */
+static void buffer_queue(struct vb2_buffer *vb)
+{
+       struct mgb4_vout_dev *vindev = vb2_get_drv_priv(vb->vb2_queue);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct mgb4_frame_buffer *buf = to_frame_buffer(vbuf);
+       unsigned long flags;
+
+       spin_lock_irqsave(&vindev->qlock, flags);
+       list_add_tail(&buf->list, &vindev->buf_list);
+       spin_unlock_irqrestore(&vindev->qlock, flags);
+}
+
+/*
+ * vb2 .stop_streaming: mask the frame IRQ, flush the DMA worker, clear
+ * bit 1 of the config register (presumably the output-enable bit used by
+ * start_streaming() — confirm against the FPGA spec) and return all
+ * pending buffers to vb2 as errored.
+ */
+static void stop_streaming(struct vb2_queue *vq)
+{
+       struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(vq);
+       struct mgb4_dev *mgbdev = voutdev->mgbdev;
+       int irq = xdma_get_user_irq(mgbdev->xdev, voutdev->config->irq);
+
+       xdma_disable_user_irq(mgbdev->xdev, irq);
+       cancel_work_sync(&voutdev->dma_work);
+       mgb4_mask_reg(&mgbdev->video, voutdev->config->regs.config, 0x2, 0x0);
+       return_all_buffers(voutdev, VB2_BUF_STATE_ERROR);
+}
+
+/*
+ * vb2 .start_streaming: enable the output, push the first queued frame
+ * to the hardware and then unmask the frame IRQ so subsequent frames are
+ * fed by dma_transfer() from IRQ-scheduled work.
+ */
+static int start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+       struct mgb4_vout_dev *voutdev = vb2_get_drv_priv(vq);
+       struct mgb4_dev *mgbdev = voutdev->mgbdev;
+       struct device *dev = &mgbdev->pdev->dev;
+       struct mgb4_frame_buffer *buf;
+       struct mgb4_regs *video = &mgbdev->video;
+       const struct mgb4_vout_config *config = voutdev->config;
+       int irq = xdma_get_user_irq(mgbdev->xdev, config->irq);
+       int rv;
+       u32 addr;
+
+       /* Set bit 1 of the config register (see stop_streaming()) */
+       mgb4_mask_reg(video, config->regs.config, 0x2, 0x2);
+
+       addr = mgb4_read_reg(video, config->regs.address);
+       if (addr >= MGB4_ERR_QUEUE_FULL) {
+               dev_dbg(dev, "frame queue error (%d)\n", (int)addr);
+               /* Leave the buffers queued so vb2 can retry/clean up */
+               return_all_buffers(voutdev, VB2_BUF_STATE_QUEUED);
+               return -EBUSY;
+       }
+
+       /*
+        * NOTE(review): buf_list is read/modified here without qlock.
+        * The IRQ worker is not running yet (the IRQ is enabled only
+        * below), but confirm no concurrent buffer_queue() can race here.
+        */
+       buf = list_first_entry(&voutdev->buf_list, struct mgb4_frame_buffer,
+                              list);
+       list_del_init(voutdev->buf_list.next);
+
+       rv = mgb4_dma_transfer(mgbdev, config->dma_channel, true, addr,
+                              vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0));
+       if (rv < 0) {
+               /*
+                * NOTE(review): the DMA error is reported on the buffer
+                * but the function still returns 0, so streaming starts
+                * anyway — confirm this is intentional.
+                */
+               dev_warn(dev, "DMA transfer error\n");
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+       } else {
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+       }
+
+       xdma_enable_user_irq(mgbdev->xdev, irq);
+
+       return 0;
+}
+
+/* vb2 queue callbacks for the output buffer queue */
+static const struct vb2_ops queue_ops = {
+       .queue_setup = queue_setup,
+       .buf_init = buffer_init,
+       .buf_prepare = buffer_prepare,
+       .buf_queue = buffer_queue,
+       .start_streaming = start_streaming,
+       .stop_streaming = stop_streaming,
+       .wait_prepare = vb2_ops_wait_prepare,
+       .wait_finish = vb2_ops_wait_finish
+};
+
+/* VIDIOC_QUERYCAP: report static driver/card identification strings */
+static int vidioc_querycap(struct file *file, void *priv,
+                          struct v4l2_capability *cap)
+{
+       strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+       strscpy(cap->card, "MGB4 PCIe Card", sizeof(cap->card));
+
+       return 0;
+}
+
+/* VIDIOC_ENUM_FMT: exactly one output format is supported (ABGR32) */
+static int vidioc_enum_fmt(struct file *file, void *priv,
+                          struct v4l2_fmtdesc *f)
+{
+       if (f->index != 0)
+               return -EINVAL;
+
+       f->pixelformat = V4L2_PIX_FMT_ABGR32;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_G_FMT: report the current (fixed-resolution) ABGR32 format;
+ * bytesperline accounts for the per-line pixel padding.
+ */
+static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+       struct mgb4_vout_dev *voutdev = video_drvdata(file);
+
+       f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+       f->fmt.pix.width = voutdev->width;
+       f->fmt.pix.height = voutdev->height;
+       f->fmt.pix.field = V4L2_FIELD_NONE;
+       f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+       f->fmt.pix.bytesperline = (f->fmt.pix.width + voutdev->padding) * 4;
+       f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_TRY_FMT: force everything to the fixed ABGR32 format/resolution;
+ * only bytesperline is negotiable — it is aligned down to a multiple of
+ * 4 and clamped to at least one unpadded line (width * 4).
+ */
+static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+       struct mgb4_vout_dev *voutdev = video_drvdata(file);
+
+       f->fmt.pix.pixelformat = V4L2_PIX_FMT_ABGR32;
+       f->fmt.pix.width = voutdev->width;
+       f->fmt.pix.height = voutdev->height;
+       f->fmt.pix.field = V4L2_FIELD_NONE;
+       f->fmt.pix.colorspace = V4L2_COLORSPACE_RAW;
+       f->fmt.pix.bytesperline = max(f->fmt.pix.width * 4,
+                                     ALIGN_DOWN(f->fmt.pix.bytesperline, 4));
+       f->fmt.pix.sizeimage = f->fmt.pix.bytesperline * f->fmt.pix.height;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_S_FMT: apply the (try_fmt-sanitized) format. The only effect is
+ * the line padding, derived in pixels from bytesperline and programmed
+ * into the FPGA padding register. Rejected while buffers are in use.
+ */
+static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
+{
+       struct mgb4_vout_dev *voutdev = video_drvdata(file);
+       struct mgb4_regs *video = &voutdev->mgbdev->video;
+
+       if (vb2_is_busy(&voutdev->queue))
+               return -EBUSY;
+
+       vidioc_try_fmt(file, priv, f);
+
+       voutdev->padding = (f->fmt.pix.bytesperline - (f->fmt.pix.width * 4)) / 4;
+       mgb4_write_reg(video, voutdev->config->regs.padding, voutdev->padding);
+
+       return 0;
+}
+
+/* VIDIOC_G_OUTPUT: a single output exists, always index 0 */
+static int vidioc_g_output(struct file *file, void *priv, unsigned int *i)
+{
+       *i = 0;
+       return 0;
+}
+
+/* VIDIOC_S_OUTPUT: only output 0 may be selected */
+static int vidioc_s_output(struct file *file, void *priv, unsigned int i)
+{
+       return i ? -EINVAL : 0;
+}
+
+/* VIDIOC_ENUM_OUTPUT: describe the single analog output */
+static int vidioc_enum_output(struct file *file, void *priv,
+                             struct v4l2_output *out)
+{
+       if (out->index != 0)
+               return -EINVAL;
+
+       out->type = V4L2_OUTPUT_TYPE_ANALOG;
+       strscpy(out->name, "MGB4", sizeof(out->name));
+
+       return 0;
+}
+
+/* V4L2 ioctl table: custom format/output handlers + vb2 helpers */
+static const struct v4l2_ioctl_ops video_ioctl_ops = {
+       .vidioc_querycap = vidioc_querycap,
+       .vidioc_enum_fmt_vid_out = vidioc_enum_fmt,
+       .vidioc_try_fmt_vid_out = vidioc_try_fmt,
+       .vidioc_s_fmt_vid_out = vidioc_s_fmt,
+       .vidioc_g_fmt_vid_out = vidioc_g_fmt,
+       .vidioc_enum_output = vidioc_enum_output,
+       .vidioc_g_output = vidioc_g_output,
+       .vidioc_s_output = vidioc_s_output,
+       .vidioc_reqbufs = vb2_ioctl_reqbufs,
+       .vidioc_create_bufs = vb2_ioctl_create_bufs,
+       .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+       .vidioc_querybuf = vb2_ioctl_querybuf,
+       .vidioc_qbuf = vb2_ioctl_qbuf,
+       .vidioc_dqbuf = vb2_ioctl_dqbuf,
+       .vidioc_expbuf = vb2_ioctl_expbuf,
+       .vidioc_streamon = vb2_ioctl_streamon,
+       .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/*
+ * File open: refuse while the HW loopback routes frames directly to this
+ * output, otherwise open the file handle and (for the first opener)
+ * latch the current resolution from the FPGA.
+ */
+static int fh_open(struct file *file)
+{
+       struct mgb4_vout_dev *voutdev = video_drvdata(file);
+       struct mgb4_regs *video = &voutdev->mgbdev->video;
+       struct device *dev = &voutdev->mgbdev->pdev->dev;
+       u32 config, resolution;
+       int rv;
+
+       /* Return EBUSY when the device is in loopback mode */
+       config = mgb4_read_reg(video, voutdev->config->regs.config);
+       /*
+        * NOTE(review): (config & 0xc) >> 2 yields only 0..3; the
+        * comparison assumes id + MGB4_VIN_DEVICES fits in those two
+        * bits — confirm the source-select field width.
+        */
+       if ((config & 0xc) >> 2 != voutdev->config->id + MGB4_VIN_DEVICES) {
+               dev_dbg(dev, "can not open - device in loopback mode");
+               return -EBUSY;
+       }
+
+       mutex_lock(&voutdev->lock);
+
+       rv = v4l2_fh_open(file);
+       if (rv)
+               goto out;
+
+       /* Only the first (singular) opener refreshes the resolution */
+       if (!v4l2_fh_is_singular_file(file))
+               goto out;
+
+       /* Resolution register: width in the high, height in the low 16 bits */
+       resolution = mgb4_read_reg(video, voutdev->config->regs.resolution);
+       voutdev->width = resolution >> 16;
+       voutdev->height = resolution & 0xFFFF;
+
+out:
+       mutex_unlock(&voutdev->lock);
+       return rv;
+}
+
+/* File operations: custom open (loopback check), vb2 helpers otherwise */
+static const struct v4l2_file_operations video_fops = {
+       .owner = THIS_MODULE,
+       .open = fh_open,
+       .release = vb2_fop_release,
+       .unlocked_ioctl = video_ioctl2,
+       .write = vb2_fop_write,
+       .mmap = vb2_fop_mmap,
+       .poll = vb2_fop_poll,
+};
+
+/*
+ * Deferred work scheduled from the frame IRQ: pop the next queued buffer
+ * (under qlock) and push it to the hardware via DMA, completing the
+ * buffer as DONE or ERROR accordingly. A no-op when the queue is empty.
+ */
+static void dma_transfer(struct work_struct *work)
+{
+       struct mgb4_vout_dev *voutdev = container_of(work, struct mgb4_vout_dev,
+                                                    dma_work);
+       struct device *dev = &voutdev->mgbdev->pdev->dev;
+       struct mgb4_regs *video = &voutdev->mgbdev->video;
+       struct mgb4_frame_buffer *buf = NULL;
+       unsigned long flags;
+       u32 addr;
+       int rv;
+
+       spin_lock_irqsave(&voutdev->qlock, flags);
+       if (!list_empty(&voutdev->buf_list)) {
+               buf = list_first_entry(&voutdev->buf_list,
+                                      struct mgb4_frame_buffer, list);
+               list_del_init(voutdev->buf_list.next);
+       }
+       spin_unlock_irqrestore(&voutdev->qlock, flags);
+
+       if (!buf)
+               return;
+
+       /* Address register doubles as an error indicator (see start_streaming) */
+       addr = mgb4_read_reg(video, voutdev->config->regs.address);
+       if (addr >= MGB4_ERR_QUEUE_FULL) {
+               dev_dbg(dev, "frame queue error (%d)\n", (int)addr);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+               return;
+       }
+
+       rv = mgb4_dma_transfer(voutdev->mgbdev, voutdev->config->dma_channel,
+                              true, addr,
+                              vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0));
+       if (rv < 0) {
+               dev_warn(dev, "DMA transfer error\n");
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+       } else {
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+       }
+}
+
+/*
+ * Frame IRQ handler: defer the actual DMA to the workqueue and
+ * acknowledge the interrupt (register 0xB4 is presumably the IRQ
+ * ack/clear register, one bit per user IRQ — confirm with the FPGA spec).
+ */
+static irqreturn_t handler(int irq, void *ctx)
+{
+       struct mgb4_vout_dev *voutdev = (struct mgb4_vout_dev *)ctx;
+       struct mgb4_regs *video = &voutdev->mgbdev->video;
+
+       schedule_work(&voutdev->dma_work);
+
+       mgb4_write_reg(video, 0xB4, 1U << voutdev->config->irq);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Create and configure the FPDL3 serializer I2C client for output @id.
+ * GMSL modules have no FPDL3 serializer to set up, so this is a no-op
+ * (returns 0) for them.
+ */
+static int ser_init(struct mgb4_vout_dev *voutdev, int id)
+{
+       int rv;
+       const struct i2c_board_info *info = &fpdl3_ser_info[id];
+       struct mgb4_i2c_client *ser = &voutdev->ser;
+       struct device *dev = &voutdev->mgbdev->pdev->dev;
+
+       if (MGB4_IS_GMSL(voutdev->mgbdev))
+               return 0;
+
+       rv = mgb4_i2c_init(ser, voutdev->mgbdev->i2c_adap, info, 8);
+       if (rv < 0) {
+               dev_err(dev, "failed to create serializer\n");
+               return rv;
+       }
+       rv = mgb4_i2c_configure(ser, fpdl3_i2c, ARRAY_SIZE(fpdl3_i2c));
+       if (rv < 0) {
+               dev_err(dev, "failed to configure serializer\n");
+               goto err_i2c_dev;
+       }
+
+       return 0;
+
+err_i2c_dev:
+       mgb4_i2c_free(ser);
+
+       return rv;
+}
+
+/*
+ * Program the output core's FPGA registers with their defaults and set
+ * the initial pixel clock. NOTE(review): the magic hsync/vsync/config
+ * values are hardware-specific and not verifiable from this file.
+ */
+static void fpga_init(struct mgb4_vout_dev *voutdev)
+{
+       struct mgb4_regs *video = &voutdev->mgbdev->video;
+       const struct mgb4_vout_regs *regs = &voutdev->config->regs;
+
+       mgb4_write_reg(video, regs->config, 0x00000011);
+       mgb4_write_reg(video, regs->resolution,
+                      (MGB4_DEFAULT_WIDTH << 16) | MGB4_DEFAULT_HEIGHT);
+       mgb4_write_reg(video, regs->hsync, 0x00102020);
+       mgb4_write_reg(video, regs->vsync, 0x40020202);
+       mgb4_write_reg(video, regs->frame_period, MGB4_DEFAULT_PERIOD);
+       mgb4_write_reg(video, regs->padding, 0x00000000);
+
+       /*
+        * NOTE(review): the CMT frequency API appears to work in
+        * half-frequency units, hence the >>1 / <<1 pair — confirm.
+        */
+       voutdev->freq = mgb4_cmt_set_vout_freq(voutdev, 70000 >> 1) << 1;
+
+       /* Select this output as its own source (non-loopback, see fh_open) */
+       mgb4_write_reg(video, regs->config,
+                      (voutdev->config->id + MGB4_VIN_DEVICES) << 2 | 1 << 4);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * Expose the output core's registers as a debugfs "registers" regset.
+ *
+ * NOTE(review): debugfs_create_dir() returns an ERR_PTR on failure,
+ * never NULL, so the !voutdev->debugfs check below can never trigger;
+ * per debugfs convention the return value need not be checked at all.
+ */
+static void debugfs_init(struct mgb4_vout_dev *voutdev)
+{
+       struct mgb4_regs *video = &voutdev->mgbdev->video;
+
+       voutdev->debugfs = debugfs_create_dir(voutdev->vdev.name,
+                                             voutdev->mgbdev->debugfs);
+       if (!voutdev->debugfs)
+               return;
+
+       voutdev->regs[0].name = "CONFIG";
+       voutdev->regs[0].offset = voutdev->config->regs.config;
+       voutdev->regs[1].name = "STATUS";
+       voutdev->regs[1].offset = voutdev->config->regs.status;
+       voutdev->regs[2].name = "RESOLUTION";
+       voutdev->regs[2].offset = voutdev->config->regs.resolution;
+       voutdev->regs[3].name = "VIDEO_PARAMS_1";
+       voutdev->regs[3].offset = voutdev->config->regs.hsync;
+       voutdev->regs[4].name = "VIDEO_PARAMS_2";
+       voutdev->regs[4].offset = voutdev->config->regs.vsync;
+       voutdev->regs[5].name = "FRAME_PERIOD";
+       voutdev->regs[5].offset = voutdev->config->regs.frame_period;
+       voutdev->regs[6].name = "PADDING";
+       voutdev->regs[6].offset = voutdev->config->regs.padding;
+
+       voutdev->regset.base = video->membase;
+       voutdev->regset.regs = voutdev->regs;
+       voutdev->regset.nregs = ARRAY_SIZE(voutdev->regs);
+
+       debugfs_create_regset32("registers", 0444, voutdev->debugfs,
+                               &voutdev->regset);
+}
+#endif
+
+/*
+ * Allocate and fully initialize video output device @id: IRQ handler,
+ * FPGA defaults, serializer, V4L2/vb2 infrastructure, sysfs and debugfs.
+ *
+ * Returns the new device, or NULL on any failure (errors are logged,
+ * not propagated). The error labels unwind in reverse creation order.
+ */
+struct mgb4_vout_dev *mgb4_vout_create(struct mgb4_dev *mgbdev, int id)
+{
+       int rv, irq;
+       const struct attribute_group **groups;
+       struct mgb4_vout_dev *voutdev;
+       struct pci_dev *pdev = mgbdev->pdev;
+       struct device *dev = &pdev->dev;
+
+       voutdev = kzalloc(sizeof(*voutdev), GFP_KERNEL);
+       if (!voutdev)
+               return NULL;
+
+       voutdev->mgbdev = mgbdev;
+       voutdev->config = &vout_cfg[id];
+
+       /* Frame queue */
+       INIT_LIST_HEAD(&voutdev->buf_list);
+       spin_lock_init(&voutdev->qlock);
+
+       /* DMA transfer stuff */
+       INIT_WORK(&voutdev->dma_work, dma_transfer);
+
+       /* IRQ callback */
+       irq = xdma_get_user_irq(mgbdev->xdev, voutdev->config->irq);
+       rv = request_irq(irq, handler, 0, "mgb4-vout", voutdev);
+       if (rv) {
+               dev_err(dev, "failed to register irq handler\n");
+               goto err_alloc;
+       }
+
+       /* Set the FPGA registers default values */
+       fpga_init(voutdev);
+
+       /* Set the serializer default values */
+       rv = ser_init(voutdev, id);
+       if (rv)
+               goto err_irq;
+
+       /* V4L2 stuff init  */
+       rv = v4l2_device_register(dev, &voutdev->v4l2dev);
+       if (rv) {
+               dev_err(dev, "failed to register v4l2 device\n");
+               goto err_irq;
+       }
+
+       mutex_init(&voutdev->lock);
+
+       voutdev->queue.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
+       voutdev->queue.io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
+       voutdev->queue.buf_struct_size = sizeof(struct mgb4_frame_buffer);
+       voutdev->queue.ops = &queue_ops;
+       voutdev->queue.mem_ops = &vb2_dma_sg_memops;
+       voutdev->queue.gfp_flags = GFP_DMA32;
+       voutdev->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+       voutdev->queue.min_buffers_needed = 2;
+       voutdev->queue.drv_priv = voutdev;
+       voutdev->queue.lock = &voutdev->lock;
+       voutdev->queue.dev = dev;
+       rv = vb2_queue_init(&voutdev->queue);
+       if (rv) {
+               dev_err(dev, "failed to initialize vb2 queue\n");
+               goto err_v4l2_dev;
+       }
+
+       snprintf(voutdev->vdev.name, sizeof(voutdev->vdev.name), "mgb4-out%d",
+                id + 1);
+       voutdev->vdev.device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_READWRITE
+         | V4L2_CAP_STREAMING;
+       voutdev->vdev.vfl_dir = VFL_DIR_TX;
+       voutdev->vdev.fops = &video_fops;
+       voutdev->vdev.ioctl_ops = &video_ioctl_ops;
+       voutdev->vdev.release = video_device_release_empty;
+       voutdev->vdev.v4l2_dev = &voutdev->v4l2dev;
+       voutdev->vdev.lock = &voutdev->lock;
+       voutdev->vdev.queue = &voutdev->queue;
+       video_set_drvdata(&voutdev->vdev, voutdev);
+
+       rv = video_register_device(&voutdev->vdev, VFL_TYPE_VIDEO, -1);
+       if (rv) {
+               dev_err(dev, "failed to register video device\n");
+               goto err_v4l2_dev;
+       }
+
+       /* Module sysfs attributes */
+       groups = MGB4_IS_GMSL(mgbdev)
+         ? mgb4_gmsl_out_groups : mgb4_fpdl3_out_groups;
+       rv = device_add_groups(&voutdev->vdev.dev, groups);
+       if (rv) {
+               dev_err(dev, "failed to create sysfs attributes\n");
+               goto err_video_dev;
+       }
+
+#ifdef CONFIG_DEBUG_FS
+       debugfs_init(voutdev);
+#endif
+
+       return voutdev;
+
+       /*
+        * NOTE(review): the serializer allocated by ser_init() is not
+        * freed on the err_v4l2_dev/err_video_dev paths — confirm
+        * whether an mgb4_i2c_free() is missing here.
+        */
+err_video_dev:
+       video_unregister_device(&voutdev->vdev);
+err_v4l2_dev:
+       v4l2_device_unregister(&voutdev->v4l2dev);
+err_irq:
+       free_irq(irq, voutdev);
+err_alloc:
+       kfree(voutdev);
+
+       return NULL;
+}
+
+/*
+ * Free a video output device created by mgb4_vout_create(): release the
+ * IRQ, debugfs and sysfs entries, the serializer I2C client and the
+ * video/V4L2 devices, then the structure itself.
+ */
+void mgb4_vout_free(struct mgb4_vout_dev *voutdev)
+{
+       const struct attribute_group **groups;
+       int irq = xdma_get_user_irq(voutdev->mgbdev->xdev, voutdev->config->irq);
+
+       free_irq(irq, voutdev);
+
+#ifdef CONFIG_DEBUG_FS
+       debugfs_remove_recursive(voutdev->debugfs);
+#endif
+
+       /* Same FPDL3/GMSL group selection as at creation time */
+       groups = MGB4_IS_GMSL(voutdev->mgbdev)
+         ? mgb4_gmsl_out_groups : mgb4_fpdl3_out_groups;
+       device_remove_groups(&voutdev->vdev.dev, groups);
+
+       mgb4_i2c_free(&voutdev->ser);
+       video_unregister_device(&voutdev->vdev);
+       v4l2_device_unregister(&voutdev->v4l2dev);
+
+       kfree(voutdev);
+}
diff --git a/drivers/media/pci/mgb4/mgb4_vout.h b/drivers/media/pci/mgb4/mgb4_vout.h
new file mode 100644 (file)
index 0000000..b163dee
--- /dev/null
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021-2023 Digiteq Automotive
+ *     author: Martin Tuma <martin.tuma@digiteqautomotive.com>
+ */
+
+#ifndef __MGB4_VOUT_H__
+#define __MGB4_VOUT_H__
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-core.h>
+#include <linux/debugfs.h>
+#include "mgb4_i2c.h"
+
+/* Register offsets of one video output core in the FPGA address space */
+struct mgb4_vout_regs {
+       u32 address;
+       u32 config;
+       u32 status;
+       u32 resolution;
+       u32 frame_period;
+       u32 hsync;
+       u32 vsync;
+       u32 padding;
+};
+
+/*
+ * Static per-output description: device id, XDMA channel number, user
+ * IRQ index and the output's register map.
+ */
+struct mgb4_vout_config {
+       int id;
+       int dma_channel;
+       int irq;
+       struct mgb4_vout_regs regs;
+};
+
+/* Runtime state of one video output (V4L2 TX) device */
+struct mgb4_vout_dev {
+       struct mgb4_dev *mgbdev;
+       struct v4l2_device v4l2dev;
+       struct video_device vdev;
+       struct vb2_queue queue;
+       struct mutex lock; /* vdev lock */
+
+       spinlock_t qlock; /* buffer queue lock */
+       struct list_head buf_list;
+       struct work_struct dma_work;
+
+       u32 width;
+       u32 height;
+       u32 freq;
+       u32 padding; /* per-line padding in pixels (see vidioc_s_fmt) */
+
+       struct mgb4_i2c_client ser;
+
+       const struct mgb4_vout_config *config;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *debugfs;
+       struct debugfs_regset32 regset;
+       struct debugfs_reg32 regs[7]; /* named entries set in debugfs_init() */
+#endif
+};
+
+struct mgb4_vout_dev *mgb4_vout_create(struct mgb4_dev *mgbdev, int id);
+void mgb4_vout_free(struct mgb4_vout_dev *voutdev);
+
+#endif
index 56340553b2823977e052963f0ce27d841aeae32b..1cd990468d3de9db8b14b72483972041c57bfee2 100644 (file)
@@ -219,7 +219,7 @@ struct zoran {
        const struct tvnorm *timing;
 
        unsigned short id;      /* number of this device */
-       char name[32];          /* name of this device */
+       char name[40];          /* name of this device */
        struct pci_dev *pci_dev;        /* PCI device */
        unsigned char revision; /* revision of zr36057 */
        unsigned char __iomem *zr36057_mem;/* pointer to mapped IO memory */
index ee579916f8744a5df7c8f805c6605dd3a67b4497..91e54215de3a9b8170e26b7b38f8abc842fcc844 100644 (file)
@@ -73,6 +73,7 @@ source "drivers/media/platform/intel/Kconfig"
 source "drivers/media/platform/marvell/Kconfig"
 source "drivers/media/platform/mediatek/Kconfig"
 source "drivers/media/platform/microchip/Kconfig"
+source "drivers/media/platform/nuvoton/Kconfig"
 source "drivers/media/platform/nvidia/Kconfig"
 source "drivers/media/platform/nxp/Kconfig"
 source "drivers/media/platform/qcom/Kconfig"
index 5453bb868e6794d634fa3c32ee1b6a3de35c4880..3296ec1ebe16d7d38f0b1c8b54e4fe275d2d788e 100644 (file)
@@ -16,6 +16,7 @@ obj-y += intel/
 obj-y += marvell/
 obj-y += mediatek/
 obj-y += microchip/
+obj-y += nuvoton/
 obj-y += nvidia/
 obj-y += nxp/
 obj-y += qcom/
index 16effad107469878d6115cd5771df0f75160e261..aadc947a77ae053ce9129f1b4f63e49f9c1daa03 100644 (file)
@@ -16,7 +16,7 @@
 
 const char *msg_type_name(enum mcu_msg_type type)
 {
-       static char buf[9];
+       static char buf[13];
 
        switch (type) {
        case MCU_MSG_TYPE_INIT:
index a5686058d7547e806b3623cfb3e6d621ad34502f..c0c9013f1aab34b11319f7870d2be08899fd48a9 100644 (file)
@@ -184,7 +184,7 @@ struct mcu_msg_push_buffers_internal {
        struct mcu_msg_header header;
        u32 channel_id;
        size_t num_buffers;
-       struct mcu_msg_push_buffers_internal_buffer buffer[];
+       struct mcu_msg_push_buffers_internal_buffer buffer[] __counted_by(num_buffers);
 };
 
 struct mcu_msg_put_stream_buffer {
index 667637eedb5d45d607e4b13b51a4e5027c6987f6..7320852668d647fbc16e71ab7ac2ad827ad08356 100644 (file)
@@ -71,6 +71,7 @@ enum {
        VPU_MSG_ID_TIMESTAMP_INFO,
        VPU_MSG_ID_FIRMWARE_XCPT,
        VPU_MSG_ID_PIC_SKIPPED,
+       VPU_MSG_ID_DBG_MSG,
 };
 
 enum VPU_ENC_MEMORY_RESOURSE {
index af3b336e5dc32d0065a578087e9978e3fdd82944..d12310af9ebce14322f15f891f40aff924eaa409 100644 (file)
@@ -489,6 +489,7 @@ const char *vpu_id_name(u32 id)
        case VPU_MSG_ID_UNSUPPORTED: return "unsupported";
        case VPU_MSG_ID_FIRMWARE_XCPT: return "exception";
        case VPU_MSG_ID_PIC_SKIPPED: return "skipped";
+       case VPU_MSG_ID_DBG_MSG: return "debug msg";
        }
        return "<unknown>";
 }
index f771661980c0126d10f15311dafddfaf5683eec9..d3425de7bccd3161063ad4774f02c42afd133f71 100644 (file)
@@ -745,6 +745,7 @@ static struct vpu_pair malone_msgs[] = {
        {VPU_MSG_ID_UNSUPPORTED, VID_API_EVENT_UNSUPPORTED_STREAM},
        {VPU_MSG_ID_FIRMWARE_XCPT, VID_API_EVENT_FIRMWARE_XCPT},
        {VPU_MSG_ID_PIC_SKIPPED, VID_API_EVENT_PIC_SKIPPED},
+       {VPU_MSG_ID_DBG_MSG, VID_API_EVENT_DBG_MSG_DEC},
 };
 
 static void vpu_malone_pack_fs_alloc(struct vpu_rpc_event *pkt,
index d0ead051f7d18de03a78ff3663bd361e5d6ffc40..b74a407a19f225cec05cc2b83d947bdcf433ff65 100644 (file)
@@ -23,6 +23,7 @@
 struct vpu_msg_handler {
        u32 id;
        void (*done)(struct vpu_inst *inst, struct vpu_rpc_event *pkt);
+       u32 is_str;
 };
 
 static void vpu_session_handle_start_done(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
@@ -154,7 +155,7 @@ static void vpu_session_handle_error(struct vpu_inst *inst, struct vpu_rpc_event
 {
        char *str = (char *)pkt->data;
 
-       if (strlen(str))
+       if (*str)
                dev_err(inst->dev, "instance %d firmware error : %s\n", inst->id, str);
        else
                dev_err(inst->dev, "instance %d is unsupported stream\n", inst->id);
@@ -180,6 +181,21 @@ static void vpu_session_handle_pic_skipped(struct vpu_inst *inst, struct vpu_rpc
        vpu_inst_unlock(inst);
 }
 
+static void vpu_session_handle_dbg_msg(struct vpu_inst *inst, struct vpu_rpc_event *pkt)
+{
+       char *str = (char *)pkt->data;
+
+       if (*str)
+               dev_info(inst->dev, "instance %d firmware dbg msg : %s\n", inst->id, str);
+}
+
+static void vpu_terminate_string_msg(struct vpu_rpc_event *pkt)
+{
+       if (pkt->hdr.num == ARRAY_SIZE(pkt->data))
+               pkt->hdr.num--;
+       pkt->data[pkt->hdr.num] = 0;
+}
+
 static struct vpu_msg_handler handlers[] = {
        {VPU_MSG_ID_START_DONE, vpu_session_handle_start_done},
        {VPU_MSG_ID_STOP_DONE, vpu_session_handle_stop_done},
@@ -193,9 +209,10 @@ static struct vpu_msg_handler handlers[] = {
        {VPU_MSG_ID_PIC_DECODED, vpu_session_handle_pic_decoded},
        {VPU_MSG_ID_DEC_DONE, vpu_session_handle_pic_done},
        {VPU_MSG_ID_PIC_EOS, vpu_session_handle_eos},
-       {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error},
-       {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt},
+       {VPU_MSG_ID_UNSUPPORTED, vpu_session_handle_error, true},
+       {VPU_MSG_ID_FIRMWARE_XCPT, vpu_session_handle_firmware_xcpt, true},
        {VPU_MSG_ID_PIC_SKIPPED, vpu_session_handle_pic_skipped},
+       {VPU_MSG_ID_DBG_MSG, vpu_session_handle_dbg_msg, true},
 };
 
 static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *msg)
@@ -219,8 +236,12 @@ static int vpu_session_handle_msg(struct vpu_inst *inst, struct vpu_rpc_event *m
                }
        }
 
-       if (handler && handler->done)
-               handler->done(inst, msg);
+       if (handler) {
+               if (handler->is_str)
+                       vpu_terminate_string_msg(msg);
+               if (handler->done)
+                       handler->done(inst, msg);
+       }
 
        vpu_response_cmd(inst, msg_id, 1);
 
index a9c2c69b2ed99f2808128732cce5b6b741db6fd0..d08aa7f73d4fbdd33850cc56ff44efa298a06f30 100644 (file)
@@ -1970,22 +1970,15 @@ static void aspeed_video_debugfs_remove(struct aspeed_video *video)
        debugfs_entry = NULL;
 }
 
-static int aspeed_video_debugfs_create(struct aspeed_video *video)
+static void aspeed_video_debugfs_create(struct aspeed_video *video)
 {
        debugfs_entry = debugfs_create_file(DEVICE_NAME, 0444, NULL,
                                            video,
                                            &aspeed_video_debugfs_fops);
-       if (!debugfs_entry)
-               aspeed_video_debugfs_remove(video);
-
-       return !debugfs_entry ? -EIO : 0;
 }
 #else
 static void aspeed_video_debugfs_remove(struct aspeed_video *video) { }
-static int aspeed_video_debugfs_create(struct aspeed_video *video)
-{
-       return 0;
-}
+static void aspeed_video_debugfs_create(struct aspeed_video *video) { }
 #endif /* CONFIG_DEBUG_FS */
 
 static int aspeed_video_setup_video(struct aspeed_video *video)
@@ -2198,9 +2191,7 @@ static int aspeed_video_probe(struct platform_device *pdev)
                return rc;
        }
 
-       rc = aspeed_video_debugfs_create(video);
-       if (rc)
-               dev_err(video->dev, "debugfs create failed\n");
+       aspeed_video_debugfs_create(video);
 
        return 0;
 }
index 480325d053dea70fc5e5e40c56cffb4b0a4cf7f0..1aa608c00dbce9b52d0d48b5ac8c877db7494fd7 100644 (file)
@@ -8,6 +8,8 @@ config VIDEO_CADENCE_CSI2RX
        select MEDIA_CONTROLLER
        select VIDEO_V4L2_SUBDEV_API
        select V4L2_FWNODE
+       select GENERIC_PHY
+       select GENERIC_PHY_MIPI_DPHY
        help
          Support for the Cadence MIPI CSI2 Receiver controller.
 
index 0d879d71d8185014baa11ec653aef1a11ecc582b..889f4fbbafb3cd2e580d8a1ff34708a1165201cb 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_graph.h>
 #define CSI2RX_STREAM_BASE(n)          (((n) + 1) * 0x100)
 
 #define CSI2RX_STREAM_CTRL_REG(n)              (CSI2RX_STREAM_BASE(n) + 0x000)
+#define CSI2RX_STREAM_CTRL_SOFT_RST                    BIT(4)
+#define CSI2RX_STREAM_CTRL_STOP                                BIT(1)
 #define CSI2RX_STREAM_CTRL_START                       BIT(0)
 
+#define CSI2RX_STREAM_STATUS_REG(n)            (CSI2RX_STREAM_BASE(n) + 0x004)
+#define CSI2RX_STREAM_STATUS_RDY                       BIT(31)
+
 #define CSI2RX_STREAM_DATA_CFG_REG(n)          (CSI2RX_STREAM_BASE(n) + 0x008)
-#define CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT            BIT(31)
 #define CSI2RX_STREAM_DATA_CFG_VC_SELECT(n)            BIT((n) + 16)
 
 #define CSI2RX_STREAM_CFG_REG(n)               (CSI2RX_STREAM_BASE(n) + 0x00c)
@@ -61,6 +66,11 @@ enum csi2rx_pads {
        CSI2RX_PAD_MAX,
 };
 
+struct csi2rx_fmt {
+       u32                             code;
+       u8                              bpp;
+};
+
 struct csi2rx_priv {
        struct device                   *dev;
        unsigned int                    count;
@@ -95,6 +105,32 @@ struct csi2rx_priv {
        int                             source_pad;
 };
 
+static const struct csi2rx_fmt formats[] = {
+       { .code = MEDIA_BUS_FMT_YUYV8_1X16, .bpp = 16, },
+       { .code = MEDIA_BUS_FMT_UYVY8_1X16, .bpp = 16, },
+       { .code = MEDIA_BUS_FMT_YVYU8_1X16, .bpp = 16, },
+       { .code = MEDIA_BUS_FMT_VYUY8_1X16, .bpp = 16, },
+       { .code = MEDIA_BUS_FMT_SBGGR8_1X8, .bpp = 8, },
+       { .code = MEDIA_BUS_FMT_SGBRG8_1X8, .bpp = 8, },
+       { .code = MEDIA_BUS_FMT_SGRBG8_1X8, .bpp = 8, },
+       { .code = MEDIA_BUS_FMT_SRGGB8_1X8, .bpp = 8, },
+       { .code = MEDIA_BUS_FMT_SBGGR10_1X10, .bpp = 10, },
+       { .code = MEDIA_BUS_FMT_SGBRG10_1X10, .bpp = 10, },
+       { .code = MEDIA_BUS_FMT_SGRBG10_1X10, .bpp = 10, },
+       { .code = MEDIA_BUS_FMT_SRGGB10_1X10, .bpp = 10, },
+};
+
+static const struct csi2rx_fmt *csi2rx_get_fmt_by_code(u32 code)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(formats); i++)
+               if (formats[i].code == code)
+                       return &formats[i];
+
+       return NULL;
+}
+
 static inline
 struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
 {
@@ -103,19 +139,54 @@ struct csi2rx_priv *v4l2_subdev_to_csi2rx(struct v4l2_subdev *subdev)
 
 static void csi2rx_reset(struct csi2rx_priv *csi2rx)
 {
+       unsigned int i;
+
+       /* Reset module */
        writel(CSI2RX_SOFT_RESET_PROTOCOL | CSI2RX_SOFT_RESET_FRONT,
               csi2rx->base + CSI2RX_SOFT_RESET_REG);
+       /* Reset individual streams. */
+       for (i = 0; i < csi2rx->max_streams; i++) {
+               writel(CSI2RX_STREAM_CTRL_SOFT_RST,
+                      csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+       }
 
-       udelay(10);
+       usleep_range(10, 20);
 
+       /* Clear resets */
        writel(0, csi2rx->base + CSI2RX_SOFT_RESET_REG);
+       for (i = 0; i < csi2rx->max_streams; i++)
+               writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
 }
 
 static int csi2rx_configure_ext_dphy(struct csi2rx_priv *csi2rx)
 {
        union phy_configure_opts opts = { };
+       struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
+       struct v4l2_subdev_format sd_fmt = {
+               .which  = V4L2_SUBDEV_FORMAT_ACTIVE,
+               .pad    = CSI2RX_PAD_SINK,
+       };
+       const struct csi2rx_fmt *fmt;
+       s64 link_freq;
        int ret;
 
+       ret = v4l2_subdev_call_state_active(&csi2rx->subdev, pad, get_fmt,
+                                           &sd_fmt);
+       if (ret < 0)
+               return ret;
+
+       fmt = csi2rx_get_fmt_by_code(sd_fmt.format.code);
+
+       link_freq = v4l2_get_link_freq(csi2rx->source_subdev->ctrl_handler,
+                                      fmt->bpp, 2 * csi2rx->num_lanes);
+       if (link_freq < 0)
+               return link_freq;
+
+       ret = phy_mipi_dphy_get_default_config_for_hsclk(link_freq,
+                                                        csi2rx->num_lanes, cfg);
+       if (ret)
+               return ret;
+
        ret = phy_power_on(csi2rx->dphy);
        if (ret)
                return ret;
@@ -199,8 +270,11 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
                writel(CSI2RX_STREAM_CFG_FIFO_MODE_LARGE_BUF,
                       csi2rx->base + CSI2RX_STREAM_CFG_REG(i));
 
-               writel(CSI2RX_STREAM_DATA_CFG_EN_VC_SELECT |
-                      CSI2RX_STREAM_DATA_CFG_VC_SELECT(i),
+               /*
+                * Enable one virtual channel. When multiple virtual channels
+                * are supported this will have to be changed.
+                */
+               writel(CSI2RX_STREAM_DATA_CFG_VC_SELECT(0),
                       csi2rx->base + CSI2RX_STREAM_DATA_CFG_REG(i));
 
                writel(CSI2RX_STREAM_CTRL_START,
@@ -243,13 +317,25 @@ err_disable_pclk:
 static void csi2rx_stop(struct csi2rx_priv *csi2rx)
 {
        unsigned int i;
+       u32 val;
+       int ret;
 
        clk_prepare_enable(csi2rx->p_clk);
        reset_control_assert(csi2rx->sys_rst);
        clk_disable_unprepare(csi2rx->sys_clk);
 
        for (i = 0; i < csi2rx->max_streams; i++) {
-               writel(0, csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+               writel(CSI2RX_STREAM_CTRL_STOP,
+                      csi2rx->base + CSI2RX_STREAM_CTRL_REG(i));
+
+               ret = readl_relaxed_poll_timeout(csi2rx->base +
+                                                CSI2RX_STREAM_STATUS_REG(i),
+                                                val,
+                                                !(val & CSI2RX_STREAM_STATUS_RDY),
+                                                10, 10000);
+               if (ret)
+                       dev_warn(csi2rx->dev,
+                                "Failed to stop streaming on pad%u\n", i);
 
                reset_control_assert(csi2rx->pixel_rst[i]);
                clk_disable_unprepare(csi2rx->pixel_clk[i]);
@@ -303,12 +389,72 @@ out:
        return ret;
 }
 
+static int csi2rx_set_fmt(struct v4l2_subdev *subdev,
+                         struct v4l2_subdev_state *state,
+                         struct v4l2_subdev_format *format)
+{
+       struct v4l2_mbus_framefmt *fmt;
+       unsigned int i;
+
+       /* No transcoding, source and sink formats must match. */
+       if (format->pad != CSI2RX_PAD_SINK)
+               return v4l2_subdev_get_fmt(subdev, state, format);
+
+       if (!csi2rx_get_fmt_by_code(format->format.code))
+               format->format.code = formats[0].code;
+
+       format->format.field = V4L2_FIELD_NONE;
+
+       /* Set sink format */
+       fmt = v4l2_subdev_get_pad_format(subdev, state, format->pad);
+       *fmt = format->format;
+
+       /* Propagate to source formats */
+       for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++) {
+               fmt = v4l2_subdev_get_pad_format(subdev, state, i);
+               *fmt = format->format;
+       }
+
+       return 0;
+}
+
+static int csi2rx_init_cfg(struct v4l2_subdev *subdev,
+                          struct v4l2_subdev_state *state)
+{
+       struct v4l2_subdev_format format = {
+               .pad = CSI2RX_PAD_SINK,
+               .format = {
+                       .width = 640,
+                       .height = 480,
+                       .code = MEDIA_BUS_FMT_UYVY8_1X16,
+                       .field = V4L2_FIELD_NONE,
+                       .colorspace = V4L2_COLORSPACE_SRGB,
+                       .ycbcr_enc = V4L2_YCBCR_ENC_601,
+                       .quantization = V4L2_QUANTIZATION_LIM_RANGE,
+                       .xfer_func = V4L2_XFER_FUNC_SRGB,
+               },
+       };
+
+       return csi2rx_set_fmt(subdev, state, &format);
+}
+
+static const struct v4l2_subdev_pad_ops csi2rx_pad_ops = {
+       .get_fmt        = v4l2_subdev_get_fmt,
+       .set_fmt        = csi2rx_set_fmt,
+       .init_cfg       = csi2rx_init_cfg,
+};
+
 static const struct v4l2_subdev_video_ops csi2rx_video_ops = {
        .s_stream       = csi2rx_s_stream,
 };
 
 static const struct v4l2_subdev_ops csi2rx_subdev_ops = {
        .video          = &csi2rx_video_ops,
+       .pad            = &csi2rx_pad_ops,
+};
+
+static const struct media_entity_operations csi2rx_media_ops = {
+       .link_validate = v4l2_subdev_link_validate,
 };
 
 static int csi2rx_async_bound(struct v4l2_async_notifier *notifier,
@@ -479,8 +625,10 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
        asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
                                              struct v4l2_async_connection);
        of_node_put(ep);
-       if (IS_ERR(asd))
+       if (IS_ERR(asd)) {
+               v4l2_async_nf_cleanup(&csi2rx->notifier);
                return PTR_ERR(asd);
+       }
 
        csi2rx->notifier.ops = &csi2rx_notifier_ops;
 
@@ -516,23 +664,29 @@ static int csi2rx_probe(struct platform_device *pdev)
        csi2rx->subdev.dev = &pdev->dev;
        v4l2_subdev_init(&csi2rx->subdev, &csi2rx_subdev_ops);
        v4l2_set_subdevdata(&csi2rx->subdev, &pdev->dev);
-       snprintf(csi2rx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s",
-                KBUILD_MODNAME, dev_name(&pdev->dev));
+       snprintf(csi2rx->subdev.name, sizeof(csi2rx->subdev.name),
+                "%s.%s", KBUILD_MODNAME, dev_name(&pdev->dev));
 
        /* Create our media pads */
        csi2rx->subdev.entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;
        csi2rx->pads[CSI2RX_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
        for (i = CSI2RX_PAD_SOURCE_STREAM0; i < CSI2RX_PAD_MAX; i++)
                csi2rx->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+       csi2rx->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+       csi2rx->subdev.entity.ops = &csi2rx_media_ops;
 
        ret = media_entity_pads_init(&csi2rx->subdev.entity, CSI2RX_PAD_MAX,
                                     csi2rx->pads);
        if (ret)
                goto err_cleanup;
 
+       ret = v4l2_subdev_init_finalize(&csi2rx->subdev);
+       if (ret)
+               goto err_cleanup;
+
        ret = v4l2_async_register_subdev(&csi2rx->subdev);
        if (ret < 0)
-               goto err_cleanup;
+               goto err_free_state;
 
        dev_info(&pdev->dev,
                 "Probed CSI2RX with %u/%u lanes, %u streams, %s D-PHY\n",
@@ -542,8 +696,12 @@ static int csi2rx_probe(struct platform_device *pdev)
 
        return 0;
 
+err_free_state:
+       v4l2_subdev_cleanup(&csi2rx->subdev);
 err_cleanup:
+       v4l2_async_nf_unregister(&csi2rx->notifier);
        v4l2_async_nf_cleanup(&csi2rx->notifier);
+       media_entity_cleanup(&csi2rx->subdev.entity);
 err_free_priv:
        kfree(csi2rx);
        return ret;
@@ -553,7 +711,11 @@ static void csi2rx_remove(struct platform_device *pdev)
 {
        struct csi2rx_priv *csi2rx = platform_get_drvdata(pdev);
 
+       v4l2_async_nf_unregister(&csi2rx->notifier);
+       v4l2_async_nf_cleanup(&csi2rx->notifier);
        v4l2_async_unregister_subdev(&csi2rx->subdev);
+       v4l2_subdev_cleanup(&csi2rx->subdev);
+       media_entity_cleanup(&csi2rx->subdev.entity);
        kfree(csi2rx);
 }
 
index 1e0400b7803e939dc5266480f49c60a7f3ef7ec5..c115742f347ff17d1367cc44d4e27cbd8d5b5f6b 100644 (file)
@@ -480,7 +480,7 @@ static int csi2tx_get_resources(struct csi2tx_priv *csi2tx,
        csi2tx->has_internal_dphy = !!(dev_cfg & CSI2TX_DEVICE_CONFIG_HAS_DPHY);
 
        for (i = 0; i < csi2tx->max_streams; i++) {
-               char clk_name[16];
+               char clk_name[23];
 
                snprintf(clk_name, sizeof(clk_name), "pixel_if%u_clk", i);
                csi2tx->pixel_clk[i] = devm_clk_get(&pdev->dev, clk_name);
@@ -592,8 +592,8 @@ static int csi2tx_probe(struct platform_device *pdev)
        csi2tx->subdev.owner = THIS_MODULE;
        csi2tx->subdev.dev = &pdev->dev;
        csi2tx->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-       snprintf(csi2tx->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s.%s",
-                KBUILD_MODNAME, dev_name(&pdev->dev));
+       snprintf(csi2tx->subdev.name, sizeof(csi2tx->subdev.name),
+                "%s.%s", KBUILD_MODNAME, dev_name(&pdev->dev));
 
        ret = csi2tx_check_lanes(csi2tx);
        if (ret)
index 2bbc48c7402ca5be8a17ceba0e961218096a5cac..f8fa3b841ccfb0602246f307742591d7ac61b531 100644 (file)
@@ -127,6 +127,7 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx,  void __iomem *base)
        u32 img_stride;
        u32 mem_stride;
        u32 i, enc_quality;
+       u32 nr_enc_quality = ARRAY_SIZE(mtk_jpeg_enc_quality);
 
        value = width << 16 | height;
        writel(value, base + JPEG_ENC_IMG_SIZE);
@@ -157,8 +158,8 @@ void mtk_jpeg_set_enc_params(struct mtk_jpeg_ctx *ctx,  void __iomem *base)
        writel(img_stride, base + JPEG_ENC_IMG_STRIDE);
        writel(mem_stride, base + JPEG_ENC_STRIDE);
 
-       enc_quality = mtk_jpeg_enc_quality[0].hardware_value;
-       for (i = 0; i < ARRAY_SIZE(mtk_jpeg_enc_quality); i++) {
+       enc_quality = mtk_jpeg_enc_quality[nr_enc_quality - 1].hardware_value;
+       for (i = 0; i < nr_enc_quality; i++) {
                if (ctx->enc_quality <= mtk_jpeg_enc_quality[i].quality_param) {
                        enc_quality = mtk_jpeg_enc_quality[i].hardware_value;
                        break;
index 3177592490bee4f7aab2c1407b513b88879a73ae..6adac857a4779dbc0974109e1057cd583875562e 100644 (file)
@@ -261,11 +261,11 @@ static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
                const struct v4l2_rect *compose;
                u32 out = 0;
 
+               ctx = &path->comps[index];
                if (CFG_CHECK(MT8183, p_id))
                        out = CFG_COMP(MT8183, ctx->param, outputs[0]);
 
                compose = path->composes[out];
-               ctx = &path->comps[index];
                ret = call_op(ctx, config_frame, cmd, compose);
                if (ret)
                        return ret;
index 667933ea15f446e4d806bd88a32dbddf5f8f4bac..575c8d52acd1913d489c7e3b6ae649448eb9bc59 100644 (file)
@@ -1137,6 +1137,7 @@ int mdp_comp_config(struct mdp_dev *mdp)
                comp = mdp_comp_create(mdp, node, id);
                if (IS_ERR(comp)) {
                        ret = PTR_ERR(comp);
+                       of_node_put(node);
                        goto err_init_comps;
                }
 
index 9e744d07a1e8eab245d6bafec65f61acc3af497e..6bbe55de6ce9ac1a96c3dac21dd45ebde45842ef 100644 (file)
@@ -68,7 +68,7 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use
 
                plat_dev = dec_dev->plat_dev;
        } else {
-               pr_err("Invalid fw_use %d (use a resonable fw id here)\n", fw_use);
+               pr_err("Invalid fw_use %d (use a reasonable fw id here)\n", fw_use);
                return ERR_PTR(-EINVAL);
        }
 
index 5e03b08865599a8a7848ded68f030a926a415287..9f6e4b59455dab9fcd392633103e4a85a15c1364 100644 (file)
@@ -109,7 +109,7 @@ struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(void *priv, enum mtk_vcodec_fw_use
                plat_dev = dec_dev->plat_dev;
                rst_id = VPU_RST_DEC;
        } else {
-               pr_err("Invalid fw_use %d (use a resonable fw id here)\n", fw_use);
+               pr_err("Invalid fw_use %d (use a reasonable fw id here)\n", fw_use);
                return ERR_PTR(-EINVAL);
        }
 
index 908602031fd0e3128dc86c9578b62978022852c6..9ce34a3b5ee67daf5ae6a575159ac4f8e5467565 100644 (file)
@@ -47,20 +47,32 @@ EXPORT_SYMBOL(mtk_vcodec_write_vdecsys);
 
 int mtk_vcodec_mem_alloc(void *priv, struct mtk_vcodec_mem *mem)
 {
+       enum mtk_instance_type inst_type = *((unsigned int *)priv);
+       struct platform_device *plat_dev;
        unsigned long size = mem->size;
-       struct mtk_vcodec_dec_ctx *ctx = priv;
-       struct device *dev = &ctx->dev->plat_dev->dev;
+       int id;
 
-       mem->va = dma_alloc_coherent(dev, size, &mem->dma_addr, GFP_KERNEL);
+       if (inst_type == MTK_INST_ENCODER) {
+               struct mtk_vcodec_enc_ctx *enc_ctx = priv;
+
+               plat_dev = enc_ctx->dev->plat_dev;
+               id = enc_ctx->id;
+       } else {
+               struct mtk_vcodec_dec_ctx *dec_ctx = priv;
+
+               plat_dev = dec_ctx->dev->plat_dev;
+               id = dec_ctx->id;
+       }
+
+       mem->va = dma_alloc_coherent(&plat_dev->dev, size, &mem->dma_addr, GFP_KERNEL);
        if (!mem->va) {
-               mtk_v4l2_vdec_err(ctx, "%s dma_alloc size=%ld failed!", dev_name(dev), size);
+               mtk_v4l2_err(plat_dev, "%s dma_alloc size=%ld failed!",
+                            dev_name(&plat_dev->dev), size);
                return -ENOMEM;
        }
 
-       mtk_v4l2_vdec_dbg(3, ctx, "[%d]  - va      = %p", ctx->id, mem->va);
-       mtk_v4l2_vdec_dbg(3, ctx, "[%d]  - dma     = 0x%lx", ctx->id,
-                         (unsigned long)mem->dma_addr);
-       mtk_v4l2_vdec_dbg(3, ctx, "[%d]    size = 0x%lx", ctx->id, size);
+       mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
+                      (unsigned long)mem->dma_addr, size);
 
        return 0;
 }
@@ -68,21 +80,33 @@ EXPORT_SYMBOL(mtk_vcodec_mem_alloc);
 
 void mtk_vcodec_mem_free(void *priv, struct mtk_vcodec_mem *mem)
 {
+       enum mtk_instance_type inst_type = *((unsigned int *)priv);
+       struct platform_device *plat_dev;
        unsigned long size = mem->size;
-       struct mtk_vcodec_dec_ctx *ctx = priv;
-       struct device *dev = &ctx->dev->plat_dev->dev;
+       int id;
+
+       if (inst_type == MTK_INST_ENCODER) {
+               struct mtk_vcodec_enc_ctx *enc_ctx = priv;
+
+               plat_dev = enc_ctx->dev->plat_dev;
+               id = enc_ctx->id;
+       } else {
+               struct mtk_vcodec_dec_ctx *dec_ctx = priv;
+
+               plat_dev = dec_ctx->dev->plat_dev;
+               id = dec_ctx->id;
+       }
 
        if (!mem->va) {
-               mtk_v4l2_vdec_err(ctx, "%s dma_free size=%ld failed!", dev_name(dev), size);
+               mtk_v4l2_err(plat_dev, "%s dma_free size=%ld failed!",
+                            dev_name(&plat_dev->dev), size);
                return;
        }
 
-       mtk_v4l2_vdec_dbg(3, ctx, "[%d]  - va      = %p", ctx->id, mem->va);
-       mtk_v4l2_vdec_dbg(3, ctx, "[%d]  - dma     = 0x%lx", ctx->id,
-                         (unsigned long)mem->dma_addr);
-       mtk_v4l2_vdec_dbg(3, ctx, "[%d]    size = 0x%lx", ctx->id, size);
+       mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%lx", id, mem->va,
+                      (unsigned long)mem->dma_addr, size);
 
-       dma_free_coherent(dev, size, mem->va, mem->dma_addr);
+       dma_free_coherent(&plat_dev->dev, size, mem->va, mem->dma_addr);
        mem->va = NULL;
        mem->dma_addr = 0;
        mem->size = 0;
index 04948d3eb011a61c57bb004e7d49cb748baf0c02..eb381fa6e7d14ec94a774afa1bb39d5399374e6f 100644 (file)
@@ -866,7 +866,7 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
 {
        struct mtk_vcodec_enc_ctx *ctx = vb2_get_drv_priv(q);
        struct venc_enc_param param;
-       int ret, pm_ret;
+       int ret;
        int i;
 
        /* Once state turn into MTK_STATE_ABORT, we need stop_streaming
@@ -886,18 +886,12 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
                        return 0;
        }
 
-       ret = pm_runtime_resume_and_get(&ctx->dev->plat_dev->dev);
-       if (ret < 0) {
-               mtk_v4l2_venc_err(ctx, "pm_runtime_resume_and_get fail %d", ret);
-               goto err_start_stream;
-       }
-
        mtk_venc_set_param(ctx, &param);
        ret = venc_if_set_param(ctx, VENC_SET_PARAM_ENC, &param);
        if (ret) {
                mtk_v4l2_venc_err(ctx, "venc_if_set_param failed=%d", ret);
                ctx->state = MTK_STATE_ABORT;
-               goto err_set_param;
+               goto err_start_stream;
        }
        ctx->param_change = MTK_ENCODE_PARAM_NONE;
 
@@ -910,18 +904,13 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
                if (ret) {
                        mtk_v4l2_venc_err(ctx, "venc_if_set_param failed=%d", ret);
                        ctx->state = MTK_STATE_ABORT;
-                       goto err_set_param;
+                       goto err_start_stream;
                }
                ctx->state = MTK_STATE_HEADER;
        }
 
        return 0;
 
-err_set_param:
-       pm_ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
-       if (pm_ret < 0)
-               mtk_v4l2_venc_err(ctx, "pm_runtime_put fail %d", pm_ret);
-
 err_start_stream:
        for (i = 0; i < q->num_buffers; ++i) {
                struct vb2_buffer *buf = vb2_get_buffer(q, i);
@@ -1004,10 +993,6 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
        if (ret)
                mtk_v4l2_venc_err(ctx, "venc_if_deinit failed=%d", ret);
 
-       ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
-       if (ret < 0)
-               mtk_v4l2_venc_err(ctx, "pm_runtime_put fail %d", ret);
-
        ctx->state = MTK_STATE_FREE;
 }
 
index 3fce936e61b9f298185965a7ed4b73cbe0e2d02d..a22b7dfc656e145f995ae36cd80a802dcfeb2b35 100644 (file)
@@ -58,6 +58,24 @@ int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *mtkdev)
        return 0;
 }
 
+void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm)
+{
+       int ret;
+
+       ret = pm_runtime_resume_and_get(pm->dev);
+       if (ret)
+               dev_err(pm->dev, "pm_runtime_resume_and_get fail: %d", ret);
+}
+
+void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm)
+{
+       int ret;
+
+       ret = pm_runtime_put(pm->dev);
+       if (ret && ret != -EAGAIN)
+               dev_err(pm->dev, "pm_runtime_put fail %d", ret);
+}
+
 void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm)
 {
        struct mtk_vcodec_clk *enc_clk = &pm->venc_clk;
index e50be0575190a9a1fe23568113a8cf2e1e499919..157ea08ba9e36a15b117af71f88b55988e5efe29 100644 (file)
@@ -10,7 +10,8 @@
 #include "mtk_vcodec_enc_drv.h"
 
 int mtk_vcodec_init_enc_clk(struct mtk_vcodec_enc_dev *dev);
-
+void mtk_vcodec_enc_pw_on(struct mtk_vcodec_pm *pm);
+void mtk_vcodec_enc_pw_off(struct mtk_vcodec_pm *pm);
 void mtk_vcodec_enc_clock_on(struct mtk_vcodec_pm *pm);
 void mtk_vcodec_enc_clock_off(struct mtk_vcodec_pm *pm);
 
index 1bdaecdd64a79575caea1550dcb41de2f8868245..c402a686f3cb2d57813383f66a68e0b0cf1f884a 100644 (file)
@@ -32,9 +32,7 @@ int venc_if_init(struct mtk_vcodec_enc_ctx *ctx, unsigned int fourcc)
        }
 
        mtk_venc_lock(ctx);
-       mtk_vcodec_enc_clock_on(&ctx->dev->pm);
        ret = ctx->enc_if->init(ctx);
-       mtk_vcodec_enc_clock_off(&ctx->dev->pm);
        mtk_venc_unlock(ctx);
 
        return ret;
@@ -46,9 +44,7 @@ int venc_if_set_param(struct mtk_vcodec_enc_ctx *ctx,
        int ret = 0;
 
        mtk_venc_lock(ctx);
-       mtk_vcodec_enc_clock_on(&ctx->dev->pm);
        ret = ctx->enc_if->set_param(ctx->drv_handle, type, in);
-       mtk_vcodec_enc_clock_off(&ctx->dev->pm);
        mtk_venc_unlock(ctx);
 
        return ret;
@@ -68,10 +64,12 @@ int venc_if_encode(struct mtk_vcodec_enc_ctx *ctx,
        ctx->dev->curr_ctx = ctx;
        spin_unlock_irqrestore(&ctx->dev->irqlock, flags);
 
+       mtk_vcodec_enc_pw_on(&ctx->dev->pm);
        mtk_vcodec_enc_clock_on(&ctx->dev->pm);
        ret = ctx->enc_if->encode(ctx->drv_handle, opt, frm_buf,
                                  bs_buf, result);
        mtk_vcodec_enc_clock_off(&ctx->dev->pm);
+       mtk_vcodec_enc_pw_off(&ctx->dev->pm);
 
        spin_lock_irqsave(&ctx->dev->irqlock, flags);
        ctx->dev->curr_ctx = NULL;
@@ -89,9 +87,7 @@ int venc_if_deinit(struct mtk_vcodec_enc_ctx *ctx)
                return 0;
 
        mtk_venc_lock(ctx);
-       mtk_vcodec_enc_clock_on(&ctx->dev->pm);
        ret = ctx->enc_if->deinit(ctx->drv_handle);
-       mtk_vcodec_enc_clock_off(&ctx->dev->pm);
        mtk_venc_unlock(ctx);
 
        ctx->drv_handle = NULL;
index ae6290d28f8e98eabfee667d60a73ea29c5ea60a..84ad1cc6ad171ef2ea2767653d60e6d779e5604e 100644 (file)
@@ -154,6 +154,11 @@ int vpu_enc_init(struct venc_vpu_inst *vpu)
                return -EINVAL;
        }
 
+       if (IS_ERR_OR_NULL(vpu->vsi)) {
+               mtk_venc_err(vpu->ctx, "invalid venc vsi");
+               return -EINVAL;
+       }
+
        return 0;
 }
 
index 8dbf7bc1e863ba2e945d58826a4224568d0efc6b..1f8528844497092644758c8653a6e345dcf6f51d 100644 (file)
@@ -478,12 +478,8 @@ static const struct vb2_ops isc_vb2_ops = {
 static int isc_querycap(struct file *file, void *priv,
                        struct v4l2_capability *cap)
 {
-       struct isc_device *isc = video_drvdata(file);
-
        strscpy(cap->driver, "microchip-isc", sizeof(cap->driver));
        strscpy(cap->card, "Microchip Image Sensor Controller", sizeof(cap->card));
-       snprintf(cap->bus_info, sizeof(cap->bus_info),
-                "platform:%s", isc->v4l2_dev.name);
 
        return 0;
 }
@@ -1993,8 +1989,6 @@ int isc_mc_init(struct isc_device *isc, u32 ver)
        strscpy(isc->mdev.driver_name, KBUILD_MODNAME,
                sizeof(isc->mdev.driver_name));
        strscpy(isc->mdev.model, match->compatible, sizeof(isc->mdev.model));
-       snprintf(isc->mdev.bus_info, sizeof(isc->mdev.bus_info), "platform:%s",
-                isc->v4l2_dev.name);
        isc->mdev.hw_revision = ver;
 
        media_device_init(&isc->mdev);
diff --git a/drivers/media/platform/nuvoton/Kconfig b/drivers/media/platform/nuvoton/Kconfig
new file mode 100644 (file)
index 0000000..40b36d1
--- /dev/null
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Nuvoton media platform drivers"
+
+config VIDEO_NPCM_VCD_ECE
+       tristate "Nuvoton NPCM Video Capture/Encode Engine driver"
+       depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV
+       depends on ARCH_NPCM || COMPILE_TEST
+       select VIDEOBUF2_DMA_CONTIG
+       help
+         Support for the Video Capture/Differentiation Engine (VCD) and
+         Encoding Compression Engine (ECE) present on Nuvoton NPCM SoCs.
+         The VCD can capture a frame from digital video input and compare
+         two frames in memory, and then the ECE can compress the frame
+         data into HEXTILE format.
diff --git a/drivers/media/platform/nuvoton/Makefile b/drivers/media/platform/nuvoton/Makefile
new file mode 100644 (file)
index 0000000..74a4e3f
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_VIDEO_NPCM_VCD_ECE) += npcm-video.o
diff --git a/drivers/media/platform/nuvoton/npcm-regs.h b/drivers/media/platform/nuvoton/npcm-regs.h
new file mode 100644 (file)
index 0000000..4a44f47
--- /dev/null
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Register definition header for NPCM video driver
+ *
+ * Copyright (C) 2022 Nuvoton Technologies
+ */
+
+#ifndef _NPCM_REGS_H
+#define _NPCM_REGS_H
+
+/* VCD Registers */
+#define VCD_DIFF_TBL                   0x0000
+#define VCD_FBA_ADR                    0x8000
+#define VCD_FBB_ADR                    0x8004
+
+#define VCD_FB_LP                      0x8008
+#define  VCD_FBA_LP                    GENMASK(15, 0)
+#define  VCD_FBB_LP                    GENMASK(31, 16)
+
+#define VCD_CAP_RES                    0x800c
+#define  VCD_CAP_RES_VERT_RES          GENMASK(10, 0)
+#define  VCD_CAP_RES_HOR_RES           GENMASK(26, 16)
+
+#define VCD_MODE                       0x8014
+#define  VCD_MODE_VCDE                 BIT(0)
+#define  VCD_MODE_CM565                        BIT(1)
+#define  VCD_MODE_IDBC                 BIT(3)
+#define  VCD_MODE_KVM_BW_SET           BIT(16)
+
+#define VCD_CMD                                0x8018
+#define  VCD_CMD_GO                    BIT(0)
+#define  VCD_CMD_RST                   BIT(1)
+#define  VCD_CMD_OPERATION             GENMASK(6, 4)
+#define   VCD_CMD_OPERATION_CAPTURE    0
+#define   VCD_CMD_OPERATION_COMPARE    2
+
+#define        VCD_STAT                        0x801c
+#define         VCD_STAT_DONE                  BIT(0)
+#define         VCD_STAT_IFOT                  BIT(2)
+#define         VCD_STAT_IFOR                  BIT(3)
+#define         VCD_STAT_VHT_CHG               BIT(5)
+#define         VCD_STAT_HAC_CHG               BIT(8)
+#define         VCD_STAT_BUSY                  BIT(30)
+#define        VCD_STAT_CLEAR                  0x3fff
+
+#define VCD_INTE                       0x8020
+#define  VCD_INTE_DONE_IE              BIT(0)
+#define  VCD_INTE_IFOT_IE              BIT(2)
+#define  VCD_INTE_IFOR_IE              BIT(3)
+#define  VCD_INTE_VHT_IE               BIT(5)
+#define  VCD_INTE_HAC_IE               BIT(8)
+
+#define VCD_RCHG                       0x8028
+#define  VCD_RCHG_IG_CHG0              GENMASK(2, 0)
+#define  VCD_RCHG_TIM_PRSCL            GENMASK(12, 9)
+
+#define VCD_VER_HI_TIM                 0x8044
+#define  VCD_VER_HI_TIME               GENMASK(23, 0)
+
+#define VCD_VER_HI_LST                 0x8048
+#define  VCD_VER_HI_LAST               GENMASK(23, 0)
+
+#define VCD_HOR_AC_TIM                 0x804c
+#define  VCD_HOR_AC_TIME               GENMASK(13, 0)
+
+#define VCD_HOR_AC_LST                 0x8050
+#define  VCD_HOR_AC_LAST               GENMASK(13, 0)
+
+#define VCD_FIFO                       0x805c
+#define  VCD_FIFO_TH                   0x100350ff
+
+#define VCD_FB_SIZE                    0x500000 /* support up to 1920 x 1200 */
+#define VCD_KVM_BW_PCLK                        120000000UL
+#define VCD_TIMEOUT_US                 300000
+
+/* ECE Registers */
+#define ECE_DDA_CTRL                   0x0000
+#define  ECE_DDA_CTRL_ECEEN            BIT(0)
+#define  ECE_DDA_CTRL_INTEN            BIT(8)
+
+#define ECE_DDA_STS                    0x0004
+#define  ECE_DDA_STS_CDREADY           BIT(8)
+#define  ECE_DDA_STS_ACDRDY            BIT(10)
+
+#define ECE_FBR_BA                     0x0008
+#define ECE_ED_BA                      0x000c
+#define ECE_RECT_XY                    0x0010
+
+#define ECE_RECT_DIMEN                 0x0014
+#define  ECE_RECT_DIMEN_WR             GENMASK(10, 0)
+#define  ECE_RECT_DIMEN_WLTR           GENMASK(14, 11)
+#define  ECE_RECT_DIMEN_HR             GENMASK(26, 16)
+#define  ECE_RECT_DIMEN_HLTR           GENMASK(30, 27)
+
+#define ECE_RESOL                      0x001c
+#define  ECE_RESOL_FB_LP_512           0
+#define  ECE_RESOL_FB_LP_1024          1
+#define  ECE_RESOL_FB_LP_2048          2
+#define  ECE_RESOL_FB_LP_2560          3
+#define  ECE_RESOL_FB_LP_4096          4
+
+#define ECE_HEX_CTRL                   0x0040
+#define  ECE_HEX_CTRL_ENCDIS           BIT(0)
+#define  ECE_HEX_CTRL_ENC_GAP          GENMASK(12, 8)
+
+#define ECE_HEX_RECT_OFFSET            0x0048
+#define  ECE_HEX_RECT_OFFSET_MASK      GENMASK(22, 0)
+
+#define ECE_TILE_W                     16
+#define ECE_TILE_H                     16
+#define ECE_POLL_TIMEOUT_US            300000
+
+/* GCR Registers */
+#define INTCR                          0x3c
+#define  INTCR_GFXIFDIS                        GENMASK(9, 8)
+#define  INTCR_DEHS                    BIT(27)
+
+#define INTCR2                         0x60
+#define  INTCR2_GIRST2                 BIT(2)
+#define  INTCR2_GIHCRST                        BIT(5)
+#define  INTCR2_GIVCRST                        BIT(6)
+
+/* GFXI Register */
+#define DISPST                         0x00
+#define  DISPST_HSCROFF                        BIT(1)
+#define  DISPST_MGAMODE                        BIT(7)
+
+#define HVCNTL                         0x10
+#define  HVCNTL_MASK                   GENMASK(7, 0)
+
+#define HVCNTH                         0x14
+#define  HVCNTH_MASK                   GENMASK(2, 0)
+
+#define VVCNTL                         0x20
+#define  VVCNTL_MASK                   GENMASK(7, 0)
+
+#define VVCNTH                         0x24
+#define  VVCNTH_MASK                   GENMASK(2, 0)
+
+#define GPLLINDIV                      0x40
+#define  GPLLINDIV_MASK                        GENMASK(5, 0)
+#define  GPLLINDIV_GPLLFBDV8           BIT(7)
+
+#define GPLLFBDIV                      0x44
+#define  GPLLFBDIV_MASK                        GENMASK(7, 0)
+
+#define GPLLST                         0x48
+#define  GPLLST_PLLOTDIV1              GENMASK(2, 0)
+#define  GPLLST_PLLOTDIV2              GENMASK(5, 3)
+#define  GPLLST_GPLLFBDV109            GENMASK(7, 6)
+
+#endif /* _NPCM_REGS_H */
diff --git a/drivers/media/platform/nuvoton/npcm-video.c b/drivers/media/platform/nuvoton/npcm-video.c
new file mode 100644 (file)
index 0000000..b9e6782
--- /dev/null
@@ -0,0 +1,1831 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Video Capture/Differentiation Engine (VCD) and Encoding
+ * Compression Engine (ECE) present on Nuvoton NPCM SoCs.
+ *
+ * Copyright (C) 2022 Nuvoton Technologies
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/math.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/videobuf2-dma-contig.h>
+#include <uapi/linux/npcm-video.h>
+#include "npcm-regs.h"
+
+#define DEVICE_NAME    "npcm-video"
+#define MAX_WIDTH      1920
+#define MAX_HEIGHT     1200
+#define MIN_WIDTH      320
+#define MIN_HEIGHT     240
+#define MIN_LP         512
+#define MAX_LP         4096
+#define RECT_W         16
+#define RECT_H         16
+#define BITMAP_SIZE    32
+
+/*
+ * struct npcm_video_addr - one coherent DMA allocation (the VCD frame buffer)
+ * @size: allocated size in bytes (0 while unallocated)
+ * @dma:  bus address programmed into VCD/ECE base-address registers
+ * @virt: kernel virtual address used for CPU copies out of the buffer
+ */
+struct npcm_video_addr {
+       size_t size;
+       dma_addr_t dma;
+       void *virt;
+};
+
+/* Wrapper linking a vb2 buffer into the driver's pending-buffer list. */
+struct npcm_video_buffer {
+       struct vb2_v4l2_buffer vb;
+       struct list_head link;
+};
+
+#define to_npcm_video_buffer(x) \
+       container_of((x), struct npcm_video_buffer, vb)
+
+/*
+ * Bit numbers used in npcm_video->flags:
+ *
+ * VIDEO_STREAMING:    a flag indicating if the video has started streaming
+ * VIDEO_CAPTURING:    a flag indicating if the VCD is capturing a frame
+ * VIDEO_RES_CHANGING: a flag indicating if the resolution is changing
+ * VIDEO_STOPPED:      a flag indicating if the video has stopped streaming
+ */
+enum {
+       VIDEO_STREAMING,
+       VIDEO_CAPTURING,
+       VIDEO_RES_CHANGING,
+       VIDEO_STOPPED,
+};
+
+/* One dirty rectangle, list-linked into a per-frame difference list. */
+struct rect_list {
+       struct v4l2_clip clip;
+       struct list_head list;
+};
+
+/*
+ * struct rect_list_info - walking state for the VCD difference (DIFF) table
+ * @list:           rectangle currently being processed
+ * @first:          anchor rectangle that adjacent tiles are merged into
+ * @head:           per-buffer list the rectangles are appended to
+ * @index:          current DIFF table line index
+ * @tile_perline:   number of 16x16 tiles per frame line
+ * @tile_perrow:    number of 16x16 tiles per frame column
+ * @offset_perline: bytes of DIFF table data per frame line
+ * @tile_size:      total number of tiles covering the frame
+ * @tile_cnt:       tiles processed so far
+ */
+struct rect_list_info {
+       struct rect_list *list;
+       struct rect_list *first;
+       struct list_head *head;
+       unsigned int index;
+       unsigned int tile_perline;
+       unsigned int tile_perrow;
+       unsigned int offset_perline;
+       unsigned int tile_size;
+       unsigned int tile_cnt;
+};
+
+/*
+ * struct npcm_ece - Encoding Compression Engine (ECE) state
+ * @regmap:  ECE register map
+ * @clients: active-user count; first user resets the engine, last stops it
+ * @reset:   reset control for the ECE block
+ * @enable:  true when hextile compression is enabled for this device
+ */
+struct npcm_ece {
+       struct regmap *regmap;
+       atomic_t clients;
+       struct reset_control *reset;
+       bool enable;
+};
+
+/*
+ * struct npcm_video - per-device driver state
+ *
+ * @gcr_regmap, @gfx_regmap, @vcd_regmap are the syscon register maps for
+ * the global control, host-graphics interface and video capture blocks.
+ * @list and @rect keep, per vb2 buffer index, the dirty-rectangle list and
+ * its length.  @ctrl_cmd holds the requested VCD capture operation and
+ * @op_cmd the operation last written to the hardware.
+ */
+struct npcm_video {
+       struct regmap *gcr_regmap;
+       struct regmap *gfx_regmap;
+       struct regmap *vcd_regmap;
+
+       struct device *dev;
+       struct v4l2_ctrl_handler ctrl_handler;
+       struct v4l2_ctrl *rect_cnt_ctrl;
+       struct v4l2_device v4l2_dev;
+       struct v4l2_pix_format pix_fmt;
+       struct v4l2_bt_timings active_timings;
+       struct v4l2_bt_timings detected_timings;
+       unsigned int v4l2_input_status;
+       struct vb2_queue queue;
+       struct video_device vdev;
+       struct mutex video_lock; /* v4l2 and videobuf2 lock */
+
+       struct list_head buffers;
+       spinlock_t lock; /* buffer list lock */
+       unsigned long flags;
+       unsigned int sequence;
+
+       struct npcm_video_addr src;
+       struct reset_control *reset;
+       struct npcm_ece ece;
+
+       unsigned int bytesperline;
+       unsigned int bytesperpixel;
+       unsigned int rect_cnt;
+       struct list_head list[VIDEO_MAX_FRAME]; /* dirty rects per buffer */
+       unsigned int rect[VIDEO_MAX_FRAME]; /* rect count per buffer */
+       unsigned int ctrl_cmd; /* requested VCD operation */
+       unsigned int op_cmd; /* operation last issued to VCD_CMD */
+};
+
+#define to_npcm_video(x) container_of((x), struct npcm_video, v4l2_dev)
+
+/* Pixel format descriptor: V4L2 fourcc and its storage cost per pixel. */
+struct npcm_fmt {
+       unsigned int fourcc;
+       unsigned int bpp; /* bytes per pixel */
+};
+
+/* Supported capture formats: raw RGB565 and hextile-compressed output. */
+static const struct npcm_fmt npcm_fmt_list[] = {
+       {
+               .fourcc = V4L2_PIX_FMT_RGB565,
+               .bpp    = 2,
+       },
+       {
+               .fourcc = V4L2_PIX_FMT_HEXTILE,
+               .bpp    = 2,
+       },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(npcm_fmt_list)
+
+/* DV timing limits supported by the capture path (BT.656/1120 ranges). */
+static const struct v4l2_dv_timings_cap npcm_video_timings_cap = {
+       .type = V4L2_DV_BT_656_1120,
+       .bt = {
+               .min_width = MIN_WIDTH,
+               .max_width = MAX_WIDTH,
+               .min_height = MIN_HEIGHT,
+               .max_height = MAX_HEIGHT,
+               .min_pixelclock = 6574080, /* 640 x 480 x 24Hz */
+               .max_pixelclock = 138240000, /* 1920 x 1200 x 60Hz */
+               .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+                            V4L2_DV_BT_STD_CVT | V4L2_DV_BT_STD_GTF,
+               .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+                               V4L2_DV_BT_CAP_REDUCED_BLANKING |
+                               V4L2_DV_BT_CAP_CUSTOM,
+       },
+};
+
+static DECLARE_BITMAP(bitmap, BITMAP_SIZE);
+
+/*
+ * Look up the format descriptor matching the pixelformat requested in @f.
+ * Returns NULL when the fourcc is not in the supported-format table.
+ */
+static const struct npcm_fmt *npcm_video_find_format(struct v4l2_format *f)
+{
+       unsigned int i;
+
+       for (i = 0; i < NUM_FORMATS; i++) {
+               if (npcm_fmt_list[i].fourcc == f->fmt.pix.pixelformat)
+                       return &npcm_fmt_list[i];
+       }
+
+       return NULL;
+}
+
+/*
+ * Write a 12-byte rectangle header at @addr: x, y, width, height as
+ * big-endian 16-bit values followed by the big-endian 32-bit encoding type
+ * (5 = Hextile).
+ */
+static void npcm_video_ece_prepend_rect_header(void *addr, u16 x, u16 y, u16 w, u16 h)
+{
+       __be16 geometry[4] = {
+               cpu_to_be16(x), cpu_to_be16(y),
+               cpu_to_be16(w), cpu_to_be16(h),
+       };
+       __be32 encoding = cpu_to_be32(5); /* Hextile encoding */
+
+       memcpy(addr, geometry, sizeof(geometry));
+       memcpy(addr + 8, &encoding, sizeof(encoding));
+}
+
+/*
+ * Wait for the ECE to finish compressing, then return the byte count of the
+ * encoded data at @offset plus the inter-rectangle gap the hardware keeps.
+ * Returns 0 if ECE_DDA_STS_CDREADY does not assert within the poll timeout.
+ */
+static unsigned int npcm_video_ece_get_ed_size(struct npcm_video *video,
+                                              unsigned int offset, void *addr)
+{
+       struct regmap *ece = video->ece.regmap;
+       unsigned int size, gap, val;
+       int ret;
+
+       /* CDREADY signals that the compressed data is complete. */
+       ret = regmap_read_poll_timeout(ece, ECE_DDA_STS, val,
+                                      (val & ECE_DDA_STS_CDREADY), 0,
+                                      ECE_POLL_TIMEOUT_US);
+
+       if (ret) {
+               dev_warn(video->dev, "Wait for ECE_DDA_STS_CDREADY timeout\n");
+               return 0;
+       }
+
+       /* The word at the rect's offset holds the encoded-data byte count. */
+       size = readl((void __iomem *)addr + offset);
+       regmap_read(ece, ECE_HEX_CTRL, &val);
+       gap = FIELD_GET(ECE_HEX_CTRL_ENC_GAP, val);
+
+       dev_dbg(video->dev, "offset = %u, ed_size = %u, gap = %u\n", offset,
+               size, gap);
+
+       return size + gap;
+}
+
+/*
+ * Start hextile encoding of one rectangle of the frame buffer.
+ * @r_off_x/@r_off_y are pixel coordinates; the rectangle is split into
+ * 16x16 tiles and a possibly partial last tile whose dimensions are
+ * programmed separately.
+ */
+static void npcm_video_ece_enc_rect(struct npcm_video *video,
+                                   unsigned int r_off_x, unsigned int r_off_y,
+                                   unsigned int r_w, unsigned int r_h)
+{
+       struct regmap *ece = video->ece.regmap;
+       /* Byte offset in the FB; * 2 matches the 2-byte pixel formats. */
+       unsigned int rect_offset = (r_off_y * video->bytesperline) + (r_off_x * 2);
+       unsigned int w_size = ECE_TILE_W, h_size = ECE_TILE_H;
+       unsigned int temp, w_tile, h_tile;
+
+       /* Pulse ECEEN to restart the engine, then clear stale status bits. */
+       regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_ECEEN, 0);
+       regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_ECEEN, ECE_DDA_CTRL_ECEEN);
+       regmap_write(ece, ECE_DDA_STS, ECE_DDA_STS_CDREADY | ECE_DDA_STS_ACDRDY);
+       regmap_write(ece, ECE_RECT_XY, rect_offset);
+
+       w_tile = r_w / ECE_TILE_W;
+       h_tile = r_h / ECE_TILE_H;
+
+       /* Account for a partial tile on the right/bottom edge. */
+       if (r_w % ECE_TILE_W) {
+               w_tile += 1;
+               w_size = r_w % ECE_TILE_W;
+       }
+       if (r_h % ECE_TILE_H || !h_tile) {
+               h_tile += 1;
+               h_size = r_h % ECE_TILE_H;
+       }
+
+       /* All dimension fields are programmed as value - 1. */
+       temp = FIELD_PREP(ECE_RECT_DIMEN_WLTR, w_size - 1) |
+              FIELD_PREP(ECE_RECT_DIMEN_HLTR, h_size - 1) |
+              FIELD_PREP(ECE_RECT_DIMEN_WR, w_tile - 1) |
+              FIELD_PREP(ECE_RECT_DIMEN_HR, h_tile - 1);
+
+       regmap_write(ece, ECE_RECT_DIMEN, temp);
+}
+
+/* Return the current write offset of the encoded-rectangle stream. */
+static unsigned int npcm_video_ece_read_rect_offset(struct npcm_video *video)
+{
+       unsigned int val;
+
+       regmap_read(video->ece.regmap, ECE_HEX_RECT_OFFSET, &val);
+       return FIELD_GET(ECE_HEX_RECT_OFFSET_MASK, val);
+}
+
+/*
+ * Set the line pitch (in bytes) for the frame buffers.
+ * Can be one of these values: 512, 1024, 2048, 2560 or 4096 bytes.
+ */
+static void npcm_video_ece_set_lp(struct npcm_video *video, unsigned int pitch)
+{
+       unsigned int lp;
+
+       /* Map the pitch to its register encoding; ignore unsupported values. */
+       if (pitch == 512)
+               lp = ECE_RESOL_FB_LP_512;
+       else if (pitch == 1024)
+               lp = ECE_RESOL_FB_LP_1024;
+       else if (pitch == 2048)
+               lp = ECE_RESOL_FB_LP_2048;
+       else if (pitch == 2560)
+               lp = ECE_RESOL_FB_LP_2560;
+       else if (pitch == 4096)
+               lp = ECE_RESOL_FB_LP_4096;
+       else
+               return;
+
+       regmap_write(video->ece.regmap, ECE_RESOL, lp);
+}
+
+/* Program the physical base address of the frame buffer the ECE reads. */
+static inline void npcm_video_ece_set_fb_addr(struct npcm_video *video,
+                                             unsigned int buffer)
+{
+       regmap_write(video->ece.regmap, ECE_FBR_BA, buffer);
+}
+
+/* Program the base address where the ECE writes its encoded data. */
+static inline void npcm_video_ece_set_enc_dba(struct npcm_video *video,
+                                             unsigned int addr)
+{
+       regmap_write(video->ece.regmap, ECE_ED_BA, addr);
+}
+
+/* Reset the encoded-rectangle stream offset back to zero. */
+static inline void npcm_video_ece_clear_rect_offset(struct npcm_video *video)
+{
+       regmap_write(video->ece.regmap, ECE_HEX_RECT_OFFSET, 0);
+}
+
+/*
+ * Soft-reset the ECE encoder: toggle the engine off and on with the hextile
+ * encoder disabled during the cycle, then clear the rectangle offset.
+ */
+static void npcm_video_ece_ctrl_reset(struct npcm_video *video)
+{
+       struct regmap *ece = video->ece.regmap;
+
+       regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_ECEEN, 0);
+       regmap_update_bits(ece, ECE_HEX_CTRL, ECE_HEX_CTRL_ENCDIS, ECE_HEX_CTRL_ENCDIS);
+       regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_ECEEN, ECE_DDA_CTRL_ECEEN);
+       regmap_update_bits(ece, ECE_HEX_CTRL, ECE_HEX_CTRL_ENCDIS, 0);
+
+       npcm_video_ece_clear_rect_offset(video);
+}
+
+/* Hard-reset the ECE block through its reset controller. */
+static void npcm_video_ece_ip_reset(struct npcm_video *video)
+{
+       /*
+        * After resetting a module and clearing the reset bit, it should wait
+        * at least 10 us before accessing the module.
+        */
+       reset_control_assert(video->ece.reset);
+       usleep_range(10, 20);
+       reset_control_deassert(video->ece.reset);
+       usleep_range(10, 20);
+}
+
+/* Disable the ECE engine and its interrupt, and clear the rect offset. */
+static void npcm_video_ece_stop(struct npcm_video *video)
+{
+       struct regmap *ece = video->ece.regmap;
+
+       regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_ECEEN, 0);
+       regmap_update_bits(ece, ECE_DDA_CTRL, ECE_DDA_CTRL_INTEN, 0);
+       regmap_update_bits(ece, ECE_HEX_CTRL, ECE_HEX_CTRL_ENCDIS, ECE_HEX_CTRL_ENCDIS);
+       npcm_video_ece_clear_rect_offset(video);
+}
+
+/*
+ * Allocate the coherent DMA frame buffer used by the VCD (and the ECE as
+ * input).  Returns false on allocation failure.
+ */
+static bool npcm_video_alloc_fb(struct npcm_video *video,
+                               struct npcm_video_addr *addr)
+{
+       void *virt;
+
+       virt = dma_alloc_coherent(video->dev, VCD_FB_SIZE, &addr->dma,
+                                 GFP_KERNEL);
+       if (!virt)
+               return false;
+
+       addr->virt = virt;
+       addr->size = VCD_FB_SIZE;
+       return true;
+}
+
+/* Release the coherent frame buffer and reset the bookkeeping fields. */
+static void npcm_video_free_fb(struct npcm_video *video,
+                              struct npcm_video_addr *addr)
+{
+       dma_free_coherent(video->dev, addr->size, addr->virt, addr->dma);
+       addr->virt = NULL;
+       addr->dma = 0ULL;
+       addr->size = 0;
+}
+
+/*
+ * Free every rect_list node accumulated on the per-buffer difference lists.
+ * Uses list_for_each_entry_safe (the idiomatic form of the previous
+ * open-coded list_for_each + list_entry walk) since nodes are freed while
+ * iterating.
+ */
+static void npcm_video_free_diff_table(struct npcm_video *video)
+{
+       struct rect_list *tmp, *next;
+       unsigned int i;
+
+       for (i = 0; i < video->queue.num_buffers; i++) {
+               list_for_each_entry_safe(tmp, next, &video->list[i], list) {
+                       list_del(&tmp->list);
+                       kfree(tmp);
+               }
+       }
+}
+
+/*
+ * Append one rectangle (x, y, w, h) to buffer @index's difference list.
+ * Returns 1 on success, 0 on allocation failure.
+ */
+static unsigned int npcm_video_add_rect(struct npcm_video *video,
+                                       unsigned int index,
+                                       unsigned int x, unsigned int y,
+                                       unsigned int w, unsigned int h)
+{
+       struct rect_list *entry;
+       struct v4l2_rect *rect;
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return 0;
+
+       rect = &entry->clip.c;
+       rect->left = x;
+       rect->top = y;
+       rect->width = w;
+       rect->height = h;
+
+       list_add_tail(&entry->list, &video->list[index]);
+       return 1;
+}
+
+/*
+ * Merge info->list into the current anchor rectangle (info->first) when the
+ * two are horizontally/vertically adjacent or overlapping; otherwise append
+ * it and make it the new anchor.
+ *
+ * Fix: the original computed &first->clip.c before the !first check, which
+ * dereferences a NULL info->first on the first tile of a row (undefined
+ * behavior).  The pointer is now only taken once first is known non-NULL.
+ */
+static void npcm_video_merge_rect(struct npcm_video *video,
+                                 struct rect_list_info *info)
+{
+       struct list_head *head = info->head;
+       struct rect_list *list = info->list, *first = info->first;
+       struct v4l2_rect *r, *f;
+
+       if (!first) {
+               /* First rect of this scan: it becomes the merge anchor. */
+               info->first = list;
+               list_add_tail(&list->list, head);
+               video->rect_cnt++;
+               return;
+       }
+
+       r = &list->clip.c;
+       f = &first->clip.c;
+
+       if ((r->left == (f->left + f->width)) && r->top == f->top) {
+               /* Horizontally adjacent: widen the anchor instead. */
+               f->width += r->width;
+               kfree(list);
+       } else if ((r->top == (f->top + f->height)) &&
+                  (r->left == f->left)) {
+               /* Vertically adjacent: grow the anchor downwards. */
+               f->height += r->height;
+               kfree(list);
+       } else if (((r->top > f->top) &&
+                  (r->top < (f->top + f->height))) &&
+                  ((r->left > f->left) &&
+                  (r->left < (f->left + f->width)))) {
+               /* Contained within the anchor: drop it. */
+               kfree(list);
+       } else {
+               /* Not mergeable: keep it and make it the new anchor. */
+               list_add_tail(&list->list, head);
+               video->rect_cnt++;
+               info->first = list;
+       }
+}
+
+/*
+ * Allocate a rect_list entry for one dirty 16x16 tile.
+ * @offset is the tile column within the DIFF table line; @index encodes the
+ * table row.  The rectangle is clipped to the active timings.  Returns NULL
+ * on allocation failure.
+ */
+static struct rect_list *npcm_video_new_rect(struct npcm_video *video,
+                                            unsigned int offset,
+                                            unsigned int index)
+{
+       struct v4l2_bt_timings *act = &video->active_timings;
+       struct rect_list *list = NULL;
+       struct v4l2_rect *r;
+
+       list = kzalloc(sizeof(*list), GFP_KERNEL);
+       if (!list)
+               return NULL;
+
+       r = &list->clip.c;
+
+       /* Pixel coordinates: column * 16; row derived from the line index. */
+       r->left = (offset << 4);
+       r->top = (index >> 2);
+       r->width = RECT_W;
+       r->height = RECT_H;
+       /* Clip the last column/row tile to the frame edge. */
+       if ((r->left + RECT_W) > act->width)
+               r->width = act->width - r->left;
+       if ((r->top + RECT_H) > act->height)
+               r->height = act->height - r->top;
+
+       return list;
+}
+
+/*
+ * Turn one set DIFF-table bit into a rectangle and merge it into the list.
+ * Bits beyond the per-line tile count are padding and are ignored.
+ * Returns 0 on success or -ENOMEM on allocation failure.
+ */
+static int npcm_video_find_rect(struct npcm_video *video,
+                               struct rect_list_info *info,
+                               unsigned int offset)
+{
+       if (offset >= info->tile_perline)
+               return 0;
+
+       info->list = npcm_video_new_rect(video, offset, info->index);
+       if (!info->list) {
+               dev_err(video->dev, "Failed to allocate rect_list\n");
+               return -ENOMEM;
+       }
+
+       npcm_video_merge_rect(video, info);
+       return 0;
+}
+
+/*
+ * Scan one line of the VCD DIFF table and create rectangles for every set
+ * (changed-tile) bit.  Returns the number of tiles covered by this line or
+ * a negative errno on allocation failure.
+ */
+static int npcm_video_build_table(struct npcm_video *video,
+                                 struct rect_list_info *info)
+{
+       struct regmap *vcd = video->vcd_regmap;
+       unsigned int j, bit, value;
+       int ret;
+
+       for (j = 0; j < info->offset_perline; j += 4) {
+               regmap_read(vcd, VCD_DIFF_TBL + (j + info->index), &value);
+
+               bitmap_from_arr32(bitmap, &value, BITMAP_SIZE);
+
+               /* Each set bit marks a 16x16 tile that changed since last frame. */
+               for_each_set_bit(bit, bitmap, BITMAP_SIZE) {
+                       ret = npcm_video_find_rect(video, info, bit + (j << 3));
+                       if (ret)
+                               return ret;
+               }
+       }
+       /* Advance to the next DIFF table line (lines are 64 bytes apart). */
+       info->index += 64;
+       return info->tile_perline;
+}
+
+/*
+ * Walk the whole DIFF table and build buffer @index's dirty-rectangle list.
+ * The three manual "divide then bump on remainder" sequences are replaced
+ * with DIV_ROUND_UP, which computes the identical values.
+ */
+static void npcm_video_get_rect_list(struct npcm_video *video, unsigned int index)
+{
+       struct v4l2_bt_timings *act = &video->active_timings;
+       struct rect_list_info info;
+       unsigned int tile_cnt = 0;
+       int ret = 0;
+
+       memset(&info, 0, sizeof(struct rect_list_info));
+       info.head = &video->list[index];
+
+       /* Number of 16x16 tiles needed to cover the active frame. */
+       info.tile_perline = DIV_ROUND_UP(act->width, RECT_W);
+       info.tile_perrow = DIV_ROUND_UP(act->height, RECT_H);
+       info.tile_size = info.tile_perrow * info.tile_perline;
+
+       /* Each 32-bit DIFF word covers 32 tiles; 4 bytes per word. */
+       info.offset_perline = DIV_ROUND_UP(info.tile_perline, 32) * 4;
+
+       do {
+               ret = npcm_video_build_table(video, &info);
+               if (ret < 0)
+                       return;
+
+               tile_cnt += ret;
+       } while (tile_cnt < info.tile_size);
+}
+
+/* Return 1 when the host graphics reports MGA (high-resolution) mode. */
+static unsigned int npcm_video_is_mga(struct npcm_video *video)
+{
+       unsigned int dispst;
+
+       regmap_read(video->gfx_regmap, DISPST, &dispst);
+       return !!(dispst & DISPST_MGAMODE);
+}
+
+/* Read the host horizontal resolution from the split HVCNTH/HVCNTL counters. */
+static unsigned int npcm_video_hres(struct npcm_video *video)
+{
+       struct regmap *gfxi = video->gfx_regmap;
+       unsigned int hi, lo;
+
+       regmap_read(gfxi, HVCNTH, &hi);
+       regmap_read(gfxi, HVCNTL, &lo);
+
+       return ((hi & HVCNTH_MASK) << 8) + (lo & HVCNTL_MASK) + 1;
+}
+
+/* Read the host vertical resolution from the split VVCNTH/VVCNTL counters. */
+static unsigned int npcm_video_vres(struct npcm_video *video)
+{
+       struct regmap *gfxi = video->gfx_regmap;
+       unsigned int hi, lo;
+
+       regmap_read(gfxi, VVCNTH, &hi);
+       regmap_read(gfxi, VVCNTL, &lo);
+
+       return ((hi & VVCNTH_MASK) << 8) + (lo & VVCNTL_MASK);
+}
+
+/*
+ * Program the VCD capture resolution and verify the hardware accepted it by
+ * reading the register back.  Returns -EINVAL for out-of-range dimensions
+ * or a failed readback.
+ */
+static int npcm_video_capres(struct npcm_video *video, unsigned int hor_res,
+                            unsigned int vert_res)
+{
+       struct regmap *vcd = video->vcd_regmap;
+       unsigned int res, readback;
+
+       if (hor_res > MAX_WIDTH || vert_res > MAX_HEIGHT)
+               return -EINVAL;
+
+       res = FIELD_PREP(VCD_CAP_RES_VERT_RES, vert_res) |
+             FIELD_PREP(VCD_CAP_RES_HOR_RES, hor_res);
+
+       regmap_write(vcd, VCD_CAP_RES, res);
+       regmap_read(vcd, VCD_CAP_RES, &readback);
+
+       return (readback == res) ? 0 : -EINVAL;
+}
+
+/* Hard-reset the VCD block through its reset controller. */
+static void npcm_video_vcd_ip_reset(struct npcm_video *video)
+{
+       /*
+        * After resetting a module and clearing the reset bit, it should wait
+        * at least 10 us before accessing the module.
+        */
+       reset_control_assert(video->reset);
+       usleep_range(10, 20);
+       reset_control_deassert(video->reset);
+       usleep_range(10, 20);
+}
+
+/*
+ * Reset the VCD internal state machines and FIFOs.  Capture (VCDE) and the
+ * IDBC mode bit are disabled around the reset and re-enabled afterwards,
+ * with all status bits cleared in between.
+ */
+static void npcm_video_vcd_state_machine_reset(struct npcm_video *video)
+{
+       struct regmap *vcd = video->vcd_regmap;
+
+       regmap_update_bits(vcd, VCD_MODE, VCD_MODE_VCDE, 0);
+       regmap_update_bits(vcd, VCD_MODE, VCD_MODE_IDBC, 0);
+       regmap_update_bits(vcd, VCD_CMD, VCD_CMD_RST, VCD_CMD_RST);
+
+       /*
+        * VCD_CMD_RST will reset VCD internal state machines and clear FIFOs,
+        * it should wait at least 800 us for the reset operations completed.
+        */
+       usleep_range(800, 1000);
+
+       regmap_write(vcd, VCD_STAT, VCD_STAT_CLEAR);
+       regmap_update_bits(vcd, VCD_MODE, VCD_MODE_VCDE, VCD_MODE_VCDE);
+       regmap_update_bits(vcd, VCD_MODE, VCD_MODE_IDBC, VCD_MODE_IDBC);
+}
+
+/*
+ * Pulse the graphics-interface reset (GIRST2) around a VCD state machine
+ * reset so both sides restart in sync.
+ */
+static void npcm_video_gfx_reset(struct npcm_video *video)
+{
+       struct regmap *gcr = video->gcr_regmap;
+
+       regmap_update_bits(gcr, INTCR2, INTCR2_GIRST2, INTCR2_GIRST2);
+       npcm_video_vcd_state_machine_reset(video);
+       regmap_update_bits(gcr, INTCR2, INTCR2_GIRST2, 0);
+}
+
+/*
+ * Set or clear the KVM bandwidth mode bit: raised when explicitly requested
+ * or whenever the host graphics is not in MGA (high-res) mode.
+ */
+static void npcm_video_kvm_bw(struct npcm_video *video, bool set_bw)
+{
+       unsigned int val = 0;
+
+       if (set_bw || !npcm_video_is_mga(video))
+               val = VCD_MODE_KVM_BW_SET;
+
+       regmap_update_bits(video->vcd_regmap, VCD_MODE, VCD_MODE_KVM_BW_SET, val);
+}
+
+/*
+ * Derive the host graphics pixel clock in Hz from the GFXI GPLL divider
+ * registers (the 25000 factor suggests a 25 MHz reference — unconfirmed).
+ * Returns 0 when either divider product reads zero, avoiding a division
+ * by zero.
+ */
+static unsigned int npcm_video_pclk(struct npcm_video *video)
+{
+       struct regmap *gfxi = video->gfx_regmap;
+       unsigned int tmp, pllfbdiv, pllinotdiv, gpllfbdiv;
+       unsigned int gpllfbdv109, gpllfbdv8, gpllindiv;
+       unsigned int gpllst_pllotdiv1, gpllst_pllotdiv2;
+
+       regmap_read(gfxi, GPLLST, &tmp);
+       gpllfbdv109 = FIELD_GET(GPLLST_GPLLFBDV109, tmp);
+       gpllst_pllotdiv1 = FIELD_GET(GPLLST_PLLOTDIV1, tmp);
+       gpllst_pllotdiv2 = FIELD_GET(GPLLST_PLLOTDIV2, tmp);
+
+       regmap_read(gfxi, GPLLINDIV, &tmp);
+       gpllfbdv8 = FIELD_GET(GPLLINDIV_GPLLFBDV8, tmp);
+       gpllindiv = FIELD_GET(GPLLINDIV_MASK, tmp);
+
+       regmap_read(gfxi, GPLLFBDIV, &tmp);
+       gpllfbdiv = FIELD_GET(GPLLFBDIV_MASK, tmp);
+
+       /* Feedback divider is split over three fields (bits 9..8, 8, 7..0). */
+       pllfbdiv = (512 * gpllfbdv109 + 256 * gpllfbdv8 + gpllfbdiv);
+       pllinotdiv = (gpllindiv * gpllst_pllotdiv1 * gpllst_pllotdiv2);
+       if (pllfbdiv == 0 || pllinotdiv == 0)
+               return 0;
+
+       return ((pllfbdiv * 25000) / pllinotdiv) * 1000;
+}
+
+/*
+ * Return the bytes-per-pixel of the currently selected pixel format.
+ * NOTE(review): if the fourcc is not in the table this falls back to the
+ * last entry's bpp, matching the original lookup's behavior.
+ */
+static unsigned int npcm_video_get_bpp(struct npcm_video *video)
+{
+       const struct npcm_fmt *fmt = &npcm_fmt_list[0];
+       unsigned int i;
+
+       for (i = 0; i < NUM_FORMATS; i++) {
+               fmt = &npcm_fmt_list[i];
+               if (fmt->fourcc == video->pix_fmt.pixelformat)
+                       break;
+       }
+
+       return fmt->bpp;
+}
+
+/*
+ * Pitch must be a power of 2, >= linebytes,
+ * at least 512, and no more than 4096.
+ */
+static void npcm_video_set_linepitch(struct npcm_video *video,
+                                    unsigned int linebytes)
+{
+       unsigned int pitch;
+
+       /* Smallest power-of-two pitch (512..4096) that holds one line. */
+       for (pitch = MIN_LP; pitch < linebytes && pitch < MAX_LP; pitch *= 2)
+               ;
+
+       /* Both capture buffers use the same pitch. */
+       regmap_write(video->vcd_regmap, VCD_FB_LP,
+                    FIELD_PREP(VCD_FBA_LP, pitch) |
+                    FIELD_PREP(VCD_FBB_LP, pitch));
+}
+
+/* Read back the configured line pitch (buffer A's field; both match). */
+static unsigned int npcm_video_get_linepitch(struct npcm_video *video)
+{
+       unsigned int val;
+
+       regmap_read(video->vcd_regmap, VCD_FB_LP, &val);
+       return FIELD_GET(VCD_FBA_LP, val);
+}
+
+/*
+ * Clear VCD status, program the capture operation @value into VCD_CMD and
+ * kick it via VCD_CMD_GO.  The issued operation is remembered in
+ * video->op_cmd.
+ */
+static void npcm_video_command(struct npcm_video *video, unsigned int value)
+{
+       struct regmap *vcd = video->vcd_regmap;
+       unsigned int cmd;
+
+       regmap_write(vcd, VCD_STAT, VCD_STAT_CLEAR);
+       regmap_read(vcd, VCD_CMD, &cmd);
+       cmd |= FIELD_PREP(VCD_CMD_OPERATION, value);
+
+       regmap_write(vcd, VCD_CMD, cmd);
+       regmap_update_bits(vcd, VCD_CMD, VCD_CMD_GO, VCD_CMD_GO);
+       video->op_cmd = value;
+}
+
+/* One-time GCR/VCD register setup bringing the capture path to a known state. */
+static void npcm_video_init_reg(struct npcm_video *video)
+{
+       struct regmap *gcr = video->gcr_regmap, *vcd = video->vcd_regmap;
+
+       /* Selects Data Enable */
+       regmap_update_bits(gcr, INTCR, INTCR_DEHS, 0);
+
+       /* Enable display of KVM GFX and access to memory */
+       regmap_update_bits(gcr, INTCR, INTCR_GFXIFDIS, 0);
+
+       /* Active Vertical/Horizontal Counters Reset */
+       regmap_update_bits(gcr, INTCR2, INTCR2_GIHCRST | INTCR2_GIVCRST,
+                          INTCR2_GIHCRST | INTCR2_GIVCRST);
+
+       /* Reset video modules */
+       npcm_video_vcd_ip_reset(video);
+       npcm_video_gfx_reset(video);
+
+       /* Set the FIFO thresholds */
+       regmap_write(vcd, VCD_FIFO, VCD_FIFO_TH);
+
+       /* Set RCHG timer */
+       regmap_write(vcd, VCD_RCHG, FIELD_PREP(VCD_RCHG_TIM_PRSCL, 0xf) |
+                    FIELD_PREP(VCD_RCHG_IG_CHG0, 0x3));
+
+       /* Set video mode */
+       regmap_write(vcd, VCD_MODE, VCD_MODE_VCDE | VCD_MODE_CM565 |
+                    VCD_MODE_IDBC | VCD_MODE_KVM_BW_SET);
+}
+
+/*
+ * Start capturing one frame into the VCD frame buffer.
+ * Returns 0 (silently skipping) when there is no video signal or no empty
+ * buffer queued, and -EBUSY if the VCD does not become idle within
+ * VCD_TIMEOUT_US.
+ */
+static int npcm_video_start_frame(struct npcm_video *video)
+{
+       struct npcm_video_buffer *buf;
+       struct regmap *vcd = video->vcd_regmap;
+       unsigned long flags;
+       unsigned int val;
+       int ret;
+
+       if (video->v4l2_input_status) {
+               dev_dbg(video->dev, "No video signal; skip capture frame\n");
+               return 0;
+       }
+
+       /* Wait until any previous capture operation has finished. */
+       ret = regmap_read_poll_timeout(vcd, VCD_STAT, val, !(val & VCD_STAT_BUSY),
+                                      1000, VCD_TIMEOUT_US);
+       if (ret) {
+               dev_err(video->dev, "Wait for VCD_STAT_BUSY timeout\n");
+               return -EBUSY;
+       }
+
+       /* Only capture if userspace has queued an empty buffer. */
+       spin_lock_irqsave(&video->lock, flags);
+       buf = list_first_entry_or_null(&video->buffers,
+                                      struct npcm_video_buffer, link);
+       if (!buf) {
+               spin_unlock_irqrestore(&video->lock, flags);
+               dev_dbg(video->dev, "No empty buffers; skip capture frame\n");
+               return 0;
+       }
+
+       set_bit(VIDEO_CAPTURING, &video->flags);
+       spin_unlock_irqrestore(&video->lock, flags);
+
+       npcm_video_vcd_state_machine_reset(video);
+
+       /* Latch the current horizontal/vertical timing counters. */
+       regmap_read(vcd, VCD_HOR_AC_TIM, &val);
+       regmap_update_bits(vcd, VCD_HOR_AC_LST, VCD_HOR_AC_LAST,
+                          FIELD_GET(VCD_HOR_AC_TIME, val));
+
+       regmap_read(vcd, VCD_VER_HI_TIM, &val);
+       regmap_update_bits(vcd, VCD_VER_HI_LST, VCD_VER_HI_LAST,
+                          FIELD_GET(VCD_VER_HI_TIME, val));
+
+       /* Enable done, FIFO-overflow and timing-change interrupts. */
+       regmap_update_bits(vcd, VCD_INTE, VCD_INTE_DONE_IE | VCD_INTE_IFOT_IE |
+                          VCD_INTE_IFOR_IE | VCD_INTE_HAC_IE | VCD_INTE_VHT_IE,
+                          VCD_INTE_DONE_IE | VCD_INTE_IFOT_IE | VCD_INTE_IFOR_IE |
+                          VCD_INTE_HAC_IE | VCD_INTE_VHT_IE);
+
+       npcm_video_command(video, video->ctrl_cmd);
+
+       return 0;
+}
+
+/* Complete every queued buffer with @state and empty the pending list. */
+static void npcm_video_bufs_done(struct npcm_video *video,
+                                enum vb2_buffer_state state)
+{
+       struct npcm_video_buffer *vbuf;
+       unsigned long flags;
+
+       spin_lock_irqsave(&video->lock, flags);
+
+       list_for_each_entry(vbuf, &video->buffers, link)
+               vb2_buffer_done(&vbuf->vb.vb2_buf, state);
+       INIT_LIST_HEAD(&video->buffers);
+
+       spin_unlock_irqrestore(&video->lock, flags);
+}
+
+/*
+ * Record buffer @index's dirty rectangles: a real difference list for
+ * compare-based operations, or a single full-frame rectangle in plain
+ * capture mode.
+ */
+static void npcm_video_get_diff_rect(struct npcm_video *video, unsigned int index)
+{
+       unsigned int width = video->active_timings.width;
+       unsigned int height = video->active_timings.height;
+
+       if (video->op_cmd != VCD_CMD_OPERATION_CAPTURE) {
+               video->rect_cnt = 0;
+               npcm_video_get_rect_list(video, index);
+               video->rect[index] = video->rect_cnt;
+       } else {
+               video->rect[index] = npcm_video_add_rect(video, index, 0, 0,
+                                                        width, height);
+       }
+}
+
+/*
+ * Sample the current host resolution from the GFXI counters into
+ * detected_timings and update v4l2_input_status (V4L2_IN_ST_NO_SIGNAL when
+ * either dimension is zero).  While streaming, busy-waits in 100 ms steps
+ * until the new mode is stable before sampling width/height/pixelclock.
+ */
+static void npcm_video_detect_resolution(struct npcm_video *video)
+{
+       struct v4l2_bt_timings *act = &video->active_timings;
+       struct v4l2_bt_timings *det = &video->detected_timings;
+       struct regmap *gfxi = video->gfx_regmap;
+       unsigned int dispst;
+
+       video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+       det->width = npcm_video_hres(video);
+       det->height = npcm_video_vres(video);
+
+       if (act->width != det->width || act->height != det->height) {
+               dev_dbg(video->dev, "Resolution changed\n");
+
+               if (npcm_video_hres(video) > 0 && npcm_video_vres(video) > 0) {
+                       if (test_bit(VIDEO_STREAMING, &video->flags)) {
+                               /*
+                                * Wait for resolution is available,
+                                * and it is also captured by host.
+                                */
+                               do {
+                                       mdelay(100);
+                                       regmap_read(gfxi, DISPST, &dispst);
+                               } while (npcm_video_vres(video) < 100 ||
+                                        npcm_video_pclk(video) == 0 ||
+                                        (dispst & DISPST_HSCROFF));
+                       }
+
+                       det->width = npcm_video_hres(video);
+                       det->height = npcm_video_vres(video);
+                       det->pixelclock = npcm_video_pclk(video);
+               }
+
+               clear_bit(VIDEO_RES_CHANGING, &video->flags);
+       }
+
+       if (det->width && det->height)
+               video->v4l2_input_status = 0;
+
+       dev_dbg(video->dev, "Got resolution[%dx%d] -> [%dx%d], status %d\n",
+               act->width, act->height, det->width, det->height,
+               video->v4l2_input_status);
+}
+
+/*
+ * Program the VCD capture resolution from @timing and derive the line
+ * pitch, bytes-per-pixel and the reported v4l2 pixel format from it.
+ * Returns -EINVAL if the hardware rejects the resolution.
+ */
+static int npcm_video_set_resolution(struct npcm_video *video,
+                                    struct v4l2_bt_timings *timing)
+{
+       struct regmap *vcd = video->vcd_regmap;
+       unsigned int mode;
+
+       if (npcm_video_capres(video, timing->width, timing->height)) {
+               dev_err(video->dev, "Failed to set VCD_CAP_RES\n");
+               return -EINVAL;
+       }
+
+       video->active_timings = *timing;
+       video->bytesperpixel = npcm_video_get_bpp(video);
+       npcm_video_set_linepitch(video, timing->width * video->bytesperpixel);
+       video->bytesperline = npcm_video_get_linepitch(video);
+       /* Fall back to the driver minimums when the timing reads as zero. */
+       video->pix_fmt.width = timing->width ? timing->width : MIN_WIDTH;
+       video->pix_fmt.height = timing->height ? timing->height : MIN_HEIGHT;
+       video->pix_fmt.sizeimage = video->pix_fmt.width * video->pix_fmt.height *
+                                  video->bytesperpixel;
+       video->pix_fmt.bytesperline = video->bytesperline;
+
+       npcm_video_kvm_bw(video, timing->pixelclock > VCD_KVM_BW_PCLK);
+       npcm_video_gfx_reset(video);
+       regmap_read(vcd, VCD_MODE, &mode);
+
+       dev_dbg(video->dev, "VCD mode = 0x%x, %s mode\n", mode,
+               npcm_video_is_mga(video) ? "Hi Res" : "VGA");
+
+       dev_dbg(video->dev,
+               "Digital mode: %d x %d x %d, pixelclock %lld, bytesperline %d\n",
+               timing->width, timing->height, video->bytesperpixel,
+               timing->pixelclock, video->bytesperline);
+
+       return 0;
+}
+
+/*
+ * Bring the capture pipeline up: program registers, allocate the frame
+ * buffer, detect and apply the current resolution, and (for the first ECE
+ * client) reset and configure the shared compression engine.
+ *
+ * Fix: the frame buffer is now released when applying the resolution
+ * fails, instead of being left allocated on the early-return path
+ * (npcm_video_free_fb() zeroes src.size, so a later npcm_video_stop()
+ * will not double-free).
+ */
+static void npcm_video_start(struct npcm_video *video)
+{
+       npcm_video_init_reg(video);
+
+       if (!npcm_video_alloc_fb(video, &video->src)) {
+               dev_err(video->dev, "Failed to allocate VCD frame buffer\n");
+               return;
+       }
+
+       npcm_video_detect_resolution(video);
+       if (npcm_video_set_resolution(video, &video->detected_timings)) {
+               dev_err(video->dev, "Failed to set resolution\n");
+               npcm_video_free_fb(video, &video->src);
+               return;
+       }
+
+       /* Both VCD capture buffers point at the same frame buffer. */
+       regmap_write(video->vcd_regmap, VCD_FBA_ADR, video->src.dma);
+       regmap_write(video->vcd_regmap, VCD_FBB_ADR, video->src.dma);
+
+       /* The first ECE user resets and configures the shared encoder. */
+       if (video->ece.enable && atomic_inc_return(&video->ece.clients) == 1) {
+               npcm_video_ece_ip_reset(video);
+               npcm_video_ece_ctrl_reset(video);
+               npcm_video_ece_set_fb_addr(video, video->src.dma);
+               npcm_video_ece_set_lp(video, video->bytesperline);
+
+               dev_dbg(video->dev, "ECE open: client %d\n",
+                       atomic_read(&video->ece.clients));
+       }
+}
+
+/*
+ * Tear down capture for the last closer: quiesce and clear the VCD,
+ * free the source frame buffer and diff tables, and stop the ECE once
+ * its last client goes away.  Mirrors npcm_video_start().
+ */
+static void npcm_video_stop(struct npcm_video *video)
+{
+       struct regmap *vcd = video->vcd_regmap;
+
+       set_bit(VIDEO_STOPPED, &video->flags);
+
+       /* Mask interrupts and clear mode/status before freeing buffers. */
+       regmap_write(vcd, VCD_INTE, 0);
+       regmap_write(vcd, VCD_MODE, 0);
+       regmap_write(vcd, VCD_RCHG, 0);
+       regmap_write(vcd, VCD_STAT, VCD_STAT_CLEAR);
+
+       if (video->src.size)
+               npcm_video_free_fb(video, &video->src);
+
+       npcm_video_free_diff_table(video);
+       video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+       video->flags = 0;
+       video->ctrl_cmd = VCD_CMD_OPERATION_CAPTURE;
+
+       /* Last ECE client stops the encoder. */
+       if (video->ece.enable && atomic_dec_return(&video->ece.clients) == 0) {
+               npcm_video_ece_stop(video);
+               dev_dbg(video->dev, "ECE close: client %d\n",
+                       atomic_read(&video->ece.clients));
+       }
+}
+
+/*
+ * Copy one full RGB565 frame from the VCD source buffer into the vb2
+ * plane @addr, row by row: the source pitch (video->bytesperline) may be
+ * wider than the packed output row, so a single memcpy is not possible.
+ * Also records a single full-frame rectangle for buffer @index.
+ * Returns the number of bytes written to @addr.
+ */
+static unsigned int npcm_video_raw(struct npcm_video *video, int index, void *addr)
+{
+       unsigned int width = video->active_timings.width;
+       unsigned int height = video->active_timings.height;
+       /* Packed output row size is loop-invariant; hoist it. */
+       unsigned int len = width * video->bytesperpixel;
+       unsigned int i, offset, bytes = 0;
+
+       video->rect[index] = npcm_video_add_rect(video, index, 0, 0, width, height);
+
+       for (i = 0; i < height; i++) {
+               offset = i * video->bytesperline;
+
+               memcpy(addr + bytes, video->src.virt + offset, len);
+               bytes += len;
+       }
+
+       return bytes;
+}
+
+/*
+ * Encode the changed rectangles of buffer @index with the ECE into the
+ * vb2 buffer (@dma_addr/@vaddr) in HEXTILE format.  The ECE writes the
+ * encoded data by DMA; each rectangle gets a header prepended in place.
+ * Returns the total number of encoded bytes.
+ */
+static unsigned int npcm_video_hextile(struct npcm_video *video, unsigned int index,
+                                      unsigned int dma_addr, void *vaddr)
+{
+       struct rect_list *rect_list;
+       struct v4l2_rect *rect;
+       unsigned int offset, len, bytes = 0;
+
+       npcm_video_ece_ctrl_reset(video);
+       npcm_video_ece_clear_rect_offset(video);
+       npcm_video_ece_set_fb_addr(video, video->src.dma);
+
+       /* Set base address of encoded data to video buffer */
+       npcm_video_ece_set_enc_dba(video, dma_addr);
+
+       npcm_video_ece_set_lp(video, video->bytesperline);
+       npcm_video_get_diff_rect(video, index);
+
+       list_for_each_entry(rect_list, &video->list[index], list) {
+               rect = &rect_list->clip.c;
+               /* Hardware reports where this rectangle's data will land. */
+               offset = npcm_video_ece_read_rect_offset(video);
+               npcm_video_ece_enc_rect(video, rect->left, rect->top,
+                                       rect->width, rect->height);
+
+               len = npcm_video_ece_get_ed_size(video, offset, vaddr);
+               npcm_video_ece_prepend_rect_header(vaddr + offset,
+                                                  rect->left, rect->top,
+                                                  rect->width, rect->height);
+               bytes += len;
+       }
+
+       return bytes;
+}
+
+/*
+ * VCD threaded interrupt handler.  Handles three event classes:
+ *  - capture done: copy/encode the frame into the next queued vb2 buffer
+ *    and kick the next capture;
+ *  - resolution change: signal userspace via V4L2_EVENT_SOURCE_CHANGE and
+ *    put the queue into error state;
+ *  - FIFO overrun: attempt to restart capture.
+ */
+static irqreturn_t npcm_video_irq(int irq, void *arg)
+{
+       struct npcm_video *video = arg;
+       struct regmap *vcd = video->vcd_regmap;
+       struct npcm_video_buffer *buf;
+       unsigned int index, size, status, fmt;
+       dma_addr_t dma_addr;
+       void *addr;
+       static const struct v4l2_event ev = {
+               .type = V4L2_EVENT_SOURCE_CHANGE,
+               .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+       };
+
+       regmap_read(vcd, VCD_STAT, &status);
+       dev_dbg(video->dev, "VCD irq status 0x%x\n", status);
+
+       /* Acknowledge everything we are about to handle. */
+       regmap_write(vcd, VCD_STAT, VCD_STAT_CLEAR);
+
+       if (test_bit(VIDEO_STOPPED, &video->flags) ||
+           !test_bit(VIDEO_STREAMING, &video->flags))
+               return IRQ_NONE;
+
+       if (status & VCD_STAT_DONE) {
+               regmap_write(vcd, VCD_INTE, 0);
+               spin_lock(&video->lock);
+               clear_bit(VIDEO_CAPTURING, &video->flags);
+               buf = list_first_entry_or_null(&video->buffers,
+                                              struct npcm_video_buffer, link);
+               if (!buf) {
+                       spin_unlock(&video->lock);
+                       return IRQ_NONE;
+               }
+
+               addr = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
+               index = buf->vb.vb2_buf.index;
+               fmt = video->pix_fmt.pixelformat;
+
+               switch (fmt) {
+               case V4L2_PIX_FMT_RGB565:
+                       size = npcm_video_raw(video, index, addr);
+                       break;
+               case V4L2_PIX_FMT_HEXTILE:
+                       dma_addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+                       size = npcm_video_hextile(video, index, dma_addr, addr);
+                       break;
+               default:
+                       spin_unlock(&video->lock);
+                       return IRQ_NONE;
+               }
+
+               vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+               buf->vb.vb2_buf.timestamp = ktime_get_ns();
+               buf->vb.sequence = video->sequence++;
+               buf->vb.field = V4L2_FIELD_NONE;
+
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+               list_del(&buf->link);
+               spin_unlock(&video->lock);
+
+               if (npcm_video_start_frame(video))
+                       dev_err(video->dev, "Failed to capture next frame\n");
+       }
+
+       /* Resolution changed */
+       if (status & VCD_STAT_VHT_CHG || status & VCD_STAT_HAC_CHG) {
+               if (!test_bit(VIDEO_RES_CHANGING, &video->flags)) {
+                       set_bit(VIDEO_RES_CHANGING, &video->flags);
+
+                       vb2_queue_error(&video->queue);
+                       v4l2_event_queue(&video->vdev, &ev);
+               }
+       }
+
+       if (status & VCD_STAT_IFOR || status & VCD_STAT_IFOT) {
+               dev_warn(video->dev, "VCD FIFO overrun or over thresholds\n");
+               if (npcm_video_start_frame(video))
+                       dev_err(video->dev, "Failed to recover from FIFO overrun\n");
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* VIDIOC_QUERYCAP: report driver and card identity. */
+static int npcm_video_querycap(struct file *file, void *fh,
+                              struct v4l2_capability *cap)
+{
+       strscpy(cap->driver, DEVICE_NAME, sizeof(cap->driver));
+       strscpy(cap->card, "NPCM Video Engine", sizeof(cap->card));
+
+       return 0;
+}
+
+/*
+ * VIDIOC_ENUM_FMT: enumerate supported pixel formats; HEXTILE is only
+ * advertised when the ECE encoder is present and enabled.
+ */
+static int npcm_video_enum_format(struct file *file, void *fh,
+                                 struct v4l2_fmtdesc *f)
+{
+       struct npcm_video *video = video_drvdata(file);
+       const struct npcm_fmt *fmt;
+
+       if (f->index >= NUM_FORMATS)
+               return -EINVAL;
+
+       fmt = &npcm_fmt_list[f->index];
+       if (fmt->fourcc == V4L2_PIX_FMT_HEXTILE && !video->ece.enable)
+               return -EINVAL;
+
+       f->pixelformat = fmt->fourcc;
+       return 0;
+}
+
+/*
+ * VIDIOC_TRY_FMT: the frame geometry is fixed by the detected host
+ * resolution, so only the pixelformat is negotiable; everything else is
+ * overwritten with the driver's current values.
+ */
+static int npcm_video_try_format(struct file *file, void *fh,
+                                struct v4l2_format *f)
+{
+       struct npcm_video *video = video_drvdata(file);
+       const struct npcm_fmt *fmt;
+
+       fmt = npcm_video_find_format(f);
+
+       /* If format not found or HEXTILE not supported, use RGB565 as default */
+       if (!fmt || (fmt->fourcc == V4L2_PIX_FMT_HEXTILE && !video->ece.enable))
+               f->fmt.pix.pixelformat = npcm_fmt_list[0].fourcc;
+
+       f->fmt.pix.field = V4L2_FIELD_NONE;
+       f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB;
+       f->fmt.pix.quantization = V4L2_QUANTIZATION_FULL_RANGE;
+       f->fmt.pix.width = video->pix_fmt.width;
+       f->fmt.pix.height = video->pix_fmt.height;
+       f->fmt.pix.bytesperline = video->bytesperline;
+       f->fmt.pix.sizeimage = video->pix_fmt.sizeimage;
+
+       return 0;
+}
+
+/* VIDIOC_G_FMT: return the currently active capture format. */
+static int npcm_video_get_format(struct file *file, void *fh,
+                                struct v4l2_format *f)
+{
+       struct npcm_video *video = video_drvdata(file);
+
+       f->fmt.pix = video->pix_fmt;
+       return 0;
+}
+
+/*
+ * VIDIOC_S_FMT: after try_format() only the pixelformat can actually
+ * change; refuse while buffers are in use.
+ */
+static int npcm_video_set_format(struct file *file, void *fh,
+                                struct v4l2_format *f)
+{
+       struct npcm_video *video = video_drvdata(file);
+       int ret;
+
+       ret = npcm_video_try_format(file, fh, f);
+       if (ret)
+               return ret;
+
+       if (vb2_is_busy(&video->queue)) {
+               dev_err(video->dev, "%s device busy\n", __func__);
+               return -EBUSY;
+       }
+
+       video->pix_fmt.pixelformat = f->fmt.pix.pixelformat;
+       return 0;
+}
+
+/* There is exactly one input: the host VGA feed (index 0). */
+static int npcm_video_enum_input(struct file *file, void *fh,
+                                struct v4l2_input *inp)
+{
+       struct npcm_video *video = video_drvdata(file);
+
+       if (inp->index)
+               return -EINVAL;
+
+       strscpy(inp->name, "Host VGA capture", sizeof(inp->name));
+       inp->type = V4L2_INPUT_TYPE_CAMERA;
+       inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
+       inp->status = video->v4l2_input_status;
+
+       return 0;
+}
+
+/* VIDIOC_G_INPUT: the single input is always selected. */
+static int npcm_video_get_input(struct file *file, void *fh, unsigned int *i)
+{
+       *i = 0;
+
+       return 0;
+}
+
+/* VIDIOC_S_INPUT: only input 0 exists. */
+static int npcm_video_set_input(struct file *file, void *fh, unsigned int i)
+{
+       if (i)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_S_DV_TIMINGS: reprogram the capture resolution.  A no-op when
+ * the requested geometry already matches; rejected while buffers are
+ * in use.
+ */
+static int npcm_video_set_dv_timings(struct file *file, void *fh,
+                                    struct v4l2_dv_timings *timings)
+{
+       struct npcm_video *video = video_drvdata(file);
+       int rc;
+
+       if (timings->bt.width == video->active_timings.width &&
+           timings->bt.height == video->active_timings.height)
+               return 0;
+
+       if (vb2_is_busy(&video->queue)) {
+               dev_err(video->dev, "%s device busy\n", __func__);
+               return -EBUSY;
+       }
+
+       rc = npcm_video_set_resolution(video, &timings->bt);
+       if (rc)
+               return rc;
+
+       timings->type = V4L2_DV_BT_656_1120;
+
+       return 0;
+}
+
+/* VIDIOC_G_DV_TIMINGS: report the currently programmed timings. */
+static int npcm_video_get_dv_timings(struct file *file, void *fh,
+                                    struct v4l2_dv_timings *timings)
+{
+       struct npcm_video *video = video_drvdata(file);
+
+       timings->type = V4L2_DV_BT_656_1120;
+       timings->bt = video->active_timings;
+
+       return 0;
+}
+
+/*
+ * VIDIOC_QUERY_DV_TIMINGS: re-detect the incoming host resolution and
+ * report it; -ENOLINK when no signal is present.
+ */
+static int npcm_video_query_dv_timings(struct file *file, void *fh,
+                                      struct v4l2_dv_timings *timings)
+{
+       struct npcm_video *video = video_drvdata(file);
+
+       npcm_video_detect_resolution(video);
+       timings->type = V4L2_DV_BT_656_1120;
+       timings->bt = video->detected_timings;
+
+       return video->v4l2_input_status ? -ENOLINK : 0;
+}
+
+/* VIDIOC_ENUM_DV_TIMINGS: enumerate from the static capability table. */
+static int npcm_video_enum_dv_timings(struct file *file, void *fh,
+                                     struct v4l2_enum_dv_timings *timings)
+{
+       return v4l2_enum_dv_timings_cap(timings, &npcm_video_timings_cap,
+                                       NULL, NULL);
+}
+
+/* VIDIOC_DV_TIMINGS_CAP: copy out the static capability description. */
+static int npcm_video_dv_timings_cap(struct file *file, void *fh,
+                                    struct v4l2_dv_timings_cap *cap)
+{
+       *cap = npcm_video_timings_cap;
+
+       return 0;
+}
+
+/*
+ * Event subscription: source-change events get the dedicated helper,
+ * everything else falls back to the control framework.
+ */
+static int npcm_video_sub_event(struct v4l2_fh *fh,
+                               const struct v4l2_event_subscription *sub)
+{
+       switch (sub->type) {
+       case V4L2_EVENT_SOURCE_CHANGE:
+               return v4l2_src_change_event_subscribe(fh, sub);
+       }
+
+       return v4l2_ctrl_subscribe_event(fh, sub);
+}
+
+/* ioctl dispatch table; buffer ioctls are the stock vb2 helpers. */
+static const struct v4l2_ioctl_ops npcm_video_ioctls = {
+       .vidioc_querycap = npcm_video_querycap,
+
+       .vidioc_enum_fmt_vid_cap = npcm_video_enum_format,
+       .vidioc_g_fmt_vid_cap = npcm_video_get_format,
+       .vidioc_s_fmt_vid_cap = npcm_video_set_format,
+       .vidioc_try_fmt_vid_cap = npcm_video_try_format,
+
+       .vidioc_reqbufs = vb2_ioctl_reqbufs,
+       .vidioc_querybuf = vb2_ioctl_querybuf,
+       .vidioc_qbuf = vb2_ioctl_qbuf,
+       .vidioc_expbuf = vb2_ioctl_expbuf,
+       .vidioc_dqbuf = vb2_ioctl_dqbuf,
+       .vidioc_create_bufs = vb2_ioctl_create_bufs,
+       .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+       .vidioc_streamon = vb2_ioctl_streamon,
+       .vidioc_streamoff = vb2_ioctl_streamoff,
+
+       .vidioc_enum_input = npcm_video_enum_input,
+       .vidioc_g_input = npcm_video_get_input,
+       .vidioc_s_input = npcm_video_set_input,
+
+       .vidioc_s_dv_timings = npcm_video_set_dv_timings,
+       .vidioc_g_dv_timings = npcm_video_get_dv_timings,
+       .vidioc_query_dv_timings = npcm_video_query_dv_timings,
+       .vidioc_enum_dv_timings = npcm_video_enum_dv_timings,
+       .vidioc_dv_timings_cap = npcm_video_dv_timings_cap,
+
+       .vidioc_subscribe_event = npcm_video_sub_event,
+       .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+/*
+ * s_ctrl handler: maps the custom capture-mode control onto the VCD
+ * operation command used for the next capture (full frame vs. diff).
+ */
+static int npcm_video_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+       struct npcm_video *video = container_of(ctrl->handler, struct npcm_video,
+                                               ctrl_handler);
+
+       switch (ctrl->id) {
+       case V4L2_CID_NPCM_CAPTURE_MODE:
+               if (ctrl->val == V4L2_NPCM_CAPTURE_MODE_COMPLETE)
+                       video->ctrl_cmd = VCD_CMD_OPERATION_CAPTURE;
+               else if (ctrl->val == V4L2_NPCM_CAPTURE_MODE_DIFF)
+                       video->ctrl_cmd = VCD_CMD_OPERATION_COMPARE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static const struct v4l2_ctrl_ops npcm_video_ctrl_ops = {
+       .s_ctrl = npcm_video_set_ctrl,
+};
+
+/* Menu entries for V4L2_CID_NPCM_CAPTURE_MODE (indices match the enum). */
+static const char * const npcm_ctrl_capture_mode_menu[] = {
+       "COMPLETE",
+       "DIFF",
+       NULL,
+};
+
+/* Custom menu control selecting full-frame vs. differential capture. */
+static const struct v4l2_ctrl_config npcm_ctrl_capture_mode = {
+       .ops = &npcm_video_ctrl_ops,
+       .id = V4L2_CID_NPCM_CAPTURE_MODE,
+       .name = "NPCM Video Capture Mode",
+       .type = V4L2_CTRL_TYPE_MENU,
+       .min = 0,
+       .max = V4L2_NPCM_CAPTURE_MODE_DIFF,
+       .def = 0,
+       .qmenu = npcm_ctrl_capture_mode_menu,
+};
+
+/*
+ * Read-side control reporting how many hextile rectangles the most
+ * recently dequeued buffer contains.  This control value is set when a
+ * buffer is dequeued by userspace, i.e. in the npcm_video_buf_finish
+ * function; the max is the worst case of one rectangle per tile.
+ */
+static const struct v4l2_ctrl_config npcm_ctrl_rect_count = {
+       .id = V4L2_CID_NPCM_RECT_COUNT,
+       .name = "NPCM Hextile Rectangle Count",
+       .type = V4L2_CTRL_TYPE_INTEGER,
+       .min = 0,
+       .max = (MAX_WIDTH / RECT_W) * (MAX_HEIGHT / RECT_H),
+       .step = 1,
+       .def = 0,
+};
+
+/*
+ * File open: the first opener (singular file handle) powers up the
+ * capture engine via npcm_video_start(); later openers share it.
+ */
+static int npcm_video_open(struct file *file)
+{
+       struct npcm_video *video = video_drvdata(file);
+       int rc;
+
+       mutex_lock(&video->video_lock);
+       rc = v4l2_fh_open(file);
+       if (rc) {
+               mutex_unlock(&video->video_lock);
+               return rc;
+       }
+
+       if (v4l2_fh_is_singular_file(file))
+               npcm_video_start(video);
+
+       mutex_unlock(&video->video_lock);
+       return 0;
+}
+
+/*
+ * File release: the last closer tears the engine down before the vb2
+ * file-handle release runs.
+ */
+static int npcm_video_release(struct file *file)
+{
+       struct npcm_video *video = video_drvdata(file);
+       int rc;
+
+       mutex_lock(&video->video_lock);
+       if (v4l2_fh_is_singular_file(file))
+               npcm_video_stop(video);
+
+       rc = _vb2_fop_release(file, NULL);
+
+       mutex_unlock(&video->video_lock);
+       return rc;
+}
+
+/* File operations; read/poll/mmap are the stock vb2 helpers. */
+static const struct v4l2_file_operations npcm_video_v4l2_fops = {
+       .owner = THIS_MODULE,
+       .read = vb2_fop_read,
+       .poll = vb2_fop_poll,
+       .unlocked_ioctl = video_ioctl2,
+       .mmap = vb2_fop_mmap,
+       .open = npcm_video_open,
+       .release = npcm_video_release,
+};
+
+/*
+ * vb2 queue_setup: single plane sized to the current sizeimage; also
+ * (re)initializes the per-buffer rectangle lists used by hextile.
+ */
+static int npcm_video_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
+                                 unsigned int *num_planes, unsigned int sizes[],
+                                 struct device *alloc_devs[])
+{
+       struct npcm_video *video = vb2_get_drv_priv(q);
+       unsigned int i;
+
+       /* Caller-provided plane layout: only validate the size. */
+       if (*num_planes) {
+               if (sizes[0] < video->pix_fmt.sizeimage)
+                       return -EINVAL;
+
+               return 0;
+       }
+
+       *num_planes = 1;
+       sizes[0] = video->pix_fmt.sizeimage;
+
+       for (i = 0; i < VIDEO_MAX_FRAME; i++)
+               INIT_LIST_HEAD(&video->list[i]);
+
+       return 0;
+}
+
+/* vb2 buf_prepare: reject buffers too small for the current format. */
+static int npcm_video_buf_prepare(struct vb2_buffer *vb)
+{
+       struct npcm_video *video = vb2_get_drv_priv(vb->vb2_queue);
+
+       if (vb2_plane_size(vb, 0) < video->pix_fmt.sizeimage)
+               return -EINVAL;
+
+       return 0;
+}
+
+/*
+ * vb2 start_streaming: kick the first capture; on failure all queued
+ * buffers are returned to vb2 in QUEUED state as the API requires.
+ */
+static int npcm_video_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+       struct npcm_video *video = vb2_get_drv_priv(q);
+       int rc;
+
+       video->sequence = 0;
+       rc = npcm_video_start_frame(video);
+       if (rc) {
+               npcm_video_bufs_done(video, VB2_BUF_STATE_QUEUED);
+               return rc;
+       }
+
+       set_bit(VIDEO_STREAMING, &video->flags);
+       return 0;
+}
+
+/*
+ * vb2 stop_streaming: quiesce the VCD, return outstanding buffers in
+ * ERROR state, and reset capture mode and the rectangle counter.
+ */
+static void npcm_video_stop_streaming(struct vb2_queue *q)
+{
+       struct npcm_video *video = vb2_get_drv_priv(q);
+       struct regmap *vcd = video->vcd_regmap;
+
+       clear_bit(VIDEO_STREAMING, &video->flags);
+       regmap_write(vcd, VCD_INTE, 0);
+       regmap_write(vcd, VCD_STAT, VCD_STAT_CLEAR);
+       npcm_video_gfx_reset(video);
+       npcm_video_bufs_done(video, VB2_BUF_STATE_ERROR);
+       video->ctrl_cmd = VCD_CMD_OPERATION_CAPTURE;
+       v4l2_ctrl_s_ctrl(video->rect_cnt_ctrl, 0);
+}
+
+/*
+ * vb2 buf_queue: append the buffer under the IRQ-safe lock; if the queue
+ * was empty and no capture is in flight, start one immediately (the IRQ
+ * handler otherwise chains captures itself).
+ */
+static void npcm_video_buf_queue(struct vb2_buffer *vb)
+{
+       struct npcm_video *video = vb2_get_drv_priv(vb->vb2_queue);
+       struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+       struct npcm_video_buffer *nvb = to_npcm_video_buffer(vbuf);
+       unsigned long flags;
+       bool empty;
+
+       spin_lock_irqsave(&video->lock, flags);
+       empty = list_empty(&video->buffers);
+       list_add_tail(&nvb->link, &video->buffers);
+       spin_unlock_irqrestore(&video->lock, flags);
+
+       if (test_bit(VIDEO_STREAMING, &video->flags) &&
+           !test_bit(VIDEO_CAPTURING, &video->flags) && empty) {
+               if (npcm_video_start_frame(video))
+                       dev_err(video->dev, "Failed to capture next frame\n");
+       }
+}
+
+/*
+ * vb2 buf_finish, called when the buffer is dequeued by userspace:
+ * publish the number of rectangles captured into this buffer through the
+ * V4L2_CID_NPCM_RECT_COUNT control and free the associated rect_list
+ * entries.
+ */
+static void npcm_video_buf_finish(struct vb2_buffer *vb)
+{
+       struct npcm_video *video = vb2_get_drv_priv(vb->vb2_queue);
+       struct rect_list *entry, *tmp;
+
+       if (test_bit(VIDEO_STREAMING, &video->flags)) {
+               v4l2_ctrl_s_ctrl(video->rect_cnt_ctrl, video->rect[vb->index]);
+
+               /*
+                * list_for_each_entry_safe() replaces the open-coded
+                * list_for_each_safe()/list_entry() pair.
+                */
+               list_for_each_entry_safe(entry, tmp, &video->list[vb->index], list) {
+                       list_del(&entry->list);
+                       kfree(entry);
+               }
+       }
+}
+
+/* MMIO regmap layout for the VCD register block. */
+static const struct regmap_config npcm_video_regmap_cfg = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = VCD_FIFO,
+};
+
+/* MMIO regmap layout for the ECE register block. */
+static const struct regmap_config npcm_video_ece_regmap_cfg = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = ECE_HEX_RECT_OFFSET,
+};
+
+/* videobuf2 queue operations. */
+static const struct vb2_ops npcm_video_vb2_ops = {
+       .queue_setup = npcm_video_queue_setup,
+       .wait_prepare = vb2_ops_wait_prepare,
+       .wait_finish = vb2_ops_wait_finish,
+       .buf_prepare = npcm_video_buf_prepare,
+       .buf_finish = npcm_video_buf_finish,
+       .start_streaming = npcm_video_start_streaming,
+       .stop_streaming = npcm_video_stop_streaming,
+       .buf_queue =  npcm_video_buf_queue,
+};
+
+/*
+ * Register the V4L2 side of the driver: v4l2_device, the two custom
+ * controls, the vb2 queue and the video device node.  Default pixel
+ * format is HEXTILE when the ECE is available, RGB565 otherwise.
+ * Unwinds in reverse order on failure.
+ */
+static int npcm_video_setup_video(struct npcm_video *video)
+{
+       struct v4l2_device *v4l2_dev = &video->v4l2_dev;
+       struct video_device *vdev = &video->vdev;
+       struct vb2_queue *vbq = &video->queue;
+       int rc;
+
+       if (video->ece.enable)
+               video->pix_fmt.pixelformat = V4L2_PIX_FMT_HEXTILE;
+       else
+               video->pix_fmt.pixelformat = V4L2_PIX_FMT_RGB565;
+
+       video->pix_fmt.field = V4L2_FIELD_NONE;
+       video->pix_fmt.colorspace = V4L2_COLORSPACE_SRGB;
+       video->pix_fmt.quantization = V4L2_QUANTIZATION_FULL_RANGE;
+       video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+
+       rc = v4l2_device_register(video->dev, v4l2_dev);
+       if (rc) {
+               dev_err(video->dev, "Failed to register v4l2 device\n");
+               return rc;
+       }
+
+       v4l2_ctrl_handler_init(&video->ctrl_handler, 2);
+       v4l2_ctrl_new_custom(&video->ctrl_handler, &npcm_ctrl_capture_mode, NULL);
+       video->rect_cnt_ctrl = v4l2_ctrl_new_custom(&video->ctrl_handler,
+                                                   &npcm_ctrl_rect_count, NULL);
+       if (video->ctrl_handler.error) {
+               dev_err(video->dev, "Failed to init controls: %d\n",
+                       video->ctrl_handler.error);
+
+               rc = video->ctrl_handler.error;
+               goto rel_ctrl_handler;
+       }
+       v4l2_dev->ctrl_handler = &video->ctrl_handler;
+
+       vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       vbq->io_modes = VB2_MMAP | VB2_DMABUF;
+       vbq->dev = v4l2_dev->dev;
+       vbq->lock = &video->video_lock;
+       vbq->ops = &npcm_video_vb2_ops;
+       vbq->mem_ops = &vb2_dma_contig_memops;
+       vbq->drv_priv = video;
+       vbq->buf_struct_size = sizeof(struct npcm_video_buffer);
+       vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+       vbq->min_buffers_needed = 3;
+
+       rc = vb2_queue_init(vbq);
+       if (rc) {
+               dev_err(video->dev, "Failed to init vb2 queue\n");
+               goto rel_ctrl_handler;
+       }
+       vdev->queue = vbq;
+       vdev->fops = &npcm_video_v4l2_fops;
+       vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+       vdev->v4l2_dev = v4l2_dev;
+       strscpy(vdev->name, DEVICE_NAME, sizeof(vdev->name));
+       vdev->vfl_type = VFL_TYPE_VIDEO;
+       vdev->vfl_dir = VFL_DIR_RX;
+       vdev->release = video_device_release_empty;
+       vdev->ioctl_ops = &npcm_video_ioctls;
+       vdev->lock = &video->video_lock;
+
+       video_set_drvdata(vdev, video);
+       rc = video_register_device(vdev, VFL_TYPE_VIDEO, 0);
+       if (rc) {
+               dev_err(video->dev, "Failed to register video device\n");
+               goto rel_vb_queue;
+       }
+
+       return 0;
+
+rel_vb_queue:
+       vb2_queue_release(vbq);
+rel_ctrl_handler:
+       v4l2_ctrl_handler_free(&video->ctrl_handler);
+       v4l2_device_unregister(v4l2_dev);
+
+       return rc;
+}
+
+/*
+ * Look up the optional ECE encoder via the "nuvoton,ece" phandle.  When
+ * the node exists and is enabled, map its registers and reset control;
+ * HEXTILE support is keyed off video->ece.enable.
+ *
+ * Fixes over the previous version: of_find_device_by_node() returns
+ * NULL on failure (not an ERR_PTR), so the IS_ERR() check could never
+ * fire; and the ece_node reference was leaked on the disabled path and
+ * on the lookup-failure path.  All exits now go through of_node_put().
+ */
+static int npcm_video_ece_init(struct npcm_video *video)
+{
+       struct device *dev = video->dev;
+       struct device_node *ece_node;
+       struct platform_device *ece_pdev;
+       void __iomem *regs;
+       int ret = 0;
+
+       ece_node = of_parse_phandle(video->dev->of_node, "nuvoton,ece", 0);
+       if (!ece_node) {
+               dev_err(dev, "Failed to get ECE phandle in DTS\n");
+               return -ENODEV;
+       }
+
+       video->ece.enable = of_device_is_available(ece_node);
+       if (!video->ece.enable)
+               goto out_put_node;
+
+       dev_info(dev, "Support HEXTILE pixel format\n");
+
+       /* of_find_device_by_node() returns NULL on failure, not ERR_PTR() */
+       ece_pdev = of_find_device_by_node(ece_node);
+       if (!ece_pdev) {
+               dev_err(dev, "Failed to find ECE device\n");
+               ret = -ENODEV;
+               goto out_put_node;
+       }
+
+       regs = devm_platform_ioremap_resource(ece_pdev, 0);
+       if (IS_ERR(regs)) {
+               dev_err(dev, "Failed to parse ECE reg in DTS\n");
+               ret = PTR_ERR(regs);
+               goto out_put_node;
+       }
+
+       video->ece.regmap = devm_regmap_init_mmio(dev, regs,
+                                                 &npcm_video_ece_regmap_cfg);
+       if (IS_ERR(video->ece.regmap)) {
+               dev_err(dev, "Failed to initialize ECE regmap\n");
+               ret = PTR_ERR(video->ece.regmap);
+               goto out_put_node;
+       }
+
+       video->ece.reset = devm_reset_control_get(&ece_pdev->dev, NULL);
+       if (IS_ERR(video->ece.reset)) {
+               dev_err(dev, "Failed to get ECE reset control in DTS\n");
+               ret = PTR_ERR(video->ece.reset);
+       }
+
+out_put_node:
+       of_node_put(ece_node);
+       return ret;
+}
+
+/*
+ * Core init: map and request the VCD interrupt, set up reserved-memory
+ * backed 32-bit DMA, and probe the optional ECE.
+ *
+ * Fix over the previous version: a dma_set_mask_and_coherent() failure
+ * released the reserved memory but then fell through and continued
+ * initialization; it now propagates the error.
+ */
+static int npcm_video_init(struct npcm_video *video)
+{
+       struct device *dev = video->dev;
+       int irq, rc;
+
+       irq = irq_of_parse_and_map(dev->of_node, 0);
+       if (!irq) {
+               dev_err(dev, "Failed to find VCD IRQ\n");
+               return -ENODEV;
+       }
+
+       rc = devm_request_threaded_irq(dev, irq, NULL, npcm_video_irq,
+                                      IRQF_ONESHOT, DEVICE_NAME, video);
+       if (rc < 0) {
+               dev_err(dev, "Failed to request IRQ %d\n", irq);
+               return rc;
+       }
+
+       of_reserved_mem_device_init(dev);
+       rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (rc) {
+               dev_err(dev, "Failed to set DMA mask\n");
+               of_reserved_mem_device_release(dev);
+               return rc;
+       }
+
+       rc = npcm_video_ece_init(video);
+       if (rc) {
+               dev_err(dev, "Failed to initialize ECE\n");
+               return rc;
+       }
+
+       return 0;
+}
+
+/*
+ * Platform probe: allocate the driver state, map the VCD registers,
+ * look up the reset control and the GCR/GFXI syscons, then run core and
+ * V4L2 initialization.
+ *
+ * Fix over the previous version: the state was allocated with plain
+ * kzalloc() and never freed, leaking it on every error return (and on
+ * remove); devm_kzalloc() ties its lifetime to the device.
+ */
+static int npcm_video_probe(struct platform_device *pdev)
+{
+       struct npcm_video *video;
+       void __iomem *regs;
+       int rc;
+
+       video = devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL);
+       if (!video)
+               return -ENOMEM;
+
+       video->dev = &pdev->dev;
+       spin_lock_init(&video->lock);
+       mutex_init(&video->video_lock);
+       INIT_LIST_HEAD(&video->buffers);
+
+       regs = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(regs)) {
+               dev_err(&pdev->dev, "Failed to parse VCD reg in DTS\n");
+               return PTR_ERR(regs);
+       }
+
+       video->vcd_regmap = devm_regmap_init_mmio(&pdev->dev, regs,
+                                                 &npcm_video_regmap_cfg);
+       if (IS_ERR(video->vcd_regmap)) {
+               dev_err(&pdev->dev, "Failed to initialize VCD regmap\n");
+               return PTR_ERR(video->vcd_regmap);
+       }
+
+       video->reset = devm_reset_control_get(&pdev->dev, NULL);
+       if (IS_ERR(video->reset)) {
+               dev_err(&pdev->dev, "Failed to get VCD reset control in DTS\n");
+               return PTR_ERR(video->reset);
+       }
+
+       video->gcr_regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                           "nuvoton,sysgcr");
+       if (IS_ERR(video->gcr_regmap))
+               return PTR_ERR(video->gcr_regmap);
+
+       video->gfx_regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                           "nuvoton,sysgfxi");
+       if (IS_ERR(video->gfx_regmap))
+               return PTR_ERR(video->gfx_regmap);
+
+       rc = npcm_video_init(video);
+       if (rc)
+               return rc;
+
+       rc = npcm_video_setup_video(video);
+       if (rc)
+               return rc;
+
+       dev_info(video->dev, "NPCM video driver probed\n");
+       return 0;
+}
+
+/*
+ * Platform remove: unwind npcm_video_setup_video()/npcm_video_init() in
+ * reverse order; drvdata was set to the v4l2_device by registration.
+ */
+static int npcm_video_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct v4l2_device *v4l2_dev = dev_get_drvdata(dev);
+       struct npcm_video *video = to_npcm_video(v4l2_dev);
+
+       video_unregister_device(&video->vdev);
+       vb2_queue_release(&video->queue);
+       v4l2_ctrl_handler_free(&video->ctrl_handler);
+       v4l2_device_unregister(v4l2_dev);
+       if (video->ece.enable)
+               npcm_video_ece_stop(video);
+       of_reserved_mem_device_release(dev);
+
+       return 0;
+}
+
+/* Devicetree match table: NPCM7xx and NPCM8xx VCD blocks. */
+static const struct of_device_id npcm_video_match[] = {
+       { .compatible = "nuvoton,npcm750-vcd" },
+       { .compatible = "nuvoton,npcm845-vcd" },
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, npcm_video_match);
+
+static struct platform_driver npcm_video_driver = {
+       .driver = {
+               .name = DEVICE_NAME,
+               .of_match_table = npcm_video_match,
+       },
+       .probe = npcm_video_probe,
+       .remove = npcm_video_remove,
+};
+
+module_platform_driver(npcm_video_driver);
+
+MODULE_AUTHOR("Joseph Liu <kwliu@nuvoton.com>");
+MODULE_AUTHOR("Marvin Lin <kflin@nuvoton.com>");
+MODULE_DESCRIPTION("Driver for Nuvoton NPCM Video Capture/Encode Engine");
+MODULE_LICENSE("GPL v2");
index a2b4fb9e29e7d10ba3905745a66b1d53dd24ea3d..d579c804b04790166a429dfdfab4a68ee61870e9 100644 (file)
@@ -115,28 +115,17 @@ void print_cast_status(struct device *dev, void __iomem *reg,
 void print_wrapper_info(struct device *dev, void __iomem *reg);
 void mxc_jpeg_sw_reset(void __iomem *reg);
 int mxc_jpeg_enable(void __iomem *reg);
-void wait_frmdone(struct device *dev, void __iomem *reg);
 void mxc_jpeg_enc_mode_conf(struct device *dev, void __iomem *reg, u8 extseq);
 void mxc_jpeg_enc_mode_go(struct device *dev, void __iomem *reg, u8 extseq);
 void mxc_jpeg_enc_set_quality(struct device *dev, void __iomem *reg, u8 quality);
 void mxc_jpeg_dec_mode_go(struct device *dev, void __iomem *reg);
-int mxc_jpeg_get_slot(void __iomem *reg);
-u32 mxc_jpeg_get_offset(void __iomem *reg, int slot);
 void mxc_jpeg_enable_slot(void __iomem *reg, int slot);
 void mxc_jpeg_set_l_endian(void __iomem *reg, int le);
 void mxc_jpeg_enable_irq(void __iomem *reg, int slot);
 void mxc_jpeg_disable_irq(void __iomem *reg, int slot);
-int mxc_jpeg_set_input(void __iomem *reg, u32 in_buf, u32 bufsize);
-int mxc_jpeg_set_output(void __iomem *reg, u16 out_pitch, u32 out_buf,
-                       u16 w, u16 h);
-void mxc_jpeg_set_config_mode(void __iomem *reg, int config_mode);
-int mxc_jpeg_set_params(struct mxc_jpeg_desc *desc,  u32 bufsize, u16
-                       out_pitch, u32 format);
 void mxc_jpeg_set_bufsize(struct mxc_jpeg_desc *desc,  u32 bufsize);
 void mxc_jpeg_set_res(struct mxc_jpeg_desc *desc, u16 w, u16 h);
 void mxc_jpeg_set_line_pitch(struct mxc_jpeg_desc *desc, u32 line_pitch);
 void mxc_jpeg_set_desc(u32 desc, void __iomem *reg, int slot);
 void mxc_jpeg_clr_desc(void __iomem *reg, int slot);
-void mxc_jpeg_set_regs_from_desc(struct mxc_jpeg_desc *desc,
-                                void __iomem *reg);
 #endif
index b7a720198ce57ae0e1bdec9f963f8efd141751d1..64112b63298ca0a91364b723d7a3dd49b6a26a77 100644 (file)
@@ -1322,6 +1322,20 @@ static bool mxc_jpeg_compare_format(const struct mxc_jpeg_fmt *fmt1,
        return false;
 }
 
+/*
+ * Mark the end of a drain sequence on the capture queue: complete the
+ * next destination buffer as "last", or — when none is queued — flag the
+ * m2m context so the next dequeued buffer carries the LAST flag.
+ * NOTE(review): this pokes m2m_ctx->is_draining/next_buf_last directly
+ * rather than via a v4l2-mem2mem helper — confirm against the m2m core.
+ */
+static void mxc_jpeg_set_last_buffer(struct mxc_jpeg_ctx *ctx)
+{
+       struct vb2_v4l2_buffer *next_dst_buf;
+
+       next_dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+       if (!next_dst_buf) {
+               ctx->fh.m2m_ctx->is_draining = true;
+               ctx->fh.m2m_ctx->next_buf_last = true;
+               return;
+       }
+
+       v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, next_dst_buf);
+}
+
 static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
                                   struct mxc_jpeg_src_buf *jpeg_src_buf)
 {
@@ -1334,7 +1348,8 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
        q_data_cap = mxc_jpeg_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
        if (mxc_jpeg_compare_format(q_data_cap->fmt, jpeg_src_buf->fmt))
                jpeg_src_buf->fmt = q_data_cap->fmt;
-       if (q_data_cap->fmt != jpeg_src_buf->fmt ||
+       if (ctx->need_initial_source_change_evt ||
+           q_data_cap->fmt != jpeg_src_buf->fmt ||
            q_data_cap->w != jpeg_src_buf->w ||
            q_data_cap->h != jpeg_src_buf->h) {
                dev_dbg(dev, "Detected jpeg res=(%dx%d)->(%dx%d), pixfmt=%c%c%c%c\n",
@@ -1378,6 +1393,9 @@ static bool mxc_jpeg_source_change(struct mxc_jpeg_ctx *ctx,
                mxc_jpeg_sizeimage(q_data_cap);
                notify_src_chg(ctx);
                ctx->source_change = 1;
+               ctx->need_initial_source_change_evt = false;
+               if (vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx)))
+                       mxc_jpeg_set_last_buffer(ctx);
        }
 
        return ctx->source_change ? true : false;
@@ -1595,6 +1613,9 @@ static int mxc_jpeg_queue_setup(struct vb2_queue *q,
        for (i = 0; i < *nplanes; i++)
                sizes[i] = mxc_jpeg_get_plane_size(q_data, i);
 
+       if (V4L2_TYPE_IS_OUTPUT(q->type))
+               ctx->need_initial_source_change_evt = true;
+
        return 0;
 }
 
@@ -1638,8 +1659,13 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
                v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
        }
 
-       if (V4L2_TYPE_IS_OUTPUT(q->type) || !ctx->source_change)
-               v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
+       v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
+       /* if V4L2_DEC_CMD_STOP is sent before the source change triggered,
+        * restore the is_draining flag
+        */
+       if (V4L2_TYPE_IS_CAPTURE(q->type) && ctx->source_change && ctx->fh.m2m_ctx->last_src_buf)
+               ctx->fh.m2m_ctx->is_draining = true;
+
        if (V4L2_TYPE_IS_OUTPUT(q->type) &&
            v4l2_m2m_has_stopped(ctx->fh.m2m_ctx)) {
                notify_eos(ctx);
@@ -1916,7 +1942,7 @@ static int mxc_jpeg_buf_prepare(struct vb2_buffer *vb)
                return -EINVAL;
        for (i = 0; i < q_data->fmt->mem_planes; i++) {
                sizeimage = mxc_jpeg_get_plane_size(q_data, i);
-               if (vb2_plane_size(vb, i) < sizeimage) {
+               if (!ctx->source_change && vb2_plane_size(vb, i) < sizeimage) {
                        dev_err(dev, "plane %d too small (%lu < %lu)",
                                i, vb2_plane_size(vb, i), sizeimage);
                        return -EINVAL;
@@ -2768,7 +2794,7 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
        ret = mxc_jpeg_attach_pm_domains(jpeg);
        if (ret < 0) {
                dev_err(dev, "failed to attach power domains %d\n", ret);
-               return ret;
+               goto err_clk;
        }
 
        /* v4l2 */
index d80e94cc9d992451cc7cf6d0f5e62c98f45aaa8e..dc4afeeff5b65bb77c1674f9cc19ad3a32100378 100644 (file)
@@ -99,6 +99,7 @@ struct mxc_jpeg_ctx {
        enum mxc_jpeg_enc_state         enc_state;
        int                             slot;
        unsigned int                    source_change;
+       bool                            need_initial_source_change_evt;
        bool                            header_parsed;
        struct v4l2_ctrl_handler        ctrl_handler;
        u8                              jpeg_quality;
index 5f93712bf485408cb3a7d174b4ddb44b65250ff4..6cb20b45e0a1373c643dfc69b69d48cb2f83287c 100644 (file)
@@ -1114,8 +1114,6 @@ static int mipi_csis_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
        fd->type = V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL;
        fd->num_entries = 1;
 
-       memset(entry, 0, sizeof(*entry));
-
        entry->flags = 0;
        entry->pixelcode = csis_fmt->code;
        entry->bus.csi2.vc = 0;
index 81be744e9f1b586181174624b7d2e6b5ff39832a..f73facb97dc5036d111ceeb31897832140519ceb 100644 (file)
@@ -406,12 +406,10 @@ static int mxc_isi_clk_get(struct mxc_isi_dev *isi)
                          * sizeof(*isi->clks);
        int ret;
 
-       isi->clks = devm_kmalloc(isi->dev, size, GFP_KERNEL);
+       isi->clks = devm_kmemdup(isi->dev, isi->pdata->clks, size, GFP_KERNEL);
        if (!isi->clks)
                return -ENOMEM;
 
-       memcpy(isi->clks, isi->pdata->clks, size);
-
        ret = devm_clk_bulk_get(isi->dev, isi->pdata->num_clks,
                                isi->clks);
        if (ret < 0) {
index d2aec0679dfc0251241bd012f8ac81c7c287b61c..dd49a40e6a70476a81ea00151eef42047e824aa8 100644 (file)
 
 static const struct csid_format csid_formats[] = {
        {
-               MEDIA_BUS_FMT_UYVY8_2X8,
+               MEDIA_BUS_FMT_UYVY8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
                2,
        },
        {
-               MEDIA_BUS_FMT_VYUY8_2X8,
+               MEDIA_BUS_FMT_VYUY8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
                2,
        },
        {
-               MEDIA_BUS_FMT_YUYV8_2X8,
+               MEDIA_BUS_FMT_YUYV8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
                2,
        },
        {
-               MEDIA_BUS_FMT_YVYU8_2X8,
+               MEDIA_BUS_FMT_YVYU8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
index e7436ec6d02bf4feb4fa2269207c9a6b5247da9d..6b26e036294e4c134ebfcd1b9729320bef04ac11 100644 (file)
 
 static const struct csid_format csid_formats[] = {
        {
-               MEDIA_BUS_FMT_UYVY8_2X8,
+               MEDIA_BUS_FMT_UYVY8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
                2,
        },
        {
-               MEDIA_BUS_FMT_VYUY8_2X8,
+               MEDIA_BUS_FMT_VYUY8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
                2,
        },
        {
-               MEDIA_BUS_FMT_YUYV8_2X8,
+               MEDIA_BUS_FMT_YUYV8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
                2,
        },
        {
-               MEDIA_BUS_FMT_YVYU8_2X8,
+               MEDIA_BUS_FMT_YVYU8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
index 0f8ac29d038db8562fcefb953113b023c593c69e..05ff5fa8095a88f5bce7536db7b9650bf8585d22 100644 (file)
 
 static const struct csid_format csid_formats[] = {
        {
-               MEDIA_BUS_FMT_UYVY8_2X8,
+               MEDIA_BUS_FMT_UYVY8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
                2,
        },
        {
-               MEDIA_BUS_FMT_VYUY8_2X8,
+               MEDIA_BUS_FMT_VYUY8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
                2,
        },
        {
-               MEDIA_BUS_FMT_YUYV8_2X8,
+               MEDIA_BUS_FMT_YUYV8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
                2,
        },
        {
-               MEDIA_BUS_FMT_YVYU8_2X8,
+               MEDIA_BUS_FMT_YVYU8_1X16,
                DATA_TYPE_YUV422_8BIT,
                DECODE_FORMAT_UNCOMPRESSED_8_BIT,
                8,
@@ -352,12 +352,21 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
                phy_sel = csid->phy.csiphy_id;
 
        if (enable) {
-               u8 dt_id = vc;
+               /*
+                * DT_ID is a two bit bitfield that is concatenated with
+                * the four least significant bits of the five bit VC
+                * bitfield to generate an internal CID value.
+                *
+                * CSID_RDI_CFG0(vc)
+                * DT_ID : 28:27
+                * VC    : 26:22
+                * DT    : 21:16
+                *
+                * CID   : VC 3:0 << 2 | DT_ID 1:0
+                */
+               u8 dt_id = vc & 0x03;
 
                if (tg->enabled) {
-                       /* Config Test Generator */
-                       vc = 0xa;
-
                        /* configure one DT, infinite frames */
                        val = vc << TPG_VC_CFG0_VC_NUM;
                        val |= INTELEAVING_MODE_ONE_SHOT << TPG_VC_CFG0_LINE_INTERLEAVING_MODE;
@@ -370,14 +379,14 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
 
                        writel_relaxed(0x12345678, csid->base + CSID_TPG_LFSR_SEED);
 
-                       val = input_format->height & 0x1fff << TPG_DT_n_CFG_0_FRAME_HEIGHT;
-                       val |= input_format->width & 0x1fff << TPG_DT_n_CFG_0_FRAME_WIDTH;
+                       val = (input_format->height & 0x1fff) << TPG_DT_n_CFG_0_FRAME_HEIGHT;
+                       val |= (input_format->width & 0x1fff) << TPG_DT_n_CFG_0_FRAME_WIDTH;
                        writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_0(0));
 
                        val = format->data_type << TPG_DT_n_CFG_1_DATA_TYPE;
                        writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_1(0));
 
-                       val = tg->mode << TPG_DT_n_CFG_2_PAYLOAD_MODE;
+                       val = (tg->mode - 1) << TPG_DT_n_CFG_2_PAYLOAD_MODE;
                        val |= 0xBE << TPG_DT_n_CFG_2_USER_SPECIFIED_PAYLOAD;
                        val |= format->decode_format << TPG_DT_n_CFG_2_ENCODE_FORMAT;
                        writel_relaxed(val, csid->base + CSID_TPG_DT_n_CFG_2(0));
@@ -449,6 +458,8 @@ static void __csid_configure_stream(struct csid_device *csid, u8 enable, u8 vc)
        writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG0);
 
        val = 1 << CSI2_RX_CFG1_PACKET_ECC_CORRECTION_EN;
+       if (vc > 3)
+               val |= 1 << CSI2_RX_CFG1_VC_MODE;
        val |= 1 << CSI2_RX_CFG1_MISR_EN;
        writel_relaxed(val, csid->base + CSID_CSI2_RX_CFG1);
 
index 6360314f04a636f1addd249d84f986f0b73fe703..95873f988f7e25150b8b2d9457da0932cfbebcc8 100644 (file)
@@ -159,15 +159,17 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
        struct camss *camss = csid->camss;
        struct device *dev = camss->dev;
        struct vfe_device *vfe = &camss->vfe[csid->id];
-       u32 version = camss->version;
        int ret = 0;
 
        if (on) {
-               if (version == CAMSS_8250 || version == CAMSS_845) {
-                       ret = vfe_get(vfe);
-                       if (ret < 0)
-                               return ret;
-               }
+               /*
+                * From SDM845 onwards, the VFE needs to be powered on before
+                * switching on the CSID. Do so unconditionally, as there is no
+                * drawback in following the same powering order on older SoCs.
+                */
+               ret = vfe_get(vfe);
+               if (ret < 0)
+                       return ret;
 
                ret = pm_runtime_resume_and_get(dev);
                if (ret < 0)
@@ -217,8 +219,7 @@ static int csid_set_power(struct v4l2_subdev *sd, int on)
                regulator_bulk_disable(csid->num_supplies,
                                       csid->supplies);
                pm_runtime_put_sync(dev);
-               if (version == CAMSS_8250 || version == CAMSS_845)
-                       vfe_put(vfe);
+               vfe_put(vfe);
        }
 
        return ret;
@@ -307,7 +308,7 @@ static void csid_try_format(struct csid_device *csid,
 
                /* If not found, use UYVY as default */
                if (i >= csid->nformats)
-                       fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+                       fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
 
                fmt->width = clamp_t(u32, fmt->width, 1, 8191);
                fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -336,7 +337,7 @@ static void csid_try_format(struct csid_device *csid,
 
                        /* If not found, use UYVY as default */
                        if (i >= csid->nformats)
-                               fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+                               fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
 
                        fmt->width = clamp_t(u32, fmt->width, 1, 8191);
                        fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -503,7 +504,7 @@ static int csid_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
                .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
                              V4L2_SUBDEV_FORMAT_ACTIVE,
                .format = {
-                       .code = MEDIA_BUS_FMT_UYVY8_2X8,
+                       .code = MEDIA_BUS_FMT_UYVY8_1X16,
                        .width = 1920,
                        .height = 1080
                }
@@ -566,7 +567,7 @@ static const struct v4l2_ctrl_ops csid_ctrl_ops = {
  * Return 0 on success or a negative error code otherwise
  */
 int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
-                        const struct resources *res, u8 id)
+                        const struct camss_subdev_resources *res, u8 id)
 {
        struct device *dev = camss->dev;
        struct platform_device *pdev = to_platform_device(dev);
@@ -575,23 +576,13 @@ int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
 
        csid->camss = camss;
        csid->id = id;
+       csid->ops = res->ops;
 
-       if (camss->version == CAMSS_8x16) {
-               csid->ops = &csid_ops_4_1;
-       } else if (camss->version == CAMSS_8x96 ||
-                  camss->version == CAMSS_660) {
-               csid->ops = &csid_ops_4_7;
-       } else if (camss->version == CAMSS_845 ||
-                  camss->version == CAMSS_8250) {
-               csid->ops = &csid_ops_gen2;
-       } else {
-               return -EINVAL;
-       }
        csid->ops->subdev_init(csid);
 
        /* Memory */
 
-       if (camss->version == CAMSS_8250) {
+       if (camss->res->version == CAMSS_8250) {
                /* for titan 480, CSID registers are inside the VFE region,
                 * between the VFE "top" and "bus" registers. this requires
                 * VFE to be initialized before CSID
index d4b48432a0973b2e2eda671871ce2a4fd03182e9..30d94eb2eb041af772e72ac9e62d22086a185dcb 100644 (file)
@@ -172,7 +172,7 @@ struct csid_device {
        const struct csid_hw_ops *ops;
 };
 
-struct resources;
+struct camss_subdev_resources;
 
 /*
  * csid_find_code - Find a format code in an array using array index or format code
@@ -200,7 +200,7 @@ const struct csid_format *csid_get_fmt_entry(const struct csid_format *formats,
                                             u32 code);
 
 int msm_csid_subdev_init(struct camss *camss, struct csid_device *csid,
-                        const struct resources *res, u8 id);
+                        const struct camss_subdev_resources *res, u8 id);
 
 int msm_csid_register_entity(struct csid_device *csid,
                             struct v4l2_device *v4l2_dev);
index 04baa80494c6672f8c5bd70f77a749af7d685488..f50e2235c37fcd4d45314543316e0468225f3786 100644 (file)
@@ -402,7 +402,7 @@ static void csiphy_gen1_config_lanes(struct csiphy_device *csiphy,
        val = CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG;
        writel_relaxed(val, csiphy->base + CSIPHY_3PH_LNn_CFG1(l));
 
-       if (csiphy->camss->version == CAMSS_660)
+       if (csiphy->camss->res->version == CAMSS_660)
                val = CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS_660;
        else
                val = CSIPHY_3PH_LNn_CFG4_T_HS_CLK_MISS;
@@ -419,7 +419,7 @@ static void csiphy_gen2_config_lanes(struct csiphy_device *csiphy,
        int i, l, array_size;
        u32 val;
 
-       switch (csiphy->camss->version) {
+       switch (csiphy->camss->res->version) {
        case CAMSS_845:
                r = &lane_regs_sdm845[0][0];
                array_size = ARRAY_SIZE(lane_regs_sdm845[0]);
@@ -468,15 +468,15 @@ static void csiphy_lanes_enable(struct csiphy_device *csiphy,
                                s64 link_freq, u8 lane_mask)
 {
        struct csiphy_lanes_cfg *c = &cfg->csi2->lane_cfg;
-       bool is_gen2 = (csiphy->camss->version == CAMSS_845 ||
-                       csiphy->camss->version == CAMSS_8250);
+       bool is_gen2 = (csiphy->camss->res->version == CAMSS_845 ||
+                       csiphy->camss->res->version == CAMSS_8250);
        u8 settle_cnt;
        u8 val;
        int i;
 
        settle_cnt = csiphy_settle_cnt_calc(link_freq, csiphy->timer_clk_rate);
 
-       val = is_gen2 ? BIT(7) : CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
+       val = CSIPHY_3PH_CMN_CSI_COMMON_CTRL5_CLK_ENABLE;
        for (i = 0; i < c->num_data; i++)
                val |= BIT(c->data[i].pos * 2);
 
index 3f726a7237f5663001954c29f9680f56febcc76c..edd573606a6ae4365bebcdba23ceaabc997ceec2 100644 (file)
@@ -30,10 +30,10 @@ struct csiphy_format {
 };
 
 static const struct csiphy_format csiphy_formats_8x16[] = {
-       { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
-       { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
-       { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
-       { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+       { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+       { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+       { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+       { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
        { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
        { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
        { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -50,10 +50,10 @@ static const struct csiphy_format csiphy_formats_8x16[] = {
 };
 
 static const struct csiphy_format csiphy_formats_8x96[] = {
-       { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
-       { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
-       { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
-       { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+       { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+       { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+       { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+       { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
        { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
        { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
        { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -74,10 +74,10 @@ static const struct csiphy_format csiphy_formats_8x96[] = {
 };
 
 static const struct csiphy_format csiphy_formats_sdm845[] = {
-       { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
-       { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
-       { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
-       { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+       { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+       { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+       { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+       { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
        { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
        { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
        { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -357,7 +357,7 @@ static void csiphy_try_format(struct csiphy_device *csiphy,
 
                /* If not found, use UYVY as default */
                if (i >= csiphy->nformats)
-                       fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+                       fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
 
                fmt->width = clamp_t(u32, fmt->width, 1, 8191);
                fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -527,7 +527,7 @@ static int csiphy_init_formats(struct v4l2_subdev *sd,
                .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
                              V4L2_SUBDEV_FORMAT_ACTIVE,
                .format = {
-                       .code = MEDIA_BUS_FMT_UYVY8_2X8,
+                       .code = MEDIA_BUS_FMT_UYVY8_1X16,
                        .width = 1920,
                        .height = 1080
                }
@@ -536,6 +536,15 @@ static int csiphy_init_formats(struct v4l2_subdev *sd,
        return csiphy_set_format(sd, fh ? fh->state : NULL, &format);
 }
 
+static bool csiphy_match_clock_name(const char *clock_name, const char *format,
+                                   int index)
+{
+       char name[16]; /* csiphyXXX_timer\0 */
+
+       snprintf(name, sizeof(name), format, index);
+       return !strcmp(clock_name, name);
+}
+
 /*
  * msm_csiphy_subdev_init - Initialize CSIPHY device structure and resources
  * @csiphy: CSIPHY device
@@ -546,33 +555,33 @@ static int csiphy_init_formats(struct v4l2_subdev *sd,
  */
 int msm_csiphy_subdev_init(struct camss *camss,
                           struct csiphy_device *csiphy,
-                          const struct resources *res, u8 id)
+                          const struct camss_subdev_resources *res, u8 id)
 {
        struct device *dev = camss->dev;
        struct platform_device *pdev = to_platform_device(dev);
-       int i, j;
+       int i, j, k;
        int ret;
 
        csiphy->camss = camss;
        csiphy->id = id;
        csiphy->cfg.combo_mode = 0;
+       csiphy->ops = res->ops;
 
-       if (camss->version == CAMSS_8x16) {
-               csiphy->ops = &csiphy_ops_2ph_1_0;
+       switch (camss->res->version) {
+       case CAMSS_8x16:
                csiphy->formats = csiphy_formats_8x16;
                csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x16);
-       } else if (camss->version == CAMSS_8x96 ||
-                  camss->version == CAMSS_660) {
-               csiphy->ops = &csiphy_ops_3ph_1_0;
+               break;
+       case CAMSS_8x96:
+       case CAMSS_660:
                csiphy->formats = csiphy_formats_8x96;
                csiphy->nformats = ARRAY_SIZE(csiphy_formats_8x96);
-       } else if (camss->version == CAMSS_845 ||
-                  camss->version == CAMSS_8250) {
-               csiphy->ops = &csiphy_ops_3ph_1_0;
+               break;
+       case CAMSS_845:
+       case CAMSS_8250:
                csiphy->formats = csiphy_formats_sdm845;
                csiphy->nformats = ARRAY_SIZE(csiphy_formats_sdm845);
-       } else {
-               return -EINVAL;
+               break;
        }
 
        /* Memory */
@@ -581,8 +590,8 @@ int msm_csiphy_subdev_init(struct camss *camss,
        if (IS_ERR(csiphy->base))
                return PTR_ERR(csiphy->base);
 
-       if (camss->version == CAMSS_8x16 ||
-           camss->version == CAMSS_8x96) {
+       if (camss->res->version == CAMSS_8x16 ||
+           camss->res->version == CAMSS_8x96) {
                csiphy->base_clk_mux =
                        devm_platform_ioremap_resource_byname(pdev, res->reg[1]);
                if (IS_ERR(csiphy->base_clk_mux))
@@ -656,19 +665,23 @@ int msm_csiphy_subdev_init(struct camss *camss,
                for (j = 0; j < clock->nfreqs; j++)
                        clock->freq[j] = res->clock_rate[i][j];
 
-               if (!strcmp(clock->name, "csiphy0_timer") ||
-                   !strcmp(clock->name, "csiphy1_timer") ||
-                   !strcmp(clock->name, "csiphy2_timer") ||
-                   !strcmp(clock->name, "csiphy3_timer") ||
-                   !strcmp(clock->name, "csiphy4_timer") ||
-                   !strcmp(clock->name, "csiphy5_timer"))
-                       csiphy->rate_set[i] = true;
-
-               if (camss->version == CAMSS_660 &&
-                   (!strcmp(clock->name, "csi0_phy") ||
-                    !strcmp(clock->name, "csi1_phy") ||
-                    !strcmp(clock->name, "csi2_phy")))
-                       csiphy->rate_set[i] = true;
+               for (k = 0; k < camss->res->csiphy_num; k++) {
+                       csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
+                                                                     "csiphy%d_timer", k);
+                       if (csiphy->rate_set[i])
+                               break;
+
+                       if (camss->res->version == CAMSS_660) {
+                               csiphy->rate_set[i] = csiphy_match_clock_name(clock->name,
+                                                                             "csi%d_phy", k);
+                               if (csiphy->rate_set[i])
+                                       break;
+                       }
+
+                       csiphy->rate_set[i] = csiphy_match_clock_name(clock->name, "csiphy%d", k);
+                       if (csiphy->rate_set[i])
+                               break;
+               }
        }
 
        return 0;
index 1c14947f92d35a683e3e62a8ebb2e688492f71c4..c9b7fe82b1f0d71de9027cbc9132af871054b193 100644 (file)
@@ -83,11 +83,11 @@ struct csiphy_device {
        unsigned int nformats;
 };
 
-struct resources;
+struct camss_subdev_resources;
 
 int msm_csiphy_subdev_init(struct camss *camss,
                           struct csiphy_device *csiphy,
-                          const struct resources *res, u8 id);
+                          const struct camss_subdev_resources *res, u8 id);
 
 int msm_csiphy_register_entity(struct csiphy_device *csiphy,
                               struct v4l2_device *v4l2_dev);
index b713f5b86aba696229d531953d38aa21d00bab1e..be9d2f0a10c18cd8108692aebb7766508871169b 100644 (file)
@@ -106,10 +106,10 @@ enum ispif_intf_cmd {
 };
 
 static const u32 ispif_formats_8x16[] = {
-       MEDIA_BUS_FMT_UYVY8_2X8,
-       MEDIA_BUS_FMT_VYUY8_2X8,
-       MEDIA_BUS_FMT_YUYV8_2X8,
-       MEDIA_BUS_FMT_YVYU8_2X8,
+       MEDIA_BUS_FMT_UYVY8_1X16,
+       MEDIA_BUS_FMT_VYUY8_1X16,
+       MEDIA_BUS_FMT_YUYV8_1X16,
+       MEDIA_BUS_FMT_YVYU8_1X16,
        MEDIA_BUS_FMT_SBGGR8_1X8,
        MEDIA_BUS_FMT_SGBRG8_1X8,
        MEDIA_BUS_FMT_SGRBG8_1X8,
@@ -126,10 +126,10 @@ static const u32 ispif_formats_8x16[] = {
 };
 
 static const u32 ispif_formats_8x96[] = {
-       MEDIA_BUS_FMT_UYVY8_2X8,
-       MEDIA_BUS_FMT_VYUY8_2X8,
-       MEDIA_BUS_FMT_YUYV8_2X8,
-       MEDIA_BUS_FMT_YVYU8_2X8,
+       MEDIA_BUS_FMT_UYVY8_1X16,
+       MEDIA_BUS_FMT_VYUY8_1X16,
+       MEDIA_BUS_FMT_YUYV8_1X16,
+       MEDIA_BUS_FMT_YVYU8_1X16,
        MEDIA_BUS_FMT_SBGGR8_1X8,
        MEDIA_BUS_FMT_SGBRG8_1X8,
        MEDIA_BUS_FMT_SGRBG8_1X8,
@@ -270,7 +270,7 @@ static int ispif_vfe_reset(struct ispif_device *ispif, u8 vfe_id)
        unsigned long time;
        u32 val;
 
-       if (vfe_id > (camss->vfe_num - 1)) {
+       if (vfe_id > camss->res->vfe_num - 1) {
                dev_err(camss->dev,
                        "Error: asked reset for invalid VFE%d\n", vfe_id);
                return -ENOENT;
@@ -829,8 +829,8 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
                ispif_select_csid(ispif, intf, csid, vfe, 1);
                ispif_select_cid(ispif, intf, cid, vfe, 1);
                ispif_config_irq(ispif, intf, vfe, 1);
-               if (camss->version == CAMSS_8x96 ||
-                   camss->version == CAMSS_660)
+               if (camss->res->version == CAMSS_8x96 ||
+                   camss->res->version == CAMSS_660)
                        ispif_config_pack(ispif,
                                          line->fmt[MSM_ISPIF_PAD_SINK].code,
                                          intf, cid, vfe, 1);
@@ -847,8 +847,8 @@ static int ispif_set_stream(struct v4l2_subdev *sd, int enable)
                        return ret;
 
                mutex_lock(&ispif->config_lock);
-               if (camss->version == CAMSS_8x96 ||
-                   camss->version == CAMSS_660)
+               if (camss->res->version == CAMSS_8x96 ||
+                   camss->res->version == CAMSS_660)
                        ispif_config_pack(ispif,
                                          line->fmt[MSM_ISPIF_PAD_SINK].code,
                                          intf, cid, vfe, 0);
@@ -911,7 +911,7 @@ static void ispif_try_format(struct ispif_line *line,
 
                /* If not found, use UYVY as default */
                if (i >= line->nformats)
-                       fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+                       fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
 
                fmt->width = clamp_t(u32, fmt->width, 1, 8191);
                fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -1078,7 +1078,7 @@ static int ispif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
                .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
                              V4L2_SUBDEV_FORMAT_ACTIVE,
                .format = {
-                       .code = MEDIA_BUS_FMT_UYVY8_2X8,
+                       .code = MEDIA_BUS_FMT_UYVY8_1X16,
                        .width = 1920,
                        .height = 1080
                }
@@ -1095,7 +1095,7 @@ static int ispif_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  * Return 0 on success or a negative error code otherwise
  */
 int msm_ispif_subdev_init(struct camss *camss,
-                         const struct resources_ispif *res)
+                         const struct camss_subdev_resources *res)
 {
        struct device *dev = camss->dev;
        struct ispif_device *ispif = camss->ispif;
@@ -1109,10 +1109,10 @@ int msm_ispif_subdev_init(struct camss *camss,
        ispif->camss = camss;
 
        /* Number of ISPIF lines - same as number of CSID hardware modules */
-       if (camss->version == CAMSS_8x16)
+       if (camss->res->version == CAMSS_8x16)
                ispif->line_num = 2;
-       else if (camss->version == CAMSS_8x96 ||
-                camss->version == CAMSS_660)
+       else if (camss->res->version == CAMSS_8x96 ||
+                camss->res->version == CAMSS_660)
                ispif->line_num = 4;
        else
                return -EINVAL;
@@ -1126,12 +1126,12 @@ int msm_ispif_subdev_init(struct camss *camss,
                ispif->line[i].ispif = ispif;
                ispif->line[i].id = i;
 
-               if (camss->version == CAMSS_8x16) {
+               if (camss->res->version == CAMSS_8x16) {
                        ispif->line[i].formats = ispif_formats_8x16;
                        ispif->line[i].nformats =
                                        ARRAY_SIZE(ispif_formats_8x16);
-               } else if (camss->version == CAMSS_8x96 ||
-                          camss->version == CAMSS_660) {
+               } else if (camss->res->version == CAMSS_8x96 ||
+                          camss->res->version == CAMSS_660) {
                        ispif->line[i].formats = ispif_formats_8x96;
                        ispif->line[i].nformats =
                                        ARRAY_SIZE(ispif_formats_8x96);
@@ -1152,18 +1152,18 @@ int msm_ispif_subdev_init(struct camss *camss,
 
        /* Interrupt */
 
-       ret = platform_get_irq_byname(pdev, res->interrupt);
+       ret = platform_get_irq_byname(pdev, res->interrupt[0]);
        if (ret < 0)
                return ret;
 
        ispif->irq = ret;
        snprintf(ispif->irq_name, sizeof(ispif->irq_name), "%s_%s",
                 dev_name(dev), MSM_ISPIF_NAME);
-       if (camss->version == CAMSS_8x16)
+       if (camss->res->version == CAMSS_8x16)
                ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x16,
                               IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
-       else if (camss->version == CAMSS_8x96 ||
-                camss->version == CAMSS_660)
+       else if (camss->res->version == CAMSS_8x96 ||
+                camss->res->version == CAMSS_660)
                ret = devm_request_irq(dev, ispif->irq, ispif_isr_8x96,
                               IRQF_TRIGGER_RISING, ispif->irq_name, ispif);
        else
index fdf28e68cc7d87e79e6ea68c331cb58f4680f10c..dff6d5b35c72ed7e7c94b485d50acab2d614feb7 100644 (file)
@@ -66,10 +66,10 @@ struct ispif_device {
        struct camss *camss;
 };
 
-struct resources_ispif;
+struct camss_subdev_resources;
 
 int msm_ispif_subdev_init(struct camss *camss,
-                         const struct resources_ispif *res);
+                         const struct camss_subdev_resources *res);
 
 int msm_ispif_register_entities(struct ispif_device *ispif,
                                struct v4l2_device *v4l2_dev);
index 02494c89da91c8db750df58f95f24050eec15238..0b211fed127605a91482fdf0f7a3013ff91aeb01 100644 (file)
@@ -7,7 +7,6 @@
  * Copyright (C) 2020-2021 Linaro Ltd.
  */
 
-#include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
@@ -210,7 +209,8 @@ static void vfe_global_reset(struct vfe_device *vfe)
                         GLOBAL_RESET_CMD_IDLE_CGC      |
                         GLOBAL_RESET_CMD_RDI0          |
                         GLOBAL_RESET_CMD_RDI1          |
-                        GLOBAL_RESET_CMD_RDI2;
+                        GLOBAL_RESET_CMD_RDI2          |
+                        GLOBAL_RESET_CMD_RDI3;
 
        writel_relaxed(BIT(31), vfe->base + VFE_IRQ_MASK_0);
 
@@ -344,7 +344,7 @@ static void vfe_violation_read(struct vfe_device *vfe)
 static irqreturn_t vfe_isr(int irq, void *dev)
 {
        struct vfe_device *vfe = dev;
-       u32 status0, status1, vfe_bus_status[3];
+       u32 status0, status1, vfe_bus_status[VFE_LINE_NUM_MAX];
        int i, wm;
 
        status0 = readl_relaxed(vfe->base + VFE_IRQ_STATUS_0);
@@ -353,7 +353,7 @@ static irqreturn_t vfe_isr(int irq, void *dev)
        writel_relaxed(status0, vfe->base + VFE_IRQ_CLEAR_0);
        writel_relaxed(status1, vfe->base + VFE_IRQ_CLEAR_1);
 
-       for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++) {
+       for (i = VFE_LINE_RDI0; i < vfe->line_num; i++) {
                vfe_bus_status[i] = readl_relaxed(vfe->base + VFE_BUS_IRQ_STATUS(i));
                writel_relaxed(vfe_bus_status[i], vfe->base + VFE_BUS_IRQ_CLEAR(i));
        }
@@ -367,11 +367,11 @@ static irqreturn_t vfe_isr(int irq, void *dev)
        if (status0 & STATUS_0_RESET_ACK)
                vfe->isr_ops.reset_ack(vfe);
 
-       for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
+       for (i = VFE_LINE_RDI0; i < vfe->line_num; i++)
                if (status0 & STATUS_0_RDI_REG_UPDATE(i))
                        vfe->isr_ops.reg_update(vfe, i);
 
-       for (i = VFE_LINE_RDI0; i <= VFE_LINE_RDI2; i++)
+       for (i = VFE_LINE_RDI0; i < vfe->line_num; i++)
                if (status0 & STATUS_1_RDI_SOF(i))
                        vfe->isr_ops.sof(vfe, i);
 
@@ -494,37 +494,6 @@ static int vfe_enable_output(struct vfe_line *line)
        return 0;
 }
 
-static int vfe_disable_output(struct vfe_line *line)
-{
-       struct vfe_device *vfe = to_vfe(line);
-       struct vfe_output *output = &line->output;
-       unsigned long flags;
-       unsigned int i;
-       bool done;
-       int timeout = 0;
-
-       do {
-               spin_lock_irqsave(&vfe->output_lock, flags);
-               done = !output->gen2.active_num;
-               spin_unlock_irqrestore(&vfe->output_lock, flags);
-               usleep_range(10000, 20000);
-
-               if (timeout++ == 100) {
-                       dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
-                       vfe_reset(vfe);
-                       output->gen2.active_num = 0;
-                       return 0;
-               }
-       } while (!done);
-
-       spin_lock_irqsave(&vfe->output_lock, flags);
-       for (i = 0; i < output->wm_num; i++)
-               vfe_wm_stop(vfe, output->wm_idx[i]);
-       spin_unlock_irqrestore(&vfe->output_lock, flags);
-
-       return 0;
-}
-
 /*
  * vfe_enable - Enable streaming on VFE line
  * @line: VFE line
@@ -570,29 +539,6 @@ error_get_output:
        return ret;
 }
 
-/*
- * vfe_disable - Disable streaming on VFE line
- * @line: VFE line
- *
- * Return 0 on success or a negative error code otherwise
- */
-static int vfe_disable(struct vfe_line *line)
-{
-       struct vfe_device *vfe = to_vfe(line);
-
-       vfe_disable_output(line);
-
-       vfe_put_output(line);
-
-       mutex_lock(&vfe->stream_lock);
-
-       vfe->stream_count--;
-
-       mutex_unlock(&vfe->stream_lock);
-
-       return 0;
-}
-
 /*
  * vfe_isr_sof - Process start of frame interrupt
  * @vfe: VFE Device
@@ -689,7 +635,7 @@ static void vfe_pm_domain_off(struct vfe_device *vfe)
 {
        struct camss *camss = vfe->camss;
 
-       if (vfe->id >= camss->vfe_num)
+       if (vfe->id >= camss->res->vfe_num)
                return;
 
        device_link_del(camss->genpd_link[vfe->id]);
@@ -704,7 +650,7 @@ static int vfe_pm_domain_on(struct vfe_device *vfe)
        struct camss *camss = vfe->camss;
        enum vfe_line_id id = vfe->id;
 
-       if (id >= camss->vfe_num)
+       if (id >= camss->res->vfe_num)
                return 0;
 
        camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
@@ -769,8 +715,6 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
 {
        vfe->isr_ops = vfe_isr_ops_170;
        vfe->video_ops = vfe_video_ops_170;
-
-       vfe->line_num = VFE_LINE_NUM_GEN2;
 }
 
 const struct vfe_hw_ops vfe_ops_170 = {
@@ -787,4 +731,5 @@ const struct vfe_hw_ops vfe_ops_170 = {
        .vfe_enable = vfe_enable,
        .vfe_halt = vfe_halt,
        .violation_read = vfe_violation_read,
+       .vfe_wm_stop = vfe_wm_stop,
 };
index 42047b11ba529e97c0ff2cac1c54a7c64246ec93..2911e4126e7adb5a47fbd3591cd617aee2dfb7ed 100644 (file)
@@ -614,20 +614,20 @@ static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
        writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
 
        switch (line->fmt[MSM_VFE_PAD_SINK].code) {
-       case MEDIA_BUS_FMT_YUYV8_2X8:
+       case MEDIA_BUS_FMT_YUYV8_1X16:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
                break;
-       case MEDIA_BUS_FMT_YVYU8_2X8:
+       case MEDIA_BUS_FMT_YVYU8_1X16:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
                break;
-       case MEDIA_BUS_FMT_UYVY8_2X8:
+       case MEDIA_BUS_FMT_UYVY8_1X16:
        default:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
                break;
-       case MEDIA_BUS_FMT_VYUY8_2X8:
+       case MEDIA_BUS_FMT_VYUY8_1X16:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
                break;
@@ -775,17 +775,17 @@ static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
        u32 val;
 
        switch (line->fmt[MSM_VFE_PAD_SINK].code) {
-       case MEDIA_BUS_FMT_YUYV8_2X8:
+       case MEDIA_BUS_FMT_YUYV8_1X16:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
                break;
-       case MEDIA_BUS_FMT_YVYU8_2X8:
+       case MEDIA_BUS_FMT_YVYU8_1X16:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
                break;
-       case MEDIA_BUS_FMT_UYVY8_2X8:
+       case MEDIA_BUS_FMT_UYVY8_1X16:
        default:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
                break;
-       case MEDIA_BUS_FMT_VYUY8_2X8:
+       case MEDIA_BUS_FMT_VYUY8_1X16:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
                break;
        }
@@ -992,8 +992,6 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
        vfe->isr_ops = vfe_isr_ops_gen1;
        vfe->ops_gen1 = &vfe_ops_gen1_4_1;
        vfe->video_ops = vfe_video_ops_gen1;
-
-       vfe->line_num = VFE_LINE_NUM_GEN1;
 }
 
 const struct vfe_hw_ops vfe_ops_4_1 = {
index ab2d57bdf5e71c7e5de9ba029c901a071e2d6218..b65ed0fef595e8e6fb8914f0ffb6e00a23ce6b0d 100644 (file)
@@ -768,20 +768,20 @@ static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
        writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
 
        switch (line->fmt[MSM_VFE_PAD_SINK].code) {
-       case MEDIA_BUS_FMT_YUYV8_2X8:
+       case MEDIA_BUS_FMT_YUYV8_1X16:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
                break;
-       case MEDIA_BUS_FMT_YVYU8_2X8:
+       case MEDIA_BUS_FMT_YVYU8_1X16:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
                break;
-       case MEDIA_BUS_FMT_UYVY8_2X8:
+       case MEDIA_BUS_FMT_UYVY8_1X16:
        default:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
                break;
-       case MEDIA_BUS_FMT_VYUY8_2X8:
+       case MEDIA_BUS_FMT_VYUY8_1X16:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
                break;
@@ -941,17 +941,17 @@ static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
        u32 val;
 
        switch (line->fmt[MSM_VFE_PAD_SINK].code) {
-       case MEDIA_BUS_FMT_YUYV8_2X8:
+       case MEDIA_BUS_FMT_YUYV8_1X16:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
                break;
-       case MEDIA_BUS_FMT_YVYU8_2X8:
+       case MEDIA_BUS_FMT_YVYU8_1X16:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
                break;
-       case MEDIA_BUS_FMT_UYVY8_2X8:
+       case MEDIA_BUS_FMT_UYVY8_1X16:
        default:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
                break;
-       case MEDIA_BUS_FMT_VYUY8_2X8:
+       case MEDIA_BUS_FMT_VYUY8_1X16:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
                break;
        }
@@ -1188,8 +1188,6 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
        vfe->isr_ops = vfe_isr_ops_gen1;
        vfe->ops_gen1 = &vfe_ops_gen1_4_7;
        vfe->video_ops = vfe_video_ops_gen1;
-
-       vfe->line_num = VFE_LINE_NUM_GEN1;
 }
 
 const struct vfe_hw_ops vfe_ops_4_7 = {
index 7e6b62c930ac8abe23992ae781b6e8ca4536fcbb..7b3805177f037fe9a6bbb8ebc27355d3017f906c 100644 (file)
@@ -739,20 +739,20 @@ static void vfe_set_demux_cfg(struct vfe_device *vfe, struct vfe_line *line)
        writel_relaxed(val, vfe->base + VFE_0_DEMUX_GAIN_1);
 
        switch (line->fmt[MSM_VFE_PAD_SINK].code) {
-       case MEDIA_BUS_FMT_YUYV8_2X8:
+       case MEDIA_BUS_FMT_YUYV8_1X16:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YUYV;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YUYV;
                break;
-       case MEDIA_BUS_FMT_YVYU8_2X8:
+       case MEDIA_BUS_FMT_YVYU8_1X16:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_YVYU;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_YVYU;
                break;
-       case MEDIA_BUS_FMT_UYVY8_2X8:
+       case MEDIA_BUS_FMT_UYVY8_1X16:
        default:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_UYVY;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_UYVY;
                break;
-       case MEDIA_BUS_FMT_VYUY8_2X8:
+       case MEDIA_BUS_FMT_VYUY8_1X16:
                even_cfg = VFE_0_DEMUX_EVEN_CFG_PATTERN_VYUY;
                odd_cfg = VFE_0_DEMUX_ODD_CFG_PATTERN_VYUY;
                break;
@@ -873,17 +873,17 @@ static void vfe_set_camif_cfg(struct vfe_device *vfe, struct vfe_line *line)
        u32 val;
 
        switch (line->fmt[MSM_VFE_PAD_SINK].code) {
-       case MEDIA_BUS_FMT_YUYV8_2X8:
+       case MEDIA_BUS_FMT_YUYV8_1X16:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCBYCR;
                break;
-       case MEDIA_BUS_FMT_YVYU8_2X8:
+       case MEDIA_BUS_FMT_YVYU8_1X16:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_YCRYCB;
                break;
-       case MEDIA_BUS_FMT_UYVY8_2X8:
+       case MEDIA_BUS_FMT_UYVY8_1X16:
        default:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_CBYCRY;
                break;
-       case MEDIA_BUS_FMT_VYUY8_2X8:
+       case MEDIA_BUS_FMT_VYUY8_1X16:
                val = VFE_0_CORE_CFG_PIXEL_PATTERN_CRYCBY;
                break;
        }
@@ -1173,8 +1173,6 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
        vfe->isr_ops = vfe_isr_ops_gen1;
        vfe->ops_gen1 = &vfe_ops_gen1_4_8;
        vfe->video_ops = vfe_video_ops_gen1;
-
-       vfe->line_num = VFE_LINE_NUM_GEN1;
 }
 
 const struct vfe_hw_ops vfe_ops_4_8 = {
index f70aad2e8c2378fb7e07b1e8e374a941c3e6a1b7..f2368b77fc6d6a0029b68483f8d17f9706666240 100644 (file)
@@ -8,7 +8,6 @@
  * Copyright (C) 2021 Jonathan Marek
  */
 
-#include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
@@ -328,37 +327,6 @@ static int vfe_enable_output(struct vfe_line *line)
        return 0;
 }
 
-static int vfe_disable_output(struct vfe_line *line)
-{
-       struct vfe_device *vfe = to_vfe(line);
-       struct vfe_output *output = &line->output;
-       unsigned long flags;
-       unsigned int i;
-       bool done;
-       int timeout = 0;
-
-       do {
-               spin_lock_irqsave(&vfe->output_lock, flags);
-               done = !output->gen2.active_num;
-               spin_unlock_irqrestore(&vfe->output_lock, flags);
-               usleep_range(10000, 20000);
-
-               if (timeout++ == 100) {
-                       dev_err(vfe->camss->dev, "VFE idle timeout - resetting\n");
-                       vfe_reset(vfe);
-                       output->gen2.active_num = 0;
-                       return 0;
-               }
-       } while (!done);
-
-       spin_lock_irqsave(&vfe->output_lock, flags);
-       for (i = 0; i < output->wm_num; i++)
-               vfe_wm_stop(vfe, output->wm_idx[i]);
-       spin_unlock_irqrestore(&vfe->output_lock, flags);
-
-       return 0;
-}
-
 /*
  * vfe_enable - Enable streaming on VFE line
  * @line: VFE line
@@ -406,29 +374,6 @@ error_get_output:
        return ret;
 }
 
-/*
- * vfe_disable - Disable streaming on VFE line
- * @line: VFE line
- *
- * Return 0 on success or a negative error code otherwise
- */
-static int vfe_disable(struct vfe_line *line)
-{
-       struct vfe_device *vfe = to_vfe(line);
-
-       vfe_disable_output(line);
-
-       vfe_put_output(line);
-
-       mutex_lock(&vfe->stream_lock);
-
-       vfe->stream_count--;
-
-       mutex_unlock(&vfe->stream_lock);
-
-       return 0;
-}
-
 /*
  * vfe_isr_reg_update - Process reg update interrupt
  * @vfe: VFE Device
@@ -515,7 +460,7 @@ static void vfe_pm_domain_off(struct vfe_device *vfe)
 {
        struct camss *camss = vfe->camss;
 
-       if (vfe->id >= camss->vfe_num)
+       if (vfe->id >= camss->res->vfe_num)
                return;
 
        device_link_del(camss->genpd_link[vfe->id]);
@@ -530,7 +475,7 @@ static int vfe_pm_domain_on(struct vfe_device *vfe)
        struct camss *camss = vfe->camss;
        enum vfe_line_id id = vfe->id;
 
-       if (id >= camss->vfe_num)
+       if (id >= camss->res->vfe_num)
                return 0;
 
        camss->genpd_link[id] = device_link_add(camss->dev, camss->genpd[id],
@@ -585,7 +530,6 @@ static const struct camss_video_ops vfe_video_ops_480 = {
 static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
 {
        vfe->video_ops = vfe_video_ops_480;
-       vfe->line_num = MAX_VFE_OUTPUT_LINES;
 }
 
 const struct vfe_hw_ops vfe_ops_480 = {
@@ -598,4 +542,5 @@ const struct vfe_hw_ops vfe_ops_480 = {
        .vfe_disable = vfe_disable,
        .vfe_enable = vfe_enable,
        .vfe_halt = vfe_halt,
+       .vfe_wm_stop = vfe_wm_stop,
 };
index 06c95568e5af4ee3eacdcb938719c02b9a7f1f13..4839e2cedfe584842202bf6fd46dd9de3abb9dc0 100644 (file)
@@ -37,10 +37,10 @@ struct vfe_format {
 };
 
 static const struct vfe_format formats_rdi_8x16[] = {
-       { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
-       { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
-       { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
-       { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+       { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+       { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+       { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+       { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
        { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
        { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
        { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -57,17 +57,17 @@ static const struct vfe_format formats_rdi_8x16[] = {
 };
 
 static const struct vfe_format formats_pix_8x16[] = {
-       { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
-       { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
-       { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
-       { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+       { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+       { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+       { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+       { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
 };
 
 static const struct vfe_format formats_rdi_8x96[] = {
-       { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
-       { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
-       { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
-       { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+       { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+       { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+       { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+       { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
        { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
        { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
        { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -90,17 +90,17 @@ static const struct vfe_format formats_rdi_8x96[] = {
 };
 
 static const struct vfe_format formats_pix_8x96[] = {
-       { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
-       { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
-       { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
-       { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+       { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+       { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+       { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+       { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
 };
 
 static const struct vfe_format formats_rdi_845[] = {
-       { MEDIA_BUS_FMT_UYVY8_2X8, 8 },
-       { MEDIA_BUS_FMT_VYUY8_2X8, 8 },
-       { MEDIA_BUS_FMT_YUYV8_2X8, 8 },
-       { MEDIA_BUS_FMT_YVYU8_2X8, 8 },
+       { MEDIA_BUS_FMT_UYVY8_1X16, 8 },
+       { MEDIA_BUS_FMT_VYUY8_1X16, 8 },
+       { MEDIA_BUS_FMT_YUYV8_1X16, 8 },
+       { MEDIA_BUS_FMT_YVYU8_1X16, 8 },
        { MEDIA_BUS_FMT_SBGGR8_1X8, 8 },
        { MEDIA_BUS_FMT_SGBRG8_1X8, 8 },
        { MEDIA_BUS_FMT_SGRBG8_1X8, 8 },
@@ -170,42 +170,43 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
 {
        struct vfe_device *vfe = to_vfe(line);
 
-       if (vfe->camss->version == CAMSS_8x16)
+       switch (vfe->camss->res->version) {
+       case CAMSS_8x16:
                switch (sink_code) {
-               case MEDIA_BUS_FMT_YUYV8_2X8:
+               case MEDIA_BUS_FMT_YUYV8_1X16:
                {
                        u32 src_code[] = {
-                               MEDIA_BUS_FMT_YUYV8_2X8,
+                               MEDIA_BUS_FMT_YUYV8_1X16,
                                MEDIA_BUS_FMT_YUYV8_1_5X8,
                        };
 
                        return vfe_find_code(src_code, ARRAY_SIZE(src_code),
                                             index, src_req_code);
                }
-               case MEDIA_BUS_FMT_YVYU8_2X8:
+               case MEDIA_BUS_FMT_YVYU8_1X16:
                {
                        u32 src_code[] = {
-                               MEDIA_BUS_FMT_YVYU8_2X8,
+                               MEDIA_BUS_FMT_YVYU8_1X16,
                                MEDIA_BUS_FMT_YVYU8_1_5X8,
                        };
 
                        return vfe_find_code(src_code, ARRAY_SIZE(src_code),
                                             index, src_req_code);
                }
-               case MEDIA_BUS_FMT_UYVY8_2X8:
+               case MEDIA_BUS_FMT_UYVY8_1X16:
                {
                        u32 src_code[] = {
-                               MEDIA_BUS_FMT_UYVY8_2X8,
+                               MEDIA_BUS_FMT_UYVY8_1X16,
                                MEDIA_BUS_FMT_UYVY8_1_5X8,
                        };
 
                        return vfe_find_code(src_code, ARRAY_SIZE(src_code),
                                             index, src_req_code);
                }
-               case MEDIA_BUS_FMT_VYUY8_2X8:
+               case MEDIA_BUS_FMT_VYUY8_1X16:
                {
                        u32 src_code[] = {
-                               MEDIA_BUS_FMT_VYUY8_2X8,
+                               MEDIA_BUS_FMT_VYUY8_1X16,
                                MEDIA_BUS_FMT_VYUY8_1_5X8,
                        };
 
@@ -218,57 +219,58 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
 
                        return sink_code;
                }
-       else if (vfe->camss->version == CAMSS_8x96 ||
-                vfe->camss->version == CAMSS_660 ||
-                vfe->camss->version == CAMSS_845 ||
-                vfe->camss->version == CAMSS_8250)
+               break;
+       case CAMSS_8x96:
+       case CAMSS_660:
+       case CAMSS_845:
+       case CAMSS_8250:
                switch (sink_code) {
-               case MEDIA_BUS_FMT_YUYV8_2X8:
+               case MEDIA_BUS_FMT_YUYV8_1X16:
                {
                        u32 src_code[] = {
-                               MEDIA_BUS_FMT_YUYV8_2X8,
-                               MEDIA_BUS_FMT_YVYU8_2X8,
-                               MEDIA_BUS_FMT_UYVY8_2X8,
-                               MEDIA_BUS_FMT_VYUY8_2X8,
+                               MEDIA_BUS_FMT_YUYV8_1X16,
+                               MEDIA_BUS_FMT_YVYU8_1X16,
+                               MEDIA_BUS_FMT_UYVY8_1X16,
+                               MEDIA_BUS_FMT_VYUY8_1X16,
                                MEDIA_BUS_FMT_YUYV8_1_5X8,
                        };
 
                        return vfe_find_code(src_code, ARRAY_SIZE(src_code),
                                             index, src_req_code);
                }
-               case MEDIA_BUS_FMT_YVYU8_2X8:
+               case MEDIA_BUS_FMT_YVYU8_1X16:
                {
                        u32 src_code[] = {
-                               MEDIA_BUS_FMT_YVYU8_2X8,
-                               MEDIA_BUS_FMT_YUYV8_2X8,
-                               MEDIA_BUS_FMT_UYVY8_2X8,
-                               MEDIA_BUS_FMT_VYUY8_2X8,
+                               MEDIA_BUS_FMT_YVYU8_1X16,
+                               MEDIA_BUS_FMT_YUYV8_1X16,
+                               MEDIA_BUS_FMT_UYVY8_1X16,
+                               MEDIA_BUS_FMT_VYUY8_1X16,
                                MEDIA_BUS_FMT_YVYU8_1_5X8,
                        };
 
                        return vfe_find_code(src_code, ARRAY_SIZE(src_code),
                                             index, src_req_code);
                }
-               case MEDIA_BUS_FMT_UYVY8_2X8:
+               case MEDIA_BUS_FMT_UYVY8_1X16:
                {
                        u32 src_code[] = {
-                               MEDIA_BUS_FMT_UYVY8_2X8,
-                               MEDIA_BUS_FMT_YUYV8_2X8,
-                               MEDIA_BUS_FMT_YVYU8_2X8,
-                               MEDIA_BUS_FMT_VYUY8_2X8,
+                               MEDIA_BUS_FMT_UYVY8_1X16,
+                               MEDIA_BUS_FMT_YUYV8_1X16,
+                               MEDIA_BUS_FMT_YVYU8_1X16,
+                               MEDIA_BUS_FMT_VYUY8_1X16,
                                MEDIA_BUS_FMT_UYVY8_1_5X8,
                        };
 
                        return vfe_find_code(src_code, ARRAY_SIZE(src_code),
                                             index, src_req_code);
                }
-               case MEDIA_BUS_FMT_VYUY8_2X8:
+               case MEDIA_BUS_FMT_VYUY8_1X16:
                {
                        u32 src_code[] = {
-                               MEDIA_BUS_FMT_VYUY8_2X8,
-                               MEDIA_BUS_FMT_YUYV8_2X8,
-                               MEDIA_BUS_FMT_YVYU8_2X8,
-                               MEDIA_BUS_FMT_UYVY8_2X8,
+                               MEDIA_BUS_FMT_VYUY8_1X16,
+                               MEDIA_BUS_FMT_YUYV8_1X16,
+                               MEDIA_BUS_FMT_YVYU8_1X16,
+                               MEDIA_BUS_FMT_UYVY8_1X16,
                                MEDIA_BUS_FMT_VYUY8_1_5X8,
                        };
 
@@ -281,8 +283,9 @@ static u32 vfe_src_pad_code(struct vfe_line *line, u32 sink_code,
 
                        return sink_code;
                }
-       else
-               return 0;
+               break;
+       }
+       return 0;
 }
 
 int vfe_reset(struct vfe_device *vfe)
@@ -407,6 +410,49 @@ int vfe_put_output(struct vfe_line *line)
        return 0;
 }
 
+static int vfe_disable_output(struct vfe_line *line)
+{
+       struct vfe_device *vfe = to_vfe(line);
+       struct vfe_output *output = &line->output;
+       unsigned long flags;
+       unsigned int i;
+
+       spin_lock_irqsave(&vfe->output_lock, flags);
+       for (i = 0; i < output->wm_num; i++)
+               vfe->ops->vfe_wm_stop(vfe, output->wm_idx[i]);
+       output->gen2.active_num = 0;
+       spin_unlock_irqrestore(&vfe->output_lock, flags);
+
+       return vfe_reset(vfe);
+}
+
+/*
+ * vfe_disable - Disable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_disable(struct vfe_line *line)
+{
+       struct vfe_device *vfe = to_vfe(line);
+       int ret;
+
+       ret = vfe_disable_output(line);
+       if (ret)
+               goto error;
+
+       vfe_put_output(line);
+
+       mutex_lock(&vfe->stream_lock);
+
+       vfe->stream_count--;
+
+       mutex_unlock(&vfe->stream_lock);
+
+error:
+       return ret;
+}
+
 /**
  * vfe_isr_comp_done() - Process composite image done interrupt
  * @vfe: VFE Device
@@ -428,6 +474,20 @@ void vfe_isr_reset_ack(struct vfe_device *vfe)
        complete(&vfe->reset_complete);
 }
 
+static int vfe_match_clock_names(struct vfe_device *vfe,
+                                struct camss_clock *clock)
+{
+       char vfe_name[7]; /* vfeXXX\0 */
+       char vfe_lite_name[12]; /* vfe_liteXXX\0 */
+
+       snprintf(vfe_name, sizeof(vfe_name), "vfe%d", vfe->id);
+       snprintf(vfe_lite_name, sizeof(vfe_lite_name), "vfe_lite%d", vfe->id);
+
+       return (!strcmp(clock->name, vfe_name) ||
+               !strcmp(clock->name, vfe_lite_name) ||
+               !strcmp(clock->name, "vfe_lite"));
+}
+
 /*
  * vfe_set_clock_rates - Calculate and set clock rates on VFE module
  * @vfe: VFE device
@@ -451,9 +511,7 @@ static int vfe_set_clock_rates(struct vfe_device *vfe)
        for (i = 0; i < vfe->nclocks; i++) {
                struct camss_clock *clock = &vfe->clock[i];
 
-               if (!strcmp(clock->name, "vfe0") ||
-                   !strcmp(clock->name, "vfe1") ||
-                   !strcmp(clock->name, "vfe_lite")) {
+               if (vfe_match_clock_names(vfe, clock)) {
                        u64 min_rate = 0;
                        long rate;
 
@@ -534,8 +592,7 @@ static int vfe_check_clock_rates(struct vfe_device *vfe)
        for (i = 0; i < vfe->nclocks; i++) {
                struct camss_clock *clock = &vfe->clock[i];
 
-               if (!strcmp(clock->name, "vfe0") ||
-                   !strcmp(clock->name, "vfe1")) {
+               if (vfe_match_clock_names(vfe, clock)) {
                        u64 min_rate = 0;
                        unsigned long rate;
 
@@ -611,7 +668,7 @@ int vfe_get(struct vfe_device *vfe)
        } else {
                ret = vfe_check_clock_rates(vfe);
                if (ret < 0)
-                       goto error_pm_runtime_get;
+                       goto error_pm_domain;
        }
        vfe->power_count++;
 
@@ -844,7 +901,7 @@ static void vfe_try_format(struct vfe_line *line,
 
                /* If not found, use UYVY as default */
                if (i >= line->nformats)
-                       fmt->code = MEDIA_BUS_FMT_UYVY8_2X8;
+                       fmt->code = MEDIA_BUS_FMT_UYVY8_1X16;
 
                fmt->width = clamp_t(u32, fmt->width, 1, 8191);
                fmt->height = clamp_t(u32, fmt->height, 1, 8191);
@@ -1261,7 +1318,7 @@ static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
                .which = fh ? V4L2_SUBDEV_FORMAT_TRY :
                              V4L2_SUBDEV_FORMAT_ACTIVE,
                .format = {
-                       .code = MEDIA_BUS_FMT_UYVY8_2X8,
+                       .code = MEDIA_BUS_FMT_UYVY8_1X16,
                        .width = 1920,
                        .height = 1080
                }
@@ -1278,32 +1335,19 @@ static int vfe_init_formats(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
  * Return 0 on success or a negative error code otherwise
  */
 int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
-                       const struct resources *res, u8 id)
+                       const struct camss_subdev_resources *res, u8 id)
 {
        struct device *dev = camss->dev;
        struct platform_device *pdev = to_platform_device(dev);
        int i, j;
        int ret;
 
-       switch (camss->version) {
-       case CAMSS_8x16:
-               vfe->ops = &vfe_ops_4_1;
-               break;
-       case CAMSS_8x96:
-               vfe->ops = &vfe_ops_4_7;
-               break;
-       case CAMSS_660:
-               vfe->ops = &vfe_ops_4_8;
-               break;
-       case CAMSS_845:
-               vfe->ops = &vfe_ops_170;
-               break;
-       case CAMSS_8250:
-               vfe->ops = &vfe_ops_480;
-               break;
-       default:
+       vfe->ops = res->ops;
+
+       if (!res->line_num)
                return -EINVAL;
-       }
+
+       vfe->line_num = res->line_num;
        vfe->ops->subdev_init(dev, vfe);
 
        /* Memory */
@@ -1391,7 +1435,8 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
                init_completion(&l->output.sof);
                init_completion(&l->output.reg_update);
 
-               if (camss->version == CAMSS_8x16) {
+               switch (camss->res->version) {
+               case CAMSS_8x16:
                        if (i == VFE_LINE_PIX) {
                                l->formats = formats_pix_8x16;
                                l->nformats = ARRAY_SIZE(formats_pix_8x16);
@@ -1399,8 +1444,9 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
                                l->formats = formats_rdi_8x16;
                                l->nformats = ARRAY_SIZE(formats_rdi_8x16);
                        }
-               } else if (camss->version == CAMSS_8x96 ||
-                          camss->version == CAMSS_660) {
+                       break;
+               case CAMSS_8x96:
+               case CAMSS_660:
                        if (i == VFE_LINE_PIX) {
                                l->formats = formats_pix_8x96;
                                l->nformats = ARRAY_SIZE(formats_pix_8x96);
@@ -1408,12 +1454,12 @@ int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
                                l->formats = formats_rdi_8x96;
                                l->nformats = ARRAY_SIZE(formats_rdi_8x96);
                        }
-               } else if (camss->version == CAMSS_845 ||
-                          camss->version == CAMSS_8250) {
+                       break;
+               case CAMSS_845:
+               case CAMSS_8250:
                        l->formats = formats_rdi_845;
                        l->nformats = ARRAY_SIZE(formats_rdi_845);
-               } else {
-                       return -EINVAL;
+                       break;
                }
        }
 
@@ -1541,8 +1587,8 @@ int msm_vfe_register_entities(struct vfe_device *vfe,
                }
 
                video_out->ops = &vfe->video_ops;
-               if (vfe->camss->version == CAMSS_845 ||
-                   vfe->camss->version == CAMSS_8250)
+               if (vfe->camss->res->version == CAMSS_845 ||
+                   vfe->camss->res->version == CAMSS_8250)
                        video_out->bpl_alignment = 16;
                else
                        video_out->bpl_alignment = 8;
index cbc314c4e244b4a20e4dae4b6f100fbfb4ae472c..09baded0dcdd63ef7e56c0bf7b2873d4bcdf2119 100644 (file)
@@ -52,9 +52,7 @@ enum vfe_line_id {
        VFE_LINE_RDI0 = 0,
        VFE_LINE_RDI1 = 1,
        VFE_LINE_RDI2 = 2,
-       VFE_LINE_NUM_GEN2 = 3,
        VFE_LINE_PIX = 3,
-       VFE_LINE_NUM_GEN1 = 4,
        VFE_LINE_NUM_MAX = 4
 };
 
@@ -116,6 +114,7 @@ struct vfe_hw_ops {
        int (*vfe_enable)(struct vfe_line *line);
        int (*vfe_halt)(struct vfe_device *vfe);
        void (*violation_read)(struct vfe_device *vfe);
+       void (*vfe_wm_stop)(struct vfe_device *vfe, u8 wm);
 };
 
 struct vfe_isr_ops {
@@ -153,10 +152,10 @@ struct vfe_device {
        struct camss_video_ops video_ops;
 };
 
-struct resources;
+struct camss_subdev_resources;
 
 int msm_vfe_subdev_init(struct camss *camss, struct vfe_device *vfe,
-                       const struct resources *res, u8 id);
+                       const struct camss_subdev_resources *res, u8 id);
 
 int msm_vfe_register_entities(struct vfe_device *vfe,
                              struct v4l2_device *v4l2_dev);
@@ -194,6 +193,14 @@ int vfe_reserve_wm(struct vfe_device *vfe, enum vfe_line_id line_id);
  */
 int vfe_reset(struct vfe_device *vfe);
 
+/*
+ * vfe_disable - Disable streaming on VFE line
+ * @line: VFE line
+ *
+ * Return 0 on success or a negative error code otherwise
+ */
+int vfe_disable(struct vfe_line *line);
+
 extern const struct vfe_hw_ops vfe_ops_4_1;
 extern const struct vfe_hw_ops vfe_ops_4_7;
 extern const struct vfe_hw_ops vfe_ops_4_8;
index 8640db306026804270e82cb4642f9592f51b5c0b..a89da5ef47109a3dc8d7f298392489350023db86 100644 (file)
@@ -48,13 +48,13 @@ struct camss_format_info {
 };
 
 static const struct camss_format_info formats_rdi_8x16[] = {
-       { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+       { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+       { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_VYUY, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+       { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+       { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_YVYU, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
        { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 8 } },
@@ -85,13 +85,13 @@ static const struct camss_format_info formats_rdi_8x16[] = {
 };
 
 static const struct camss_format_info formats_rdi_8x96[] = {
-       { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+       { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+       { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_VYUY, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+       { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+       { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_YVYU, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
        { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 8 } },
@@ -134,13 +134,13 @@ static const struct camss_format_info formats_rdi_8x96[] = {
 };
 
 static const struct camss_format_info formats_rdi_845[] = {
-       { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+       { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+       { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_VYUY, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+       { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+       { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_YVYU, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
        { MEDIA_BUS_FMT_SBGGR8_1X8, V4L2_PIX_FMT_SBGGR8, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 8 } },
@@ -201,21 +201,21 @@ static const struct camss_format_info formats_pix_8x16[] = {
          { { 1, 1 } }, { { 2, 3 } }, { 8 } },
        { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
          { { 1, 1 } }, { { 2, 3 } }, { 8 } },
-       { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1,
+       { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_NV16, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1,
+       { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_NV16, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1,
+       { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_NV16, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1,
+       { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_NV16, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1,
+       { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_NV61, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1,
+       { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_NV61, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1,
+       { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_NV61, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1,
+       { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_NV61, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
 };
 
@@ -236,29 +236,29 @@ static const struct camss_format_info formats_pix_8x96[] = {
          { { 1, 1 } }, { { 2, 3 } }, { 8 } },
        { MEDIA_BUS_FMT_VYUY8_1_5X8, V4L2_PIX_FMT_NV21, 1,
          { { 1, 1 } }, { { 2, 3 } }, { 8 } },
-       { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV16, 1,
+       { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_NV16, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV16, 1,
+       { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_NV16, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV16, 1,
+       { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_NV16, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV16, 1,
+       { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_NV16, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_NV61, 1,
+       { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_NV61, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_NV61, 1,
+       { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_NV61, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_NV61, 1,
+       { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_NV61, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_NV61, 1,
+       { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_NV61, 1,
          { { 1, 1 } }, { { 1, 2 } }, { 8 } },
-       { MEDIA_BUS_FMT_UYVY8_2X8, V4L2_PIX_FMT_UYVY, 1,
+       { MEDIA_BUS_FMT_UYVY8_1X16, V4L2_PIX_FMT_UYVY, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_VYUY8_2X8, V4L2_PIX_FMT_VYUY, 1,
+       { MEDIA_BUS_FMT_VYUY8_1X16, V4L2_PIX_FMT_VYUY, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_YUYV8_2X8, V4L2_PIX_FMT_YUYV, 1,
+       { MEDIA_BUS_FMT_YUYV8_1X16, V4L2_PIX_FMT_YUYV, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
-       { MEDIA_BUS_FMT_YVYU8_2X8, V4L2_PIX_FMT_YVYU, 1,
+       { MEDIA_BUS_FMT_YVYU8_1X16, V4L2_PIX_FMT_YVYU, 1,
          { { 1, 1 } }, { { 1, 1 } }, { 16 } },
 };
 
@@ -1006,7 +1006,8 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
 
        mutex_init(&video->lock);
 
-       if (video->camss->version == CAMSS_8x16) {
+       switch (video->camss->res->version) {
+       case CAMSS_8x16:
                if (is_pix) {
                        video->formats = formats_pix_8x16;
                        video->nformats = ARRAY_SIZE(formats_pix_8x16);
@@ -1014,8 +1015,9 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
                        video->formats = formats_rdi_8x16;
                        video->nformats = ARRAY_SIZE(formats_rdi_8x16);
                }
-       } else if (video->camss->version == CAMSS_8x96 ||
-                  video->camss->version == CAMSS_660) {
+               break;
+       case CAMSS_8x96:
+       case CAMSS_660:
                if (is_pix) {
                        video->formats = formats_pix_8x96;
                        video->nformats = ARRAY_SIZE(formats_pix_8x96);
@@ -1023,13 +1025,12 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
                        video->formats = formats_rdi_8x96;
                        video->nformats = ARRAY_SIZE(formats_rdi_8x96);
                }
-       }  else if (video->camss->version == CAMSS_845 ||
-                   video->camss->version == CAMSS_8250) {
+               break;
+       case CAMSS_845:
+       case CAMSS_8250:
                video->formats = formats_rdi_845;
                video->nformats = ARRAY_SIZE(formats_rdi_845);
-       } else {
-               ret = -EINVAL;
-               goto error_video_register;
+               break;
        }
 
        ret = msm_video_init_format(video);
index f11dc59135a5acd83ae686b065e95b341067e95e..8e78dd8d5961e0976c4de556a38eb398c5382b75 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/of_graph.h>
 #include <linux/pm_runtime.h>
 #include <linux/pm_domain.h>
@@ -31,7 +32,7 @@
 #define CAMSS_CLOCK_MARGIN_NUMERATOR 105
 #define CAMSS_CLOCK_MARGIN_DENOMINATOR 100
 
-static const struct resources csiphy_res_8x16[] = {
+static const struct camss_subdev_resources csiphy_res_8x16[] = {
        /* CSIPHY0 */
        {
                .regulators = {},
@@ -41,7 +42,8 @@ static const struct resources csiphy_res_8x16[] = {
                                { 0 },
                                { 100000000, 200000000 } },
                .reg = { "csiphy0", "csiphy0_clk_mux" },
-               .interrupt = { "csiphy0" }
+               .interrupt = { "csiphy0" },
+               .ops = &csiphy_ops_2ph_1_0
        },
 
        /* CSIPHY1 */
@@ -53,11 +55,12 @@ static const struct resources csiphy_res_8x16[] = {
                                { 0 },
                                { 100000000, 200000000 } },
                .reg = { "csiphy1", "csiphy1_clk_mux" },
-               .interrupt = { "csiphy1" }
+               .interrupt = { "csiphy1" },
+               .ops = &csiphy_ops_2ph_1_0
        }
 };
 
-static const struct resources csid_res_8x16[] = {
+static const struct camss_subdev_resources csid_res_8x16[] = {
        /* CSID0 */
        {
                .regulators = { "vdda" },
@@ -72,7 +75,8 @@ static const struct resources csid_res_8x16[] = {
                                { 0 },
                                { 0 } },
                .reg = { "csid0" },
-               .interrupt = { "csid0" }
+               .interrupt = { "csid0" },
+               .ops = &csid_ops_4_1,
        },
 
        /* CSID1 */
@@ -89,22 +93,23 @@ static const struct resources csid_res_8x16[] = {
                                { 0 },
                                { 0 } },
                .reg = { "csid1" },
-               .interrupt = { "csid1" }
+               .interrupt = { "csid1" },
+               .ops = &csid_ops_4_1,
        },
 };
 
-static const struct resources_ispif ispif_res_8x16 = {
+static const struct camss_subdev_resources ispif_res_8x16 = {
        /* ISPIF */
        .clock = { "top_ahb", "ahb", "ispif_ahb",
                   "csi0", "csi0_pix", "csi0_rdi",
                   "csi1", "csi1_pix", "csi1_rdi" },
        .clock_for_reset = { "vfe0", "csi_vfe0" },
        .reg = { "ispif", "csi_clk_mux" },
-       .interrupt = "ispif"
+       .interrupt = { "ispif" }
 
 };
 
-static const struct resources vfe_res_8x16[] = {
+static const struct camss_subdev_resources vfe_res_8x16[] = {
        /* VFE0 */
        {
                .regulators = {},
@@ -122,11 +127,13 @@ static const struct resources vfe_res_8x16[] = {
                                { 0 },
                                { 0 } },
                .reg = { "vfe0" },
-               .interrupt = { "vfe0" }
+               .interrupt = { "vfe0" },
+               .line_num = 3,
+               .ops = &vfe_ops_4_1
        }
 };
 
-static const struct resources csiphy_res_8x96[] = {
+static const struct camss_subdev_resources csiphy_res_8x96[] = {
        /* CSIPHY0 */
        {
                .regulators = {},
@@ -136,7 +143,8 @@ static const struct resources csiphy_res_8x96[] = {
                                { 0 },
                                { 100000000, 200000000, 266666667 } },
                .reg = { "csiphy0", "csiphy0_clk_mux" },
-               .interrupt = { "csiphy0" }
+               .interrupt = { "csiphy0" },
+               .ops = &csiphy_ops_3ph_1_0
        },
 
        /* CSIPHY1 */
@@ -148,7 +156,8 @@ static const struct resources csiphy_res_8x96[] = {
                                { 0 },
                                { 100000000, 200000000, 266666667 } },
                .reg = { "csiphy1", "csiphy1_clk_mux" },
-               .interrupt = { "csiphy1" }
+               .interrupt = { "csiphy1" },
+               .ops = &csiphy_ops_3ph_1_0
        },
 
        /* CSIPHY2 */
@@ -160,11 +169,12 @@ static const struct resources csiphy_res_8x96[] = {
                                { 0 },
                                { 100000000, 200000000, 266666667 } },
                .reg = { "csiphy2", "csiphy2_clk_mux" },
-               .interrupt = { "csiphy2" }
+               .interrupt = { "csiphy2" },
+               .ops = &csiphy_ops_3ph_1_0
        }
 };
 
-static const struct resources csid_res_8x96[] = {
+static const struct camss_subdev_resources csid_res_8x96[] = {
        /* CSID0 */
        {
                .regulators = { "vdda" },
@@ -179,7 +189,8 @@ static const struct resources csid_res_8x96[] = {
                                { 0 },
                                { 0 } },
                .reg = { "csid0" },
-               .interrupt = { "csid0" }
+               .interrupt = { "csid0" },
+               .ops = &csid_ops_4_7,
        },
 
        /* CSID1 */
@@ -196,7 +207,8 @@ static const struct resources csid_res_8x96[] = {
                                { 0 },
                                { 0 } },
                .reg = { "csid1" },
-               .interrupt = { "csid1" }
+               .interrupt = { "csid1" },
+               .ops = &csid_ops_4_7,
        },
 
        /* CSID2 */
@@ -213,7 +225,8 @@ static const struct resources csid_res_8x96[] = {
                                { 0 },
                                { 0 } },
                .reg = { "csid2" },
-               .interrupt = { "csid2" }
+               .interrupt = { "csid2" },
+               .ops = &csid_ops_4_7,
        },
 
        /* CSID3 */
@@ -230,11 +243,12 @@ static const struct resources csid_res_8x96[] = {
                                { 0 },
                                { 0 } },
                .reg = { "csid3" },
-               .interrupt = { "csid3" }
+               .interrupt = { "csid3" },
+               .ops = &csid_ops_4_7,
        }
 };
 
-static const struct resources_ispif ispif_res_8x96 = {
+static const struct camss_subdev_resources ispif_res_8x96 = {
        /* ISPIF */
        .clock = { "top_ahb", "ahb", "ispif_ahb",
                   "csi0", "csi0_pix", "csi0_rdi",
@@ -243,10 +257,10 @@ static const struct resources_ispif ispif_res_8x96 = {
                   "csi3", "csi3_pix", "csi3_rdi" },
        .clock_for_reset = { "vfe0", "csi_vfe0", "vfe1", "csi_vfe1" },
        .reg = { "ispif", "csi_clk_mux" },
-       .interrupt = "ispif"
+       .interrupt = { "ispif" }
 };
 
-static const struct resources vfe_res_8x96[] = {
+static const struct camss_subdev_resources vfe_res_8x96[] = {
        /* VFE0 */
        {
                .regulators = {},
@@ -262,7 +276,9 @@ static const struct resources vfe_res_8x96[] = {
                                { 0 },
                                { 0 } },
                .reg = { "vfe0" },
-               .interrupt = { "vfe0" }
+               .interrupt = { "vfe0" },
+               .line_num = 3,
+               .ops = &vfe_ops_4_7
        },
 
        /* VFE1 */
@@ -280,11 +296,13 @@ static const struct resources vfe_res_8x96[] = {
                                { 0 },
                                { 0 } },
                .reg = { "vfe1" },
-               .interrupt = { "vfe1" }
+               .interrupt = { "vfe1" },
+               .line_num = 3,
+               .ops = &vfe_ops_4_7
        }
 };
 
-static const struct resources csiphy_res_660[] = {
+static const struct camss_subdev_resources csiphy_res_660[] = {
        /* CSIPHY0 */
        {
                .regulators = {},
@@ -296,7 +314,8 @@ static const struct resources csiphy_res_660[] = {
                                { 100000000, 200000000, 269333333 },
                                { 0 } },
                .reg = { "csiphy0", "csiphy0_clk_mux" },
-               .interrupt = { "csiphy0" }
+               .interrupt = { "csiphy0" },
+               .ops = &csiphy_ops_3ph_1_0
        },
 
        /* CSIPHY1 */
@@ -310,7 +329,8 @@ static const struct resources csiphy_res_660[] = {
                                { 100000000, 200000000, 269333333 },
                                { 0 } },
                .reg = { "csiphy1", "csiphy1_clk_mux" },
-               .interrupt = { "csiphy1" }
+               .interrupt = { "csiphy1" },
+               .ops = &csiphy_ops_3ph_1_0
        },
 
        /* CSIPHY2 */
@@ -324,11 +344,12 @@ static const struct resources csiphy_res_660[] = {
                                { 100000000, 200000000, 269333333 },
                                { 0 } },
                .reg = { "csiphy2", "csiphy2_clk_mux" },
-               .interrupt = { "csiphy2" }
+               .interrupt = { "csiphy2" },
+               .ops = &csiphy_ops_3ph_1_0
        }
 };
 
-static const struct resources csid_res_660[] = {
+static const struct camss_subdev_resources csid_res_660[] = {
        /* CSID0 */
        {
                .regulators = { "vdda", "vdd_sec" },
@@ -346,7 +367,8 @@ static const struct resources csid_res_660[] = {
                                { 0 },
                                { 0 } },
                .reg = { "csid0" },
-               .interrupt = { "csid0" }
+               .interrupt = { "csid0" },
+               .ops = &csid_ops_4_7,
        },
 
        /* CSID1 */
@@ -366,7 +388,8 @@ static const struct resources csid_res_660[] = {
                                { 0 },
                                { 0 } },
                .reg = { "csid1" },
-               .interrupt = { "csid1" }
+               .interrupt = { "csid1" },
+               .ops = &csid_ops_4_7,
        },
 
        /* CSID2 */
@@ -386,7 +409,8 @@ static const struct resources csid_res_660[] = {
                                { 0 },
                                { 0 } },
                .reg = { "csid2" },
-               .interrupt = { "csid2" }
+               .interrupt = { "csid2" },
+               .ops = &csid_ops_4_7,
        },
 
        /* CSID3 */
@@ -406,11 +430,12 @@ static const struct resources csid_res_660[] = {
                                { 0 },
                                { 0 } },
                .reg = { "csid3" },
-               .interrupt = { "csid3" }
+               .interrupt = { "csid3" },
+               .ops = &csid_ops_4_7,
        }
 };
 
-static const struct resources_ispif ispif_res_660 = {
+static const struct camss_subdev_resources ispif_res_660 = {
        /* ISPIF */
        .clock = { "top_ahb", "ahb", "ispif_ahb",
                   "csi0", "csi0_pix", "csi0_rdi",
@@ -419,10 +444,10 @@ static const struct resources_ispif ispif_res_660 = {
                   "csi3", "csi3_pix", "csi3_rdi" },
        .clock_for_reset = { "vfe0", "csi_vfe0", "vfe1", "csi_vfe1" },
        .reg = { "ispif", "csi_clk_mux" },
-       .interrupt = "ispif"
+       .interrupt = { "ispif" }
 };
 
-static const struct resources vfe_res_660[] = {
+static const struct camss_subdev_resources vfe_res_660[] = {
        /* VFE0 */
        {
                .regulators = {},
@@ -441,7 +466,9 @@ static const struct resources vfe_res_660[] = {
                                { 0 },
                                { 0 } },
                .reg = { "vfe0" },
-               .interrupt = { "vfe0" }
+               .interrupt = { "vfe0" },
+               .line_num = 3,
+               .ops = &vfe_ops_4_8
        },
 
        /* VFE1 */
@@ -462,11 +489,13 @@ static const struct resources vfe_res_660[] = {
                                { 0 },
                                { 0 } },
                .reg = { "vfe1" },
-               .interrupt = { "vfe1" }
+               .interrupt = { "vfe1" },
+               .line_num = 3,
+               .ops = &vfe_ops_4_8
        }
 };
 
-static const struct resources csiphy_res_845[] = {
+static const struct camss_subdev_resources csiphy_res_845[] = {
        /* CSIPHY0 */
        {
                .regulators = {},
@@ -482,7 +511,8 @@ static const struct resources csiphy_res_845[] = {
                                { 0 },
                                { 19200000, 240000000, 269333333 } },
                .reg = { "csiphy0" },
-               .interrupt = { "csiphy0" }
+               .interrupt = { "csiphy0" },
+               .ops = &csiphy_ops_3ph_1_0
        },
 
        /* CSIPHY1 */
@@ -500,7 +530,8 @@ static const struct resources csiphy_res_845[] = {
                                { 0 },
                                { 19200000, 240000000, 269333333 } },
                .reg = { "csiphy1" },
-               .interrupt = { "csiphy1" }
+               .interrupt = { "csiphy1" },
+               .ops = &csiphy_ops_3ph_1_0
        },
 
        /* CSIPHY2 */
@@ -518,7 +549,8 @@ static const struct resources csiphy_res_845[] = {
                                { 0 },
                                { 19200000, 240000000, 269333333 } },
                .reg = { "csiphy2" },
-               .interrupt = { "csiphy2" }
+               .interrupt = { "csiphy2" },
+               .ops = &csiphy_ops_3ph_1_0
        },
 
        /* CSIPHY3 */
@@ -536,11 +568,12 @@ static const struct resources csiphy_res_845[] = {
                                { 0 },
                                { 19200000, 240000000, 269333333 } },
                .reg = { "csiphy3" },
-               .interrupt = { "csiphy3" }
+               .interrupt = { "csiphy3" },
+               .ops = &csiphy_ops_3ph_1_0
        }
 };
 
-static const struct resources csid_res_845[] = {
+static const struct camss_subdev_resources csid_res_845[] = {
        /* CSID0 */
        {
                .regulators = { "vdda-phy", "vdda-pll" },
@@ -558,7 +591,8 @@ static const struct resources csid_res_845[] = {
                                { 19200000, 75000000, 384000000, 538666667 },
                                { 384000000 } },
                .reg = { "csid0" },
-               .interrupt = { "csid0" }
+               .interrupt = { "csid0" },
+               .ops = &csid_ops_gen2
        },
 
        /* CSID1 */
@@ -578,7 +612,8 @@ static const struct resources csid_res_845[] = {
                                { 19200000, 75000000, 384000000, 538666667 },
                                { 384000000 } },
                .reg = { "csid1" },
-               .interrupt = { "csid1" }
+               .interrupt = { "csid1" },
+               .ops = &csid_ops_gen2
        },
 
        /* CSID2 */
@@ -598,11 +633,12 @@ static const struct resources csid_res_845[] = {
                                { 19200000, 75000000, 384000000, 538666667 },
                                { 384000000 } },
                .reg = { "csid2" },
-               .interrupt = { "csid2" }
+               .interrupt = { "csid2" },
+               .ops = &csid_ops_gen2
        }
 };
 
-static const struct resources vfe_res_845[] = {
+static const struct camss_subdev_resources vfe_res_845[] = {
        /* VFE0 */
        {
                .regulators = {},
@@ -620,7 +656,9 @@ static const struct resources vfe_res_845[] = {
                                { 19200000, 75000000, 384000000, 538666667 },
                                { 384000000 } },
                .reg = { "vfe0" },
-               .interrupt = { "vfe0" }
+               .interrupt = { "vfe0" },
+               .line_num = 4,
+               .ops = &vfe_ops_170
        },
 
        /* VFE1 */
@@ -640,7 +678,9 @@ static const struct resources vfe_res_845[] = {
                                { 19200000, 75000000, 384000000, 538666667 },
                                { 384000000 } },
                .reg = { "vfe1" },
-               .interrupt = { "vfe1" }
+               .interrupt = { "vfe1" },
+               .line_num = 4,
+               .ops = &vfe_ops_170
        },
 
        /* VFE-lite */
@@ -659,11 +699,13 @@ static const struct resources vfe_res_845[] = {
                                { 19200000, 75000000, 384000000, 538666667 },
                                { 384000000 } },
                .reg = { "vfe_lite" },
-               .interrupt = { "vfe_lite" }
+               .interrupt = { "vfe_lite" },
+               .line_num = 4,
+               .ops = &vfe_ops_170
        }
 };
 
-static const struct resources csiphy_res_8250[] = {
+static const struct camss_subdev_resources csiphy_res_8250[] = {
        /* CSIPHY0 */
        {
                .regulators = {},
@@ -671,7 +713,8 @@ static const struct resources csiphy_res_8250[] = {
                .clock_rate = { { 400000000 },
                                { 300000000 } },
                .reg = { "csiphy0" },
-               .interrupt = { "csiphy0" }
+               .interrupt = { "csiphy0" },
+               .ops = &csiphy_ops_3ph_1_0
        },
        /* CSIPHY1 */
        {
@@ -680,7 +723,8 @@ static const struct resources csiphy_res_8250[] = {
                .clock_rate = { { 400000000 },
                                { 300000000 } },
                .reg = { "csiphy1" },
-               .interrupt = { "csiphy1" }
+               .interrupt = { "csiphy1" },
+               .ops = &csiphy_ops_3ph_1_0
        },
        /* CSIPHY2 */
        {
@@ -689,7 +733,8 @@ static const struct resources csiphy_res_8250[] = {
                .clock_rate = { { 400000000 },
                                { 300000000 } },
                .reg = { "csiphy2" },
-               .interrupt = { "csiphy2" }
+               .interrupt = { "csiphy2" },
+               .ops = &csiphy_ops_3ph_1_0
        },
        /* CSIPHY3 */
        {
@@ -698,7 +743,8 @@ static const struct resources csiphy_res_8250[] = {
                .clock_rate = { { 400000000 },
                                { 300000000 } },
                .reg = { "csiphy3" },
-               .interrupt = { "csiphy3" }
+               .interrupt = { "csiphy3" },
+               .ops = &csiphy_ops_3ph_1_0
        },
        /* CSIPHY4 */
        {
@@ -707,7 +753,8 @@ static const struct resources csiphy_res_8250[] = {
                .clock_rate = { { 400000000 },
                                { 300000000 } },
                .reg = { "csiphy4" },
-               .interrupt = { "csiphy4" }
+               .interrupt = { "csiphy4" },
+               .ops = &csiphy_ops_3ph_1_0
        },
        /* CSIPHY5 */
        {
@@ -716,11 +763,12 @@ static const struct resources csiphy_res_8250[] = {
                .clock_rate = { { 400000000 },
                                { 300000000 } },
                .reg = { "csiphy5" },
-               .interrupt = { "csiphy5" }
+               .interrupt = { "csiphy5" },
+               .ops = &csiphy_ops_3ph_1_0
        }
 };
 
-static const struct resources csid_res_8250[] = {
+static const struct camss_subdev_resources csid_res_8250[] = {
        /* CSID0 */
        {
                .regulators = { "vdda-phy", "vdda-pll" },
@@ -731,7 +779,8 @@ static const struct resources csid_res_8250[] = {
                                { 100000000, 200000000, 300000000, 400000000 },
                                { 0 } },
                .reg = { "csid0" },
-               .interrupt = { "csid0" }
+               .interrupt = { "csid0" },
+               .ops = &csid_ops_gen2
        },
        /* CSID1 */
        {
@@ -743,7 +792,8 @@ static const struct resources csid_res_8250[] = {
                                { 100000000, 200000000, 300000000, 400000000 },
                                { 0 } },
                .reg = { "csid1" },
-               .interrupt = { "csid1" }
+               .interrupt = { "csid1" },
+               .ops = &csid_ops_gen2
        },
        /* CSID2 */
        {
@@ -754,7 +804,8 @@ static const struct resources csid_res_8250[] = {
                                { 400000000, 480000000 },
                                { 0 } },
                .reg = { "csid2" },
-               .interrupt = { "csid2" }
+               .interrupt = { "csid2" },
+               .ops = &csid_ops_gen2
        },
        /* CSID3 */
        {
@@ -765,11 +816,12 @@ static const struct resources csid_res_8250[] = {
                                { 400000000, 480000000 },
                                { 0 } },
                .reg = { "csid3" },
-               .interrupt = { "csid3" }
+               .interrupt = { "csid3" },
+               .ops = &csid_ops_gen2
        }
 };
 
-static const struct resources vfe_res_8250[] = {
+static const struct camss_subdev_resources vfe_res_8250[] = {
        /* VFE0 */
        {
                .regulators = {},
@@ -786,7 +838,9 @@ static const struct resources vfe_res_8250[] = {
                                { 0 },
                                { 0 } },
                .reg = { "vfe0" },
-               .interrupt = { "vfe0" }
+               .interrupt = { "vfe0" },
+               .line_num = 3,
+               .ops = &vfe_ops_480
        },
        /* VFE1 */
        {
@@ -804,7 +858,9 @@ static const struct resources vfe_res_8250[] = {
                                { 0 },
                                { 0 } },
                .reg = { "vfe1" },
-               .interrupt = { "vfe1" }
+               .interrupt = { "vfe1" },
+               .line_num = 3,
+               .ops = &vfe_ops_480
        },
        /* VFE2 (lite) */
        {
@@ -821,7 +877,9 @@ static const struct resources vfe_res_8250[] = {
                                { 400000000, 480000000 },
                                { 0 } },
                .reg = { "vfe_lite0" },
-               .interrupt = { "vfe_lite0" }
+               .interrupt = { "vfe_lite0" },
+               .line_num = 4,
+               .ops = &vfe_ops_480
        },
        /* VFE3 (lite) */
        {
@@ -838,7 +896,9 @@ static const struct resources vfe_res_8250[] = {
                                { 400000000, 480000000 },
                                { 0 } },
                .reg = { "vfe_lite1" },
-               .interrupt = { "vfe_lite1" }
+               .interrupt = { "vfe_lite1" },
+               .line_num = 4,
+               .ops = &vfe_ops_480
        },
 };
 
@@ -1004,7 +1064,7 @@ int camss_pm_domain_on(struct camss *camss, int id)
 {
        int ret = 0;
 
-       if (id < camss->vfe_num) {
+       if (id < camss->res->vfe_num) {
                struct vfe_device *vfe = &camss->vfe[id];
 
                ret = vfe->ops->pm_domain_on(vfe);
@@ -1015,7 +1075,7 @@ int camss_pm_domain_on(struct camss *camss, int id)
 
 void camss_pm_domain_off(struct camss *camss, int id)
 {
-       if (id < camss->vfe_num) {
+       if (id < camss->res->vfe_num) {
                struct vfe_device *vfe = &camss->vfe[id];
 
                vfe->ops->pm_domain_off(vfe);
@@ -1120,47 +1180,13 @@ err_cleanup:
  */
 static int camss_init_subdevices(struct camss *camss)
 {
-       const struct resources *csiphy_res;
-       const struct resources *csid_res;
-       const struct resources_ispif *ispif_res;
-       const struct resources *vfe_res;
+       const struct camss_resources *res = camss->res;
        unsigned int i;
        int ret;
 
-       if (camss->version == CAMSS_8x16) {
-               csiphy_res = csiphy_res_8x16;
-               csid_res = csid_res_8x16;
-               ispif_res = &ispif_res_8x16;
-               vfe_res = vfe_res_8x16;
-       } else if (camss->version == CAMSS_8x96) {
-               csiphy_res = csiphy_res_8x96;
-               csid_res = csid_res_8x96;
-               ispif_res = &ispif_res_8x96;
-               vfe_res = vfe_res_8x96;
-       } else if (camss->version == CAMSS_660) {
-               csiphy_res = csiphy_res_660;
-               csid_res = csid_res_660;
-               ispif_res = &ispif_res_660;
-               vfe_res = vfe_res_660;
-       }  else if (camss->version == CAMSS_845) {
-               csiphy_res = csiphy_res_845;
-               csid_res = csid_res_845;
-               /* Titan VFEs don't have an ISPIF  */
-               ispif_res = NULL;
-               vfe_res = vfe_res_845;
-       } else if (camss->version == CAMSS_8250) {
-               csiphy_res = csiphy_res_8250;
-               csid_res = csid_res_8250;
-               /* Titan VFEs don't have an ISPIF  */
-               ispif_res = NULL;
-               vfe_res = vfe_res_8250;
-       } else {
-               return -EINVAL;
-       }
-
-       for (i = 0; i < camss->csiphy_num; i++) {
+       for (i = 0; i < camss->res->csiphy_num; i++) {
                ret = msm_csiphy_subdev_init(camss, &camss->csiphy[i],
-                                            &csiphy_res[i], i);
+                                            &res->csiphy_res[i], i);
                if (ret < 0) {
                        dev_err(camss->dev,
                                "Failed to init csiphy%d sub-device: %d\n",
@@ -1170,9 +1196,9 @@ static int camss_init_subdevices(struct camss *camss)
        }
 
        /* note: SM8250 requires VFE to be initialized before CSID */
-       for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++) {
+       for (i = 0; i < camss->vfe_total_num; i++) {
                ret = msm_vfe_subdev_init(camss, &camss->vfe[i],
-                                         &vfe_res[i], i);
+                                         &res->vfe_res[i], i);
                if (ret < 0) {
                        dev_err(camss->dev,
                                "Fail to init vfe%d sub-device: %d\n", i, ret);
@@ -1180,9 +1206,9 @@ static int camss_init_subdevices(struct camss *camss)
                }
        }
 
-       for (i = 0; i < camss->csid_num; i++) {
+       for (i = 0; i < camss->res->csid_num; i++) {
                ret = msm_csid_subdev_init(camss, &camss->csid[i],
-                                          &csid_res[i], i);
+                                          &res->csid_res[i], i);
                if (ret < 0) {
                        dev_err(camss->dev,
                                "Failed to init csid%d sub-device: %d\n",
@@ -1191,7 +1217,7 @@ static int camss_init_subdevices(struct camss *camss)
                }
        }
 
-       ret = msm_ispif_subdev_init(camss, ispif_res);
+       ret = msm_ispif_subdev_init(camss, res->ispif_res);
        if (ret < 0) {
                dev_err(camss->dev, "Failed to init ispif sub-device: %d\n",
                ret);
@@ -1212,7 +1238,7 @@ static int camss_register_entities(struct camss *camss)
        int i, j, k;
        int ret;
 
-       for (i = 0; i < camss->csiphy_num; i++) {
+       for (i = 0; i < camss->res->csiphy_num; i++) {
                ret = msm_csiphy_register_entity(&camss->csiphy[i],
                                                 &camss->v4l2_dev);
                if (ret < 0) {
@@ -1223,7 +1249,7 @@ static int camss_register_entities(struct camss *camss)
                }
        }
 
-       for (i = 0; i < camss->csid_num; i++) {
+       for (i = 0; i < camss->res->csid_num; i++) {
                ret = msm_csid_register_entity(&camss->csid[i],
                                               &camss->v4l2_dev);
                if (ret < 0) {
@@ -1242,7 +1268,7 @@ static int camss_register_entities(struct camss *camss)
                goto err_reg_ispif;
        }
 
-       for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++) {
+       for (i = 0; i < camss->vfe_total_num; i++) {
                ret = msm_vfe_register_entities(&camss->vfe[i],
                                                &camss->v4l2_dev);
                if (ret < 0) {
@@ -1253,8 +1279,8 @@ static int camss_register_entities(struct camss *camss)
                }
        }
 
-       for (i = 0; i < camss->csiphy_num; i++) {
-               for (j = 0; j < camss->csid_num; j++) {
+       for (i = 0; i < camss->res->csiphy_num; i++) {
+               for (j = 0; j < camss->res->csid_num; j++) {
                        ret = media_create_pad_link(
                                &camss->csiphy[i].subdev.entity,
                                MSM_CSIPHY_PAD_SRC,
@@ -1273,7 +1299,7 @@ static int camss_register_entities(struct camss *camss)
        }
 
        if (camss->ispif) {
-               for (i = 0; i < camss->csid_num; i++) {
+               for (i = 0; i < camss->res->csid_num; i++) {
                        for (j = 0; j < camss->ispif->line_num; j++) {
                                ret = media_create_pad_link(
                                        &camss->csid[i].subdev.entity,
@@ -1293,7 +1319,7 @@ static int camss_register_entities(struct camss *camss)
                }
 
                for (i = 0; i < camss->ispif->line_num; i++)
-                       for (k = 0; k < camss->vfe_num; k++)
+                       for (k = 0; k < camss->res->vfe_num; k++)
                                for (j = 0; j < camss->vfe[k].line_num; j++) {
                                        struct v4l2_subdev *ispif = &camss->ispif->line[i].subdev;
                                        struct v4l2_subdev *vfe = &camss->vfe[k].line[j].subdev;
@@ -1313,8 +1339,8 @@ static int camss_register_entities(struct camss *camss)
                                        }
                                }
        } else {
-               for (i = 0; i < camss->csid_num; i++)
-                       for (k = 0; k < camss->vfe_num + camss->vfe_lite_num; k++)
+               for (i = 0; i < camss->res->csid_num; i++)
+                       for (k = 0; k < camss->vfe_total_num; k++)
                                for (j = 0; j < camss->vfe[k].line_num; j++) {
                                        struct v4l2_subdev *csid = &camss->csid[i].subdev;
                                        struct v4l2_subdev *vfe = &camss->vfe[k].line[j].subdev;
@@ -1338,7 +1364,7 @@ static int camss_register_entities(struct camss *camss)
        return 0;
 
 err_link:
-       i = camss->vfe_num + camss->vfe_lite_num;
+       i = camss->vfe_total_num;
 err_reg_vfe:
        for (i--; i >= 0; i--)
                msm_vfe_unregister_entities(&camss->vfe[i]);
@@ -1346,12 +1372,12 @@ err_reg_vfe:
 err_reg_ispif:
        msm_ispif_unregister_entities(camss->ispif);
 
-       i = camss->csid_num;
+       i = camss->res->csid_num;
 err_reg_csid:
        for (i--; i >= 0; i--)
                msm_csid_unregister_entity(&camss->csid[i]);
 
-       i = camss->csiphy_num;
+       i = camss->res->csiphy_num;
 err_reg_csiphy:
        for (i--; i >= 0; i--)
                msm_csiphy_unregister_entity(&camss->csiphy[i]);
@@ -1369,15 +1395,15 @@ static void camss_unregister_entities(struct camss *camss)
 {
        unsigned int i;
 
-       for (i = 0; i < camss->csiphy_num; i++)
+       for (i = 0; i < camss->res->csiphy_num; i++)
                msm_csiphy_unregister_entity(&camss->csiphy[i]);
 
-       for (i = 0; i < camss->csid_num; i++)
+       for (i = 0; i < camss->res->csid_num; i++)
                msm_csid_unregister_entity(&camss->csid[i]);
 
        msm_ispif_unregister_entities(camss->ispif);
 
-       for (i = 0; i < camss->vfe_num + camss->vfe_lite_num; i++)
+       for (i = 0; i < camss->vfe_total_num; i++)
                msm_vfe_unregister_entities(&camss->vfe[i]);
 }
 
@@ -1496,7 +1522,7 @@ static int camss_configure_pd(struct camss *camss)
                }
        }
 
-       if (i > camss->vfe_num) {
+       if (i > camss->res->vfe_num) {
                camss->genpd_link[i - 1] = device_link_add(camss->dev, camss->genpd[i - 1],
                                                           DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME |
                                                           DL_FLAG_RPM_ACTIVE);
@@ -1518,26 +1544,34 @@ fail_pm:
 static int camss_icc_get(struct camss *camss)
 {
        const struct resources_icc *icc_res;
-       int nbr_icc_paths = 0;
        int i;
 
-       if (camss->version == CAMSS_8250) {
-               icc_res = &icc_res_sm8250[0];
-               nbr_icc_paths = ICC_SM8250_COUNT;
-       }
+       icc_res = camss->res->icc_res;
 
-       for (i = 0; i < nbr_icc_paths; i++) {
+       for (i = 0; i < camss->res->icc_path_num; i++) {
                camss->icc_path[i] = devm_of_icc_get(camss->dev,
                                                     icc_res[i].name);
                if (IS_ERR(camss->icc_path[i]))
                        return PTR_ERR(camss->icc_path[i]);
-
-               camss->icc_bw_tbl[i] = icc_res[i].icc_bw_tbl;
        }
 
        return 0;
 }
 
+static void camss_genpd_cleanup(struct camss *camss)
+{
+       int i;
+
+       if (camss->genpd_num == 1)
+               return;
+
+       if (camss->genpd_num > camss->res->vfe_num)
+               device_link_del(camss->genpd_link[camss->genpd_num - 1]);
+
+       for (i = 0; i < camss->genpd_num; i++)
+               dev_pm_domain_detach(camss->genpd[i], true);
+}
+
 /*
  * camss_probe - Probe CAMSS platform device
  * @pdev: Pointer to CAMSS platform device
@@ -1548,84 +1582,59 @@ static int camss_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct camss *camss;
-       int num_subdevs, ret;
+       int num_subdevs;
+       int ret;
 
        camss = devm_kzalloc(dev, sizeof(*camss), GFP_KERNEL);
        if (!camss)
                return -ENOMEM;
 
+       camss->res = of_device_get_match_data(dev);
+
        atomic_set(&camss->ref_count, 0);
        camss->dev = dev;
        platform_set_drvdata(pdev, camss);
 
-       if (of_device_is_compatible(dev->of_node, "qcom,msm8916-camss")) {
-               camss->version = CAMSS_8x16;
-               camss->csiphy_num = 2;
-               camss->csid_num = 2;
-               camss->vfe_num = 1;
-       } else if (of_device_is_compatible(dev->of_node,
-                                          "qcom,msm8996-camss")) {
-               camss->version = CAMSS_8x96;
-               camss->csiphy_num = 3;
-               camss->csid_num = 4;
-               camss->vfe_num = 2;
-       } else if (of_device_is_compatible(dev->of_node,
-                                          "qcom,sdm660-camss")) {
-               camss->version = CAMSS_660;
-               camss->csiphy_num = 3;
-               camss->csid_num = 4;
-               camss->vfe_num = 2;
-       } else if (of_device_is_compatible(dev->of_node,
-                                          "qcom,sdm845-camss")) {
-               camss->version = CAMSS_845;
-               camss->csiphy_num = 4;
-               camss->csid_num = 3;
-               camss->vfe_num = 2;
-               camss->vfe_lite_num = 1;
-       } else if (of_device_is_compatible(dev->of_node,
-                                          "qcom,sm8250-camss")) {
-               camss->version = CAMSS_8250;
-               camss->csiphy_num = 6;
-               camss->csid_num = 4;
-               camss->vfe_num = 2;
-               camss->vfe_lite_num = 2;
-       } else {
-               return -EINVAL;
-       }
-
-       camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
+       camss->csiphy = devm_kcalloc(dev, camss->res->csiphy_num,
                                     sizeof(*camss->csiphy), GFP_KERNEL);
        if (!camss->csiphy)
                return -ENOMEM;
 
-       camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
+       camss->csid = devm_kcalloc(dev, camss->res->csid_num, sizeof(*camss->csid),
                                   GFP_KERNEL);
        if (!camss->csid)
                return -ENOMEM;
 
-       if (camss->version == CAMSS_8x16 ||
-           camss->version == CAMSS_8x96) {
+       if (camss->res->version == CAMSS_8x16 ||
+           camss->res->version == CAMSS_8x96) {
                camss->ispif = devm_kcalloc(dev, 1, sizeof(*camss->ispif), GFP_KERNEL);
                if (!camss->ispif)
                        return -ENOMEM;
        }
 
-       camss->vfe = devm_kcalloc(dev, camss->vfe_num + camss->vfe_lite_num,
+       camss->vfe_total_num = camss->res->vfe_num + camss->res->vfe_lite_num;
+       camss->vfe = devm_kcalloc(dev, camss->vfe_total_num,
                                  sizeof(*camss->vfe), GFP_KERNEL);
        if (!camss->vfe)
                return -ENOMEM;
 
        ret = camss_icc_get(camss);
        if (ret < 0)
-               goto err_cleanup;
+               return ret;
+
+       ret = camss_configure_pd(camss);
+       if (ret < 0) {
+               dev_err(dev, "Failed to configure power domains: %d\n", ret);
+               return ret;
+       }
 
        ret = camss_init_subdevices(camss);
        if (ret < 0)
-               goto err_cleanup;
+               goto err_genpd_cleanup;
 
        ret = dma_set_mask_and_coherent(dev, 0xffffffff);
        if (ret)
-               goto err_cleanup;
+               goto err_genpd_cleanup;
 
        camss->media_dev.dev = camss->dev;
        strscpy(camss->media_dev.model, "Qualcomm Camera Subsystem",
@@ -1637,7 +1646,7 @@ static int camss_probe(struct platform_device *pdev)
        ret = v4l2_device_register(camss->dev, &camss->v4l2_dev);
        if (ret < 0) {
                dev_err(dev, "Failed to register V4L2 device: %d\n", ret);
-               goto err_cleanup;
+               goto err_genpd_cleanup;
        }
 
        v4l2_async_nf_init(&camss->notifier, &camss->v4l2_dev);
@@ -1645,12 +1654,12 @@ static int camss_probe(struct platform_device *pdev)
        num_subdevs = camss_of_parse_ports(camss);
        if (num_subdevs < 0) {
                ret = num_subdevs;
-               goto err_cleanup;
+               goto err_v4l2_device_unregister;
        }
 
        ret = camss_register_entities(camss);
        if (ret < 0)
-               goto err_cleanup;
+               goto err_v4l2_device_unregister;
 
        if (num_subdevs) {
                camss->notifier.ops = &camss_subdev_notifier_ops;
@@ -1678,43 +1687,28 @@ static int camss_probe(struct platform_device *pdev)
                }
        }
 
-       ret = camss_configure_pd(camss);
-       if (ret < 0) {
-               dev_err(dev, "Failed to configure power domains: %d\n", ret);
-               return ret;
-       }
-
        pm_runtime_enable(dev);
 
        return 0;
 
 err_register_subdevs:
        camss_unregister_entities(camss);
-err_cleanup:
+err_v4l2_device_unregister:
        v4l2_device_unregister(&camss->v4l2_dev);
        v4l2_async_nf_cleanup(&camss->notifier);
+err_genpd_cleanup:
+       camss_genpd_cleanup(camss);
 
        return ret;
 }
 
 void camss_delete(struct camss *camss)
 {
-       int i;
-
        v4l2_device_unregister(&camss->v4l2_dev);
        media_device_unregister(&camss->media_dev);
        media_device_cleanup(&camss->media_dev);
 
        pm_runtime_disable(camss->dev);
-
-       if (camss->genpd_num == 1)
-               return;
-
-       if (camss->genpd_num > camss->vfe_num)
-               device_link_del(camss->genpd_link[camss->genpd_num - 1]);
-
-       for (i = 0; i < camss->genpd_num; i++)
-               dev_pm_domain_detach(camss->genpd[i], true);
 }
 
 /*
@@ -1733,14 +1727,73 @@ static void camss_remove(struct platform_device *pdev)
 
        if (atomic_read(&camss->ref_count) == 0)
                camss_delete(camss);
+
+       camss_genpd_cleanup(camss);
 }
 
+static const struct camss_resources msm8916_resources = {
+       .version = CAMSS_8x16,
+       .csiphy_res = csiphy_res_8x16,
+       .csid_res = csid_res_8x16,
+       .ispif_res = &ispif_res_8x16,
+       .vfe_res = vfe_res_8x16,
+       .csiphy_num = ARRAY_SIZE(csiphy_res_8x16),
+       .csid_num = ARRAY_SIZE(csid_res_8x16),
+       .vfe_num = ARRAY_SIZE(vfe_res_8x16),
+};
+
+static const struct camss_resources msm8996_resources = {
+       .version = CAMSS_8x96,
+       .csiphy_res = csiphy_res_8x96,
+       .csid_res = csid_res_8x96,
+       .ispif_res = &ispif_res_8x96,
+       .vfe_res = vfe_res_8x96,
+       .csiphy_num = ARRAY_SIZE(csiphy_res_8x96),
+       .csid_num = ARRAY_SIZE(csid_res_8x96),
+       .vfe_num = ARRAY_SIZE(vfe_res_8x96),
+};
+
+static const struct camss_resources sdm660_resources = {
+       .version = CAMSS_660,
+       .csiphy_res = csiphy_res_660,
+       .csid_res = csid_res_660,
+       .ispif_res = &ispif_res_660,
+       .vfe_res = vfe_res_660,
+       .csiphy_num = ARRAY_SIZE(csiphy_res_660),
+       .csid_num = ARRAY_SIZE(csid_res_660),
+       .vfe_num = ARRAY_SIZE(vfe_res_660),
+};
+
+static const struct camss_resources sdm845_resources = {
+       .version = CAMSS_845,
+       .csiphy_res = csiphy_res_845,
+       .csid_res = csid_res_845,
+       .vfe_res = vfe_res_845,
+       .csiphy_num = ARRAY_SIZE(csiphy_res_845),
+       .csid_num = ARRAY_SIZE(csid_res_845),
+       .vfe_num = 2,
+       .vfe_lite_num = 1,
+};
+
+static const struct camss_resources sm8250_resources = {
+       .version = CAMSS_8250,
+       .csiphy_res = csiphy_res_8250,
+       .csid_res = csid_res_8250,
+       .vfe_res = vfe_res_8250,
+       .icc_res = icc_res_sm8250,
+       .icc_path_num = ARRAY_SIZE(icc_res_sm8250),
+       .csiphy_num = ARRAY_SIZE(csiphy_res_8250),
+       .csid_num = ARRAY_SIZE(csid_res_8250),
+       .vfe_num = 2,
+       .vfe_lite_num = 2,
+};
+
 static const struct of_device_id camss_dt_match[] = {
-       { .compatible = "qcom,msm8916-camss" },
-       { .compatible = "qcom,msm8996-camss" },
-       { .compatible = "qcom,sdm660-camss" },
-       { .compatible = "qcom,sdm845-camss" },
-       { .compatible = "qcom,sm8250-camss" },
+       { .compatible = "qcom,msm8916-camss", .data = &msm8916_resources },
+       { .compatible = "qcom,msm8996-camss", .data = &msm8996_resources },
+       { .compatible = "qcom,sdm660-camss", .data = &sdm660_resources },
+       { .compatible = "qcom,sdm845-camss", .data = &sdm845_resources },
+       { .compatible = "qcom,sm8250-camss", .data = &sm8250_resources },
        { }
 };
 
@@ -1749,14 +1802,10 @@ MODULE_DEVICE_TABLE(of, camss_dt_match);
 static int __maybe_unused camss_runtime_suspend(struct device *dev)
 {
        struct camss *camss = dev_get_drvdata(dev);
-       int nbr_icc_paths = 0;
        int i;
        int ret;
 
-       if (camss->version == CAMSS_8250)
-               nbr_icc_paths = ICC_SM8250_COUNT;
-
-       for (i = 0; i < nbr_icc_paths; i++) {
+       for (i = 0; i < camss->res->icc_path_num; i++) {
                ret = icc_set_bw(camss->icc_path[i], 0, 0);
                if (ret)
                        return ret;
@@ -1768,17 +1817,14 @@ static int __maybe_unused camss_runtime_suspend(struct device *dev)
 static int __maybe_unused camss_runtime_resume(struct device *dev)
 {
        struct camss *camss = dev_get_drvdata(dev);
-       int nbr_icc_paths = 0;
+       const struct resources_icc *icc_res = camss->res->icc_res;
        int i;
        int ret;
 
-       if (camss->version == CAMSS_8250)
-               nbr_icc_paths = ICC_SM8250_COUNT;
-
-       for (i = 0; i < nbr_icc_paths; i++) {
+       for (i = 0; i < camss->res->icc_path_num; i++) {
                ret = icc_set_bw(camss->icc_path[i],
-                                camss->icc_bw_tbl[i].avg,
-                                camss->icc_bw_tbl[i].peak);
+                                icc_res[i].icc_bw_tbl.avg,
+                                icc_res[i].icc_bw_tbl.peak);
                if (ret)
                        return ret;
        }
index f6c326cb853b85ef6b5d557079844520a226a5c1..8acad7321c09d0b5c77256a949313fe386a1de96 100644 (file)
 
 #define CAMSS_RES_MAX 17
 
-struct resources {
+struct camss_subdev_resources {
        char *regulators[CAMSS_RES_MAX];
        char *clock[CAMSS_RES_MAX];
+       char *clock_for_reset[CAMSS_RES_MAX];
        u32 clock_rate[CAMSS_RES_MAX][CAMSS_RES_MAX];
        char *reg[CAMSS_RES_MAX];
        char *interrupt[CAMSS_RES_MAX];
-};
-
-struct resources_ispif {
-       char *clock[CAMSS_RES_MAX];
-       char *clock_for_reset[CAMSS_RES_MAX];
-       char *reg[CAMSS_RES_MAX];
-       char *interrupt;
+       u8 line_num;
+       const void *ops;
 };
 
 struct icc_bw_tbl {
@@ -85,26 +81,36 @@ enum icc_count {
        ICC_SM8250_COUNT = 4,
 };
 
-struct camss {
+struct camss_resources {
        enum camss_version version;
+       const struct camss_subdev_resources *csiphy_res;
+       const struct camss_subdev_resources *csid_res;
+       const struct camss_subdev_resources *ispif_res;
+       const struct camss_subdev_resources *vfe_res;
+       const struct resources_icc *icc_res;
+       const unsigned int icc_path_num;
+       const unsigned int csiphy_num;
+       const unsigned int csid_num;
+       const unsigned int vfe_num;
+       const unsigned int vfe_lite_num;
+};
+
+struct camss {
        struct v4l2_device v4l2_dev;
        struct v4l2_async_notifier notifier;
        struct media_device media_dev;
        struct device *dev;
-       int csiphy_num;
        struct csiphy_device *csiphy;
-       int csid_num;
        struct csid_device *csid;
        struct ispif_device *ispif;
-       int vfe_num;
-       int vfe_lite_num;
        struct vfe_device *vfe;
        atomic_t ref_count;
        int genpd_num;
        struct device **genpd;
        struct device_link **genpd_link;
        struct icc_path *icc_path[ICC_SM8250_COUNT];
-       struct icc_bw_tbl icc_bw_tbl[ICC_SM8250_COUNT];
+       const struct camss_resources *res;
+       unsigned int vfe_total_num;
 };
 
 struct camss_camera_interface {
index 054b8e74ba4f51d80852d9fec4b151aff3702340..9cffe975581b722cf35e6e837611f7d1042e8766 100644 (file)
@@ -549,7 +549,7 @@ static const struct venus_resources msm8916_res = {
        .vmem_size = 0,
        .vmem_addr = 0,
        .dma_mask = 0xddc00000 - 1,
-       .fwname = "qcom/venus-1.8/venus.mdt",
+       .fwname = "qcom/venus-1.8/venus.mbn",
 };
 
 static const struct freq_tbl msm8996_freq_table[] = {
@@ -582,7 +582,7 @@ static const struct venus_resources msm8996_res = {
        .vmem_size = 0,
        .vmem_addr = 0,
        .dma_mask = 0xddc00000 - 1,
-       .fwname = "qcom/venus-4.2/venus.mdt",
+       .fwname = "qcom/venus-4.2/venus.mbn",
 };
 
 static const struct freq_tbl sdm660_freq_table[] = {
@@ -690,7 +690,7 @@ static const struct venus_resources sdm845_res = {
        .vmem_size = 0,
        .vmem_addr = 0,
        .dma_mask = 0xe0000000 - 1,
-       .fwname = "qcom/venus-5.2/venus.mdt",
+       .fwname = "qcom/venus-5.2/venus.mbn",
 };
 
 static const struct venus_resources sdm845_res_v2 = {
@@ -720,7 +720,7 @@ static const struct venus_resources sdm845_res_v2 = {
        .cp_size = 0x70800000,
        .cp_nonpixel_start = 0x1000000,
        .cp_nonpixel_size = 0x24800000,
-       .fwname = "qcom/venus-5.2/venus.mdt",
+       .fwname = "qcom/venus-5.2/venus.mbn",
 };
 
 static const struct freq_tbl sc7180_freq_table[] = {
@@ -768,7 +768,7 @@ static const struct venus_resources sc7180_res = {
        .cp_size = 0x70800000,
        .cp_nonpixel_start = 0x1000000,
        .cp_nonpixel_size = 0x24800000,
-       .fwname = "qcom/venus-5.4/venus.mdt",
+       .fwname = "qcom/venus-5.4/venus.mbn",
 };
 
 static const struct freq_tbl sm8250_freq_table[] = {
index dd9c5066442db1186e20885921053f9fc0783e89..20acd412ee7b9116f79c9ad6bde89db30f18a7e5 100644 (file)
@@ -242,7 +242,7 @@ struct hfi_session_parse_sequence_header_pkt {
 
 struct hfi_sfr {
        u32 buf_size;
-       u8 data[1];
+       u8 data[] __counted_by(buf_size);
 };
 
 struct hfi_sys_test_ssr_pkt {
index 7cab685a2ec804e0cf2a1dbc54df47eeceb49fcc..0a041b4db9efc549621de07dd13b4a3a37a70d11 100644 (file)
@@ -398,7 +398,7 @@ session_get_prop_buf_req(struct hfi_msg_session_property_info_pkt *pkt,
                memcpy(&bufreq[idx], buf_req, sizeof(*bufreq));
                idx++;
 
-               if (idx > HFI_BUFFER_TYPE_MAX)
+               if (idx >= HFI_BUFFER_TYPE_MAX)
                        return HFI_ERR_SESSION_INVALID_PARAMETER;
 
                req_bytes -= sizeof(struct hfi_buffer_requirements);
index 6cf74b2bc5ae38e98feb71d74579218177a7a259..c43839539d4ddabff75ba0113110d4a8027a0dc8 100644 (file)
@@ -19,6 +19,9 @@ static void init_codecs(struct venus_core *core)
        struct hfi_plat_caps *caps = core->caps, *cap;
        unsigned long bit;
 
+       if (hweight_long(core->dec_codecs) + hweight_long(core->enc_codecs) > MAX_CODEC_NUM)
+               return;
+
        for_each_set_bit(bit, &core->dec_codecs, MAX_CODEC_NUM) {
                cap = &caps[core->codecs_count++];
                cap->codec = BIT(bit);
@@ -86,6 +89,9 @@ static void fill_profile_level(struct hfi_plat_caps *cap, const void *data,
 {
        const struct hfi_profile_level *pl = data;
 
+       if (cap->num_pl + num >= HFI_MAX_PROFILE_COUNT)
+               return;
+
        memcpy(&cap->pl[cap->num_pl], pl, num * sizeof(*pl));
        cap->num_pl += num;
 }
@@ -111,6 +117,9 @@ fill_caps(struct hfi_plat_caps *cap, const void *data, unsigned int num)
 {
        const struct hfi_capability *caps = data;
 
+       if (cap->num_caps + num >= MAX_CAP_ENTRIES)
+               return;
+
        memcpy(&cap->caps[cap->num_caps], caps, num * sizeof(*caps));
        cap->num_caps += num;
 }
@@ -137,6 +146,9 @@ static void fill_raw_fmts(struct hfi_plat_caps *cap, const void *fmts,
 {
        const struct raw_formats *formats = fmts;
 
+       if (cap->num_fmts + num_fmts >= MAX_FMT_ENTRIES)
+               return;
+
        memcpy(&cap->fmts[cap->num_fmts], formats, num_fmts * sizeof(*formats));
        cap->num_fmts += num_fmts;
 }
@@ -159,6 +171,9 @@ parse_raw_formats(struct venus_core *core, u32 codecs, u32 domain, void *data)
                rawfmts[i].buftype = fmt->buffer_type;
                i++;
 
+               if (i >= MAX_FMT_ENTRIES)
+                       return;
+
                if (pinfo->num_planes > MAX_PLANES)
                        break;
 
index 19fc6575a489105f824f9275c8f05650d25d440a..f9437b6412b91c2483670a2b11f4fd43f3206404 100644 (file)
@@ -205,6 +205,11 @@ static int venus_write_queue(struct venus_hfi_device *hdev,
 
        new_wr_idx = wr_idx + dwords;
        wr_ptr = (u32 *)(queue->qmem.kva + (wr_idx << 2));
+
+       if (wr_ptr < (u32 *)queue->qmem.kva ||
+           wr_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*wr_ptr)))
+               return -EINVAL;
+
        if (new_wr_idx < qsize) {
                memcpy(wr_ptr, packet, dwords << 2);
        } else {
@@ -272,6 +277,11 @@ static int venus_read_queue(struct venus_hfi_device *hdev,
        }
 
        rd_ptr = (u32 *)(queue->qmem.kva + (rd_idx << 2));
+
+       if (rd_ptr < (u32 *)queue->qmem.kva ||
+           rd_ptr > (u32 *)(queue->qmem.kva + queue->qmem.size - sizeof(*rd_ptr)))
+               return -EINVAL;
+
        dwords = *rd_ptr >> 2;
        if (!dwords)
                return -EINVAL;
index 48c9084bb4dba82902ddd2b916f3e06e0a527434..a1b127caa90a763c10199108d6a1d1897712ab42 100644 (file)
@@ -870,7 +870,7 @@ static int vcodec_domains_get(struct venus_core *core)
                pd = dev_pm_domain_attach_by_name(dev,
                                                  res->vcodec_pmdomains[i]);
                if (IS_ERR_OR_NULL(pd))
-                       return PTR_ERR(pd) ? : -ENODATA;
+                       return pd ? PTR_ERR(pd) : -ENODATA;
                core->pmdomains[i] = pd;
        }
 
index 7360cf3863f2c8d8c507a76bf16b231672c17870..19a005d83733072449ab30559896f8e6b8be9c63 100644 (file)
@@ -467,7 +467,7 @@ static int risp_probe(struct platform_device *pdev)
        isp->subdev.dev = &pdev->dev;
        v4l2_subdev_init(&isp->subdev, &rcar_isp_subdev_ops);
        v4l2_set_subdevdata(&isp->subdev, &pdev->dev);
-       snprintf(isp->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s",
+       snprintf(isp->subdev.name, sizeof(isp->subdev.name), "%s %s",
                 KBUILD_MODNAME, dev_name(&pdev->dev));
        isp->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
 
index f6326df0b09bef904ded0e03c80f3399b9009dd0..66fe553a00e757418dc8611599ea0677eecd9c85 100644 (file)
@@ -1889,7 +1889,7 @@ static int rcsi2_probe(struct platform_device *pdev)
        priv->subdev.dev = &pdev->dev;
        v4l2_subdev_init(&priv->subdev, &rcar_csi2_subdev_ops);
        v4l2_set_subdevdata(&priv->subdev, &pdev->dev);
-       snprintf(priv->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s",
+       snprintf(priv->subdev.name, sizeof(priv->subdev.name), "%s %s",
                 KBUILD_MODNAME, dev_name(&pdev->dev));
        priv->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
 
index 163a4ba61c1735f459bba959c10828dc9e02db69..292c5bf9e50c3b2f165b5d824e99347abaaaf1ac 100644 (file)
@@ -871,8 +871,7 @@ static int rcar_drif_querycap(struct file *file, void *fh,
 
        strscpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
        strscpy(cap->card, sdr->vdev->name, sizeof(cap->card));
-       snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
-                sdr->vdev->name);
+       strscpy(cap->bus_info, "platform:R-Car DRIF", sizeof(cap->bus_info));
 
        return 0;
 }
index ec631c6e2a571752bdbd322fd23c73e7502bca67..2562b30acfb9b298398914afec8b797eb88ab7bd 100644 (file)
@@ -1183,17 +1183,13 @@ static int ceu_enum_input(struct file *file, void *priv,
                          struct v4l2_input *inp)
 {
        struct ceu_device *ceudev = video_drvdata(file);
-       struct ceu_subdev *ceusd;
 
        if (inp->index >= ceudev->num_sd)
                return -EINVAL;
 
-       ceusd = ceudev->subdevs[inp->index];
-
        inp->type = V4L2_INPUT_TYPE_CAMERA;
        inp->std = 0;
-       snprintf(inp->name, sizeof(inp->name), "Camera%u: %s",
-                inp->index, ceusd->v4l2_sd->name);
+       snprintf(inp->name, sizeof(inp->name), "Camera %u", inp->index);
 
        return 0;
 }
index 8f3cba319762341a9a35167488ccc667fc0862e9..c6d7e01c89494a94e3ee741e1b673148c6c64f19 100644 (file)
@@ -479,9 +479,11 @@ static void rkisp1_sp_config(struct rkisp1_capture *cap)
        rkisp1_write(rkisp1, cap->config->mi.cr_size_init,
                     rkisp1_pixfmt_comp_size(pixm, RKISP1_PLANE_CR));
 
+       rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_LLENGTH, cap->sp_y_stride);
        rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_WIDTH, pixm->width);
        rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_HEIGHT, pixm->height);
-       rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_LLENGTH, cap->sp_y_stride);
+       rkisp1_write(rkisp1, RKISP1_CIF_MI_SP_Y_PIC_SIZE,
+                    cap->sp_y_stride * pixm->height);
 
        rkisp1_irq_frame_end_enable(cap);
 
@@ -1101,14 +1103,20 @@ rkisp1_fill_pixfmt(struct v4l2_pix_format_mplane *pixm,
        memset(pixm->plane_fmt, 0, sizeof(pixm->plane_fmt));
        info = v4l2_format_info(pixm->pixelformat);
        pixm->num_planes = info->mem_planes;
-       stride = info->bpp[0] * pixm->width;
-       /* Self path supports custom stride but Main path doesn't */
-       if (id == RKISP1_MAINPATH || plane_y->bytesperline < stride)
-               plane_y->bytesperline = stride;
-       plane_y->sizeimage = plane_y->bytesperline * pixm->height;
 
-       /* normalize stride to pixels per line */
-       stride = DIV_ROUND_UP(plane_y->bytesperline, info->bpp[0]);
+       /*
+        * The SP supports custom strides, expressed as a number of pixels for
+        * the Y plane. Clamp the stride to a reasonable value to avoid integer
+        * overflows when calculating the bytesperline and sizeimage values.
+        */
+       if (id == RKISP1_SELFPATH)
+               stride = clamp(DIV_ROUND_UP(plane_y->bytesperline, info->bpp[0]),
+                              pixm->width, 65536U);
+       else
+               stride = pixm->width;
+
+       plane_y->bytesperline = stride * info->bpp[0];
+       plane_y->sizeimage = plane_y->bytesperline * pixm->height;
 
        for (i = 1; i < info->comp_planes; i++) {
                struct v4l2_plane_pix_format *plane = &pixm->plane_fmt[i];
index d30f0ecb1bfd844eac174c0bae594f1e6ca90750..1e7cea1bea5ea60631e18898a2a158402417ec83 100644 (file)
@@ -167,9 +167,6 @@ struct rkisp1_sensor_async {
  * @is_dphy_errctrl_disabled: if dphy errctrl is disabled (avoid endless interrupt)
  * @sd: v4l2_subdev variable
  * @pads: media pads
- * @pad_cfg: configurations for the pads
- * @sink_fmt: input format
- * @lock: protects pad_cfg and sink_fmt
  * @source: source in-use, set when starting streaming
  */
 struct rkisp1_csi {
@@ -178,9 +175,6 @@ struct rkisp1_csi {
        bool is_dphy_errctrl_disabled;
        struct v4l2_subdev sd;
        struct media_pad pads[RKISP1_CSI_PAD_NUM];
-       struct v4l2_subdev_pad_config pad_cfg[RKISP1_CSI_PAD_NUM];
-       const struct rkisp1_mbus_info *sink_fmt;
-       struct mutex lock;
        struct v4l2_subdev *source;
 };
 
@@ -190,20 +184,14 @@ struct rkisp1_csi {
  * @sd:                                v4l2_subdev variable
  * @rkisp1:                    pointer to rkisp1_device
  * @pads:                      media pads
- * @pad_cfg:                   pads configurations
  * @sink_fmt:                  input format
- * @src_fmt:                   output format
- * @ops_lock:                  ops serialization
  * @frame_sequence:            used to synchronize frame_id between video devices.
  */
 struct rkisp1_isp {
        struct v4l2_subdev sd;
        struct rkisp1_device *rkisp1;
        struct media_pad pads[RKISP1_ISP_PAD_MAX];
-       struct v4l2_subdev_pad_config pad_cfg[RKISP1_ISP_PAD_MAX];
        const struct rkisp1_mbus_info *sink_fmt;
-       const struct rkisp1_mbus_info *src_fmt;
-       struct mutex ops_lock; /* serialize the subdevice ops */
        __u32 frame_sequence;
 };
 
@@ -390,10 +378,7 @@ struct rkisp1_params {
  * @id:               id of the resizer, one of RKISP1_SELFPATH, RKISP1_MAINPATH
  * @rkisp1:    pointer to the rkisp1 device
  * @pads:      media pads
- * @pad_cfg:   configurations for the pads
  * @config:    the set of registers to configure the resizer
- * @pixel_enc: pixel encoding of the resizer
- * @ops_lock:  a lock for the subdev ops
  */
 struct rkisp1_resizer {
        struct v4l2_subdev sd;
@@ -401,10 +386,7 @@ struct rkisp1_resizer {
        enum rkisp1_stream_id id;
        struct rkisp1_device *rkisp1;
        struct media_pad pads[RKISP1_RSZ_PAD_MAX];
-       struct v4l2_subdev_pad_config pad_cfg[RKISP1_RSZ_PAD_MAX];
        const struct rkisp1_rsz_config *config;
-       enum v4l2_pixel_encoding pixel_enc;
-       struct mutex ops_lock; /* serialize the subdevice ops */
 };
 
 /*
index fdff3d0da4e5061c2e74485a52214d551f7f0cf0..6e17b2817e610681d3d712bee4818e515db3acdf 100644 (file)
@@ -30,23 +30,6 @@ static inline struct rkisp1_csi *to_rkisp1_csi(struct v4l2_subdev *sd)
        return container_of(sd, struct rkisp1_csi, sd);
 }
 
-static struct v4l2_mbus_framefmt *
-rkisp1_csi_get_pad_fmt(struct rkisp1_csi *csi,
-                      struct v4l2_subdev_state *sd_state,
-                      unsigned int pad, u32 which)
-{
-       struct v4l2_subdev_state state = {
-               .pads = csi->pad_cfg
-       };
-
-       lockdep_assert_held(&csi->lock);
-
-       if (which == V4L2_SUBDEV_FORMAT_TRY)
-               return v4l2_subdev_get_try_format(&csi->sd, sd_state, pad);
-       else
-               return v4l2_subdev_get_try_format(&csi->sd, &state, pad);
-}
-
 int rkisp1_csi_link_sensor(struct rkisp1_device *rkisp1, struct v4l2_subdev *sd,
                           struct rkisp1_sensor_async *s_asd,
                           unsigned int source_pad)
@@ -76,7 +59,8 @@ int rkisp1_csi_link_sensor(struct rkisp1_device *rkisp1, struct v4l2_subdev *sd,
 }
 
 static int rkisp1_csi_config(struct rkisp1_csi *csi,
-                            const struct rkisp1_sensor_async *sensor)
+                            const struct rkisp1_sensor_async *sensor,
+                            const struct rkisp1_mbus_info *format)
 {
        struct rkisp1_device *rkisp1 = csi->rkisp1;
        unsigned int lanes = sensor->lanes;
@@ -98,7 +82,7 @@ static int rkisp1_csi_config(struct rkisp1_csi *csi,
 
        /* Configure Data Type and Virtual Channel */
        rkisp1_write(rkisp1, RKISP1_CIF_MIPI_IMG_DATA_SEL,
-                    RKISP1_CIF_MIPI_DATA_SEL_DT(csi->sink_fmt->mipi_dt) |
+                    RKISP1_CIF_MIPI_DATA_SEL_DT(format->mipi_dt) |
                     RKISP1_CIF_MIPI_DATA_SEL_VC(0));
 
        /* Clear MIPI interrupts */
@@ -151,7 +135,8 @@ static void rkisp1_csi_disable(struct rkisp1_csi *csi)
 }
 
 static int rkisp1_csi_start(struct rkisp1_csi *csi,
-                           const struct rkisp1_sensor_async *sensor)
+                           const struct rkisp1_sensor_async *sensor,
+                           const struct rkisp1_mbus_info *format)
 {
        struct rkisp1_device *rkisp1 = csi->rkisp1;
        union phy_configure_opts opts;
@@ -159,7 +144,7 @@ static int rkisp1_csi_start(struct rkisp1_csi *csi,
        s64 pixel_clock;
        int ret;
 
-       ret = rkisp1_csi_config(csi, sensor);
+       ret = rkisp1_csi_config(csi, sensor, format);
        if (ret)
                return ret;
 
@@ -169,7 +154,7 @@ static int rkisp1_csi_start(struct rkisp1_csi *csi,
                return -EINVAL;
        }
 
-       phy_mipi_dphy_get_default_config(pixel_clock, csi->sink_fmt->bus_width,
+       phy_mipi_dphy_get_default_config(pixel_clock, format->bus_width,
                                         sensor->lanes, cfg);
        phy_set_mode(csi->dphy, PHY_MODE_MIPI_DPHY);
        phy_configure(csi->dphy, &opts);
@@ -248,7 +233,6 @@ static int rkisp1_csi_enum_mbus_code(struct v4l2_subdev *sd,
                                     struct v4l2_subdev_state *sd_state,
                                     struct v4l2_subdev_mbus_code_enum *code)
 {
-       struct rkisp1_csi *csi = to_rkisp1_csi(sd);
        unsigned int i;
        int pos = 0;
 
@@ -258,15 +242,10 @@ static int rkisp1_csi_enum_mbus_code(struct v4l2_subdev *sd,
                if (code->index)
                        return -EINVAL;
 
-               mutex_lock(&csi->lock);
-
-               sink_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state,
-                                                 RKISP1_CSI_PAD_SINK,
-                                                 code->which);
+               sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
+                                                     RKISP1_CSI_PAD_SINK);
                code->code = sink_fmt->code;
 
-               mutex_unlock(&csi->lock);
-
                return 0;
        }
 
@@ -296,9 +275,9 @@ static int rkisp1_csi_init_config(struct v4l2_subdev *sd,
 {
        struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
 
-       sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+       sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
                                              RKISP1_CSI_PAD_SINK);
-       src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+       src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
                                             RKISP1_CSI_PAD_SRC);
 
        sink_fmt->width = RKISP1_DEFAULT_WIDTH;
@@ -311,36 +290,18 @@ static int rkisp1_csi_init_config(struct v4l2_subdev *sd,
        return 0;
 }
 
-static int rkisp1_csi_get_fmt(struct v4l2_subdev *sd,
-                             struct v4l2_subdev_state *sd_state,
-                             struct v4l2_subdev_format *fmt)
-{
-       struct rkisp1_csi *csi = to_rkisp1_csi(sd);
-
-       mutex_lock(&csi->lock);
-       fmt->format = *rkisp1_csi_get_pad_fmt(csi, sd_state, fmt->pad,
-                                             fmt->which);
-       mutex_unlock(&csi->lock);
-
-       return 0;
-}
-
 static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
                              struct v4l2_subdev_state *sd_state,
                              struct v4l2_subdev_format *fmt)
 {
-       struct rkisp1_csi *csi = to_rkisp1_csi(sd);
        const struct rkisp1_mbus_info *mbus_info;
        struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
 
        /* The format on the source pad always matches the sink pad. */
        if (fmt->pad == RKISP1_CSI_PAD_SRC)
-               return rkisp1_csi_get_fmt(sd, sd_state, fmt);
+               return v4l2_subdev_get_fmt(sd, sd_state, fmt);
 
-       mutex_lock(&csi->lock);
-
-       sink_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state, RKISP1_CSI_PAD_SINK,
-                                         fmt->which);
+       sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state, RKISP1_CSI_PAD_SINK);
 
        sink_fmt->code = fmt->format.code;
 
@@ -359,16 +320,10 @@ static int rkisp1_csi_set_fmt(struct v4l2_subdev *sd,
 
        fmt->format = *sink_fmt;
 
-       if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
-               csi->sink_fmt = mbus_info;
-
        /* Propagate the format to the source pad. */
-       src_fmt = rkisp1_csi_get_pad_fmt(csi, sd_state, RKISP1_CSI_PAD_SRC,
-                                        fmt->which);
+       src_fmt = v4l2_subdev_get_pad_format(sd, sd_state, RKISP1_CSI_PAD_SRC);
        *src_fmt = *sink_fmt;
 
-       mutex_unlock(&csi->lock);
-
        return 0;
 }
 
@@ -380,8 +335,11 @@ static int rkisp1_csi_s_stream(struct v4l2_subdev *sd, int enable)
 {
        struct rkisp1_csi *csi = to_rkisp1_csi(sd);
        struct rkisp1_device *rkisp1 = csi->rkisp1;
+       const struct v4l2_mbus_framefmt *sink_fmt;
+       const struct rkisp1_mbus_info *format;
        struct rkisp1_sensor_async *source_asd;
        struct v4l2_async_connection *asc;
+       struct v4l2_subdev_state *sd_state;
        struct media_pad *source_pad;
        struct v4l2_subdev *source;
        int ret;
@@ -415,9 +373,12 @@ static int rkisp1_csi_s_stream(struct v4l2_subdev *sd, int enable)
        if (source_asd->mbus_type != V4L2_MBUS_CSI2_DPHY)
                return -EINVAL;
 
-       mutex_lock(&csi->lock);
-       ret = rkisp1_csi_start(csi, source_asd);
-       mutex_unlock(&csi->lock);
+       sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+       sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state, RKISP1_CSI_PAD_SINK);
+       format = rkisp1_mbus_info_get_by_code(sink_fmt->code);
+       v4l2_subdev_unlock_state(sd_state);
+
+       ret = rkisp1_csi_start(csi, source_asd, format);
        if (ret)
                return ret;
 
@@ -447,7 +408,7 @@ static const struct v4l2_subdev_video_ops rkisp1_csi_video_ops = {
 static const struct v4l2_subdev_pad_ops rkisp1_csi_pad_ops = {
        .enum_mbus_code = rkisp1_csi_enum_mbus_code,
        .init_cfg = rkisp1_csi_init_config,
-       .get_fmt = rkisp1_csi_get_fmt,
+       .get_fmt = v4l2_subdev_get_fmt,
        .set_fmt = rkisp1_csi_set_fmt,
 };
 
@@ -459,13 +420,11 @@ static const struct v4l2_subdev_ops rkisp1_csi_ops = {
 int rkisp1_csi_register(struct rkisp1_device *rkisp1)
 {
        struct rkisp1_csi *csi = &rkisp1->csi;
-       struct v4l2_subdev_state state = {};
        struct media_pad *pads;
        struct v4l2_subdev *sd;
        int ret;
 
        csi->rkisp1 = rkisp1;
-       mutex_init(&csi->lock);
 
        sd = &csi->sd;
        v4l2_subdev_init(sd, &rkisp1_csi_ops);
@@ -481,26 +440,26 @@ int rkisp1_csi_register(struct rkisp1_device *rkisp1)
        pads[RKISP1_CSI_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE |
                                         MEDIA_PAD_FL_MUST_CONNECT;
 
-       csi->sink_fmt = rkisp1_mbus_info_get_by_code(RKISP1_CSI_DEF_FMT);
-
        ret = media_entity_pads_init(&sd->entity, RKISP1_CSI_PAD_NUM, pads);
        if (ret)
-               goto error;
+               goto err_entity_cleanup;
 
-       state.pads = csi->pad_cfg;
-       rkisp1_csi_init_config(sd, &state);
+       ret = v4l2_subdev_init_finalize(sd);
+       if (ret)
+               goto err_entity_cleanup;
 
        ret = v4l2_device_register_subdev(&csi->rkisp1->v4l2_dev, sd);
        if (ret) {
                dev_err(sd->dev, "Failed to register csi receiver subdev\n");
-               goto error;
+               goto err_subdev_cleanup;
        }
 
        return 0;
 
-error:
+err_subdev_cleanup:
+       v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
        media_entity_cleanup(&sd->entity);
-       mutex_destroy(&csi->lock);
        csi->rkisp1 = NULL;
        return ret;
 }
@@ -513,8 +472,8 @@ void rkisp1_csi_unregister(struct rkisp1_device *rkisp1)
                return;
 
        v4l2_device_unregister_subdev(&csi->sd);
+       v4l2_subdev_cleanup(&csi->sd);
        media_entity_cleanup(&csi->sd.entity);
-       mutex_destroy(&csi->lock);
 }
 
 int rkisp1_csi_init(struct rkisp1_device *rkisp1)
index 07fbb77ce2349e4e9db7b9dbb89b662326db4feb..88ca8b2283b7f067069535b2bc7aabc4e6d0c0be 100644 (file)
  * +---------------------------------------------------------+
  */
 
-/* ----------------------------------------------------------------------------
- * Helpers
- */
-
-static struct v4l2_mbus_framefmt *
-rkisp1_isp_get_pad_fmt(struct rkisp1_isp *isp,
-                      struct v4l2_subdev_state *sd_state,
-                      unsigned int pad, u32 which)
-{
-       struct v4l2_subdev_state state = {
-               .pads = isp->pad_cfg
-       };
-
-       if (which == V4L2_SUBDEV_FORMAT_TRY)
-               return v4l2_subdev_get_try_format(&isp->sd, sd_state, pad);
-       else
-               return v4l2_subdev_get_try_format(&isp->sd, &state, pad);
-}
-
-static struct v4l2_rect *
-rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp,
-                       struct v4l2_subdev_state *sd_state,
-                       unsigned int pad, u32 which)
-{
-       struct v4l2_subdev_state state = {
-               .pads = isp->pad_cfg
-       };
-
-       if (which == V4L2_SUBDEV_FORMAT_TRY)
-               return v4l2_subdev_get_try_crop(&isp->sd, sd_state, pad);
-       else
-               return v4l2_subdev_get_try_crop(&isp->sd, &state, pad);
-}
-
 /* ----------------------------------------------------------------------------
  * Camera Interface registers configurations
  */
@@ -96,12 +62,12 @@ rkisp1_isp_get_pad_crop(struct rkisp1_isp *isp,
  * This should only be called when configuring CIF
  * or at the frame end interrupt
  */
-static void rkisp1_config_ism(struct rkisp1_isp *isp)
+static void rkisp1_config_ism(struct rkisp1_isp *isp,
+                             struct v4l2_subdev_state *sd_state)
 {
        const struct v4l2_rect *src_crop =
-               rkisp1_isp_get_pad_crop(isp, NULL,
-                                       RKISP1_ISP_PAD_SOURCE_VIDEO,
-                                       V4L2_SUBDEV_FORMAT_ACTIVE);
+               v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+                                        RKISP1_ISP_PAD_SOURCE_VIDEO);
        struct rkisp1_device *rkisp1 = isp->rkisp1;
        u32 val;
 
@@ -125,21 +91,26 @@ static void rkisp1_config_ism(struct rkisp1_isp *isp)
  * configure ISP blocks with input format, size......
  */
 static int rkisp1_config_isp(struct rkisp1_isp *isp,
+                            struct v4l2_subdev_state *sd_state,
                             enum v4l2_mbus_type mbus_type, u32 mbus_flags)
 {
        struct rkisp1_device *rkisp1 = isp->rkisp1;
        u32 isp_ctrl = 0, irq_mask = 0, acq_mult = 0, acq_prop = 0;
-       const struct rkisp1_mbus_info *sink_fmt = isp->sink_fmt;
-       const struct rkisp1_mbus_info *src_fmt = isp->src_fmt;
+       const struct rkisp1_mbus_info *sink_fmt;
+       const struct rkisp1_mbus_info *src_fmt;
+       const struct v4l2_mbus_framefmt *src_frm;
        const struct v4l2_mbus_framefmt *sink_frm;
        const struct v4l2_rect *sink_crop;
 
-       sink_frm = rkisp1_isp_get_pad_fmt(isp, NULL,
-                                         RKISP1_ISP_PAD_SINK_VIDEO,
-                                         V4L2_SUBDEV_FORMAT_ACTIVE);
-       sink_crop = rkisp1_isp_get_pad_crop(isp, NULL,
-                                           RKISP1_ISP_PAD_SINK_VIDEO,
-                                           V4L2_SUBDEV_FORMAT_ACTIVE);
+       sink_frm = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+                                             RKISP1_ISP_PAD_SINK_VIDEO);
+       sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+                                            RKISP1_ISP_PAD_SINK_VIDEO);
+       src_frm = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+                                            RKISP1_ISP_PAD_SOURCE_VIDEO);
+
+       sink_fmt = rkisp1_mbus_info_get_by_code(sink_frm->code);
+       src_fmt = rkisp1_mbus_info_get_by_code(src_frm->code);
 
        if (sink_fmt->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
                acq_mult = 1;
@@ -230,14 +201,15 @@ static int rkisp1_config_isp(struct rkisp1_isp *isp,
        } else {
                struct v4l2_mbus_framefmt *src_frm;
 
-               src_frm = rkisp1_isp_get_pad_fmt(isp, NULL,
-                                                RKISP1_ISP_PAD_SOURCE_VIDEO,
-                                                V4L2_SUBDEV_FORMAT_ACTIVE);
+               src_frm = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+                                                    RKISP1_ISP_PAD_SOURCE_VIDEO);
                rkisp1_params_pre_configure(&rkisp1->params, sink_fmt->bayer_pat,
                                            src_frm->quantization,
                                            src_frm->ycbcr_enc);
        }
 
+       isp->sink_fmt = sink_fmt;
+
        return 0;
 }
 
@@ -258,16 +230,17 @@ static void rkisp1_config_path(struct rkisp1_isp *isp,
 
 /* Hardware configure Entry */
 static int rkisp1_config_cif(struct rkisp1_isp *isp,
+                            struct v4l2_subdev_state *sd_state,
                             enum v4l2_mbus_type mbus_type, u32 mbus_flags)
 {
        int ret;
 
-       ret = rkisp1_config_isp(isp, mbus_type, mbus_flags);
+       ret = rkisp1_config_isp(isp, sd_state, mbus_type, mbus_flags);
        if (ret)
                return ret;
 
        rkisp1_config_path(isp, mbus_type);
-       rkisp1_config_ism(isp);
+       rkisp1_config_ism(isp, sd_state);
 
        return 0;
 }
@@ -328,9 +301,12 @@ static void rkisp1_config_clk(struct rkisp1_isp *isp)
        }
 }
 
-static void rkisp1_isp_start(struct rkisp1_isp *isp)
+static void rkisp1_isp_start(struct rkisp1_isp *isp,
+                            struct v4l2_subdev_state *sd_state)
 {
        struct rkisp1_device *rkisp1 = isp->rkisp1;
+       const struct v4l2_mbus_framefmt *src_fmt;
+       const struct rkisp1_mbus_info *src_info;
        u32 val;
 
        rkisp1_config_clk(isp);
@@ -342,7 +318,11 @@ static void rkisp1_isp_start(struct rkisp1_isp *isp)
               RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE;
        rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val);
 
-       if (isp->src_fmt->pixel_enc != V4L2_PIXEL_ENC_BAYER)
+       src_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+                                            RKISP1_ISP_PAD_SOURCE_VIDEO);
+       src_info = rkisp1_mbus_info_get_by_code(src_fmt->code);
+
+       if (src_info->pixel_enc != V4L2_PIXEL_ENC_BAYER)
                rkisp1_params_post_configure(&rkisp1->params);
 }
 
@@ -436,7 +416,7 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
        struct v4l2_rect *sink_crop, *src_crop;
 
        /* Video. */
-       sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+       sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
                                              RKISP1_ISP_PAD_SINK_VIDEO);
        sink_fmt->width = RKISP1_DEFAULT_WIDTH;
        sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
@@ -447,14 +427,14 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
        sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
        sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE;
 
-       sink_crop = v4l2_subdev_get_try_crop(sd, sd_state,
+       sink_crop = v4l2_subdev_get_pad_crop(sd, sd_state,
                                             RKISP1_ISP_PAD_SINK_VIDEO);
        sink_crop->width = RKISP1_DEFAULT_WIDTH;
        sink_crop->height = RKISP1_DEFAULT_HEIGHT;
        sink_crop->left = 0;
        sink_crop->top = 0;
 
-       src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+       src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
                                             RKISP1_ISP_PAD_SOURCE_VIDEO);
        *src_fmt = *sink_fmt;
        src_fmt->code = RKISP1_DEF_SRC_PAD_FMT;
@@ -463,14 +443,14 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
        src_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
        src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
 
-       src_crop = v4l2_subdev_get_try_crop(sd, sd_state,
+       src_crop = v4l2_subdev_get_pad_crop(sd, sd_state,
                                            RKISP1_ISP_PAD_SOURCE_VIDEO);
        *src_crop = *sink_crop;
 
        /* Parameters and statistics. */
-       sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+       sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
                                              RKISP1_ISP_PAD_SINK_PARAMS);
-       src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+       src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
                                             RKISP1_ISP_PAD_SOURCE_STATS);
        sink_fmt->width = 0;
        sink_fmt->height = 0;
@@ -483,8 +463,7 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd,
 
 static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
                                   struct v4l2_subdev_state *sd_state,
-                                  struct v4l2_mbus_framefmt *format,
-                                  unsigned int which)
+                                  struct v4l2_mbus_framefmt *format)
 {
        const struct rkisp1_mbus_info *sink_info;
        const struct rkisp1_mbus_info *src_info;
@@ -493,12 +472,12 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
        const struct v4l2_rect *src_crop;
        bool set_csc;
 
-       sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
-                                         RKISP1_ISP_PAD_SINK_VIDEO, which);
-       src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
-                                        RKISP1_ISP_PAD_SOURCE_VIDEO, which);
-       src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
-                                          RKISP1_ISP_PAD_SOURCE_VIDEO, which);
+       sink_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+                                             RKISP1_ISP_PAD_SINK_VIDEO);
+       src_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+                                            RKISP1_ISP_PAD_SOURCE_VIDEO);
+       src_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+                                           RKISP1_ISP_PAD_SOURCE_VIDEO);
 
        /*
         * Media bus code. The ISP can operate in pass-through mode (Bayer in,
@@ -581,26 +560,20 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp,
         */
        if (set_csc)
                format->flags |= V4L2_MBUS_FRAMEFMT_SET_CSC;
-
-       /* Store the source format info when setting the active format. */
-       if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
-               isp->src_fmt = src_info;
 }
 
 static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
                                    struct v4l2_subdev_state *sd_state,
-                                   struct v4l2_rect *r, unsigned int which)
+                                   struct v4l2_rect *r)
 {
        struct v4l2_mbus_framefmt *src_fmt;
        const struct v4l2_rect *sink_crop;
        struct v4l2_rect *src_crop;
 
-       src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
-                                          RKISP1_ISP_PAD_SOURCE_VIDEO,
-                                          which);
-       sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
-                                           RKISP1_ISP_PAD_SINK_VIDEO,
-                                           which);
+       src_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+                                           RKISP1_ISP_PAD_SOURCE_VIDEO);
+       sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+                                            RKISP1_ISP_PAD_SINK_VIDEO);
 
        src_crop->left = ALIGN(r->left, 2);
        src_crop->width = ALIGN(r->width, 2);
@@ -611,24 +584,22 @@ static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp,
        *r = *src_crop;
 
        /* Propagate to out format */
-       src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
-                                        RKISP1_ISP_PAD_SOURCE_VIDEO, which);
-       rkisp1_isp_set_src_fmt(isp, sd_state, src_fmt, which);
+       src_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+                                            RKISP1_ISP_PAD_SOURCE_VIDEO);
+       rkisp1_isp_set_src_fmt(isp, sd_state, src_fmt);
 }
 
 static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
                                     struct v4l2_subdev_state *sd_state,
-                                    struct v4l2_rect *r, unsigned int which)
+                                    struct v4l2_rect *r)
 {
        struct v4l2_rect *sink_crop, *src_crop;
        const struct v4l2_mbus_framefmt *sink_fmt;
 
-       sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
-                                           RKISP1_ISP_PAD_SINK_VIDEO,
-                                           which);
-       sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
-                                         RKISP1_ISP_PAD_SINK_VIDEO,
-                                         which);
+       sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+                                            RKISP1_ISP_PAD_SINK_VIDEO);
+       sink_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+                                             RKISP1_ISP_PAD_SINK_VIDEO);
 
        sink_crop->left = ALIGN(r->left, 2);
        sink_crop->width = ALIGN(r->width, 2);
@@ -639,32 +610,28 @@ static void rkisp1_isp_set_sink_crop(struct rkisp1_isp *isp,
        *r = *sink_crop;
 
        /* Propagate to out crop */
-       src_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
-                                          RKISP1_ISP_PAD_SOURCE_VIDEO, which);
-       rkisp1_isp_set_src_crop(isp, sd_state, src_crop, which);
+       src_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+                                           RKISP1_ISP_PAD_SOURCE_VIDEO);
+       rkisp1_isp_set_src_crop(isp, sd_state, src_crop);
 }
 
 static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
                                    struct v4l2_subdev_state *sd_state,
-                                   struct v4l2_mbus_framefmt *format,
-                                   unsigned int which)
+                                   struct v4l2_mbus_framefmt *format)
 {
        const struct rkisp1_mbus_info *mbus_info;
        struct v4l2_mbus_framefmt *sink_fmt;
        struct v4l2_rect *sink_crop;
        bool is_yuv;
 
-       sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state,
-                                         RKISP1_ISP_PAD_SINK_VIDEO,
-                                         which);
+       sink_fmt = v4l2_subdev_get_pad_format(&isp->sd, sd_state,
+                                             RKISP1_ISP_PAD_SINK_VIDEO);
        sink_fmt->code = format->code;
        mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
        if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SINK)) {
                sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT;
                mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
        }
-       if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
-               isp->sink_fmt = mbus_info;
 
        sink_fmt->width = clamp_t(u32, format->width,
                                  RKISP1_ISP_MIN_WIDTH,
@@ -706,23 +673,9 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp,
        *format = *sink_fmt;
 
        /* Propagate to in crop */
-       sink_crop = rkisp1_isp_get_pad_crop(isp, sd_state,
-                                           RKISP1_ISP_PAD_SINK_VIDEO,
-                                           which);
-       rkisp1_isp_set_sink_crop(isp, sd_state, sink_crop, which);
-}
-
-static int rkisp1_isp_get_fmt(struct v4l2_subdev *sd,
-                             struct v4l2_subdev_state *sd_state,
-                             struct v4l2_subdev_format *fmt)
-{
-       struct rkisp1_isp *isp = to_rkisp1_isp(sd);
-
-       mutex_lock(&isp->ops_lock);
-       fmt->format = *rkisp1_isp_get_pad_fmt(isp, sd_state, fmt->pad,
-                                             fmt->which);
-       mutex_unlock(&isp->ops_lock);
-       return 0;
+       sink_crop = v4l2_subdev_get_pad_crop(&isp->sd, sd_state,
+                                            RKISP1_ISP_PAD_SINK_VIDEO);
+       rkisp1_isp_set_sink_crop(isp, sd_state, sink_crop);
 }
 
 static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
@@ -731,18 +684,13 @@ static int rkisp1_isp_set_fmt(struct v4l2_subdev *sd,
 {
        struct rkisp1_isp *isp = to_rkisp1_isp(sd);
 
-       mutex_lock(&isp->ops_lock);
        if (fmt->pad == RKISP1_ISP_PAD_SINK_VIDEO)
-               rkisp1_isp_set_sink_fmt(isp, sd_state, &fmt->format,
-                                       fmt->which);
+               rkisp1_isp_set_sink_fmt(isp, sd_state, &fmt->format);
        else if (fmt->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
-               rkisp1_isp_set_src_fmt(isp, sd_state, &fmt->format,
-                                      fmt->which);
+               rkisp1_isp_set_src_fmt(isp, sd_state, &fmt->format);
        else
-               fmt->format = *rkisp1_isp_get_pad_fmt(isp, sd_state, fmt->pad,
-                                                     fmt->which);
+               fmt->format = *v4l2_subdev_get_pad_format(sd, sd_state, fmt->pad);
 
-       mutex_unlock(&isp->ops_lock);
        return 0;
 }
 
@@ -750,39 +698,37 @@ static int rkisp1_isp_get_selection(struct v4l2_subdev *sd,
                                    struct v4l2_subdev_state *sd_state,
                                    struct v4l2_subdev_selection *sel)
 {
-       struct rkisp1_isp *isp = to_rkisp1_isp(sd);
        int ret = 0;
 
        if (sel->pad != RKISP1_ISP_PAD_SOURCE_VIDEO &&
            sel->pad != RKISP1_ISP_PAD_SINK_VIDEO)
                return -EINVAL;
 
-       mutex_lock(&isp->ops_lock);
        switch (sel->target) {
        case V4L2_SEL_TGT_CROP_BOUNDS:
                if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO) {
                        struct v4l2_mbus_framefmt *fmt;
 
-                       fmt = rkisp1_isp_get_pad_fmt(isp, sd_state, sel->pad,
-                                                    sel->which);
+                       fmt = v4l2_subdev_get_pad_format(sd, sd_state, sel->pad);
                        sel->r.height = fmt->height;
                        sel->r.width = fmt->width;
                        sel->r.left = 0;
                        sel->r.top = 0;
                } else {
-                       sel->r = *rkisp1_isp_get_pad_crop(isp, sd_state,
-                                                         RKISP1_ISP_PAD_SINK_VIDEO,
-                                                         sel->which);
+                       sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state,
+                                                          RKISP1_ISP_PAD_SINK_VIDEO);
                }
                break;
+
        case V4L2_SEL_TGT_CROP:
-               sel->r = *rkisp1_isp_get_pad_crop(isp, sd_state, sel->pad,
-                                                 sel->which);
+               sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state, sel->pad);
                break;
+
        default:
                ret = -EINVAL;
+               break;
        }
-       mutex_unlock(&isp->ops_lock);
+
        return ret;
 }
 
@@ -798,15 +744,14 @@ static int rkisp1_isp_set_selection(struct v4l2_subdev *sd,
 
        dev_dbg(isp->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
                sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
-       mutex_lock(&isp->ops_lock);
+
        if (sel->pad == RKISP1_ISP_PAD_SINK_VIDEO)
-               rkisp1_isp_set_sink_crop(isp, sd_state, &sel->r, sel->which);
+               rkisp1_isp_set_sink_crop(isp, sd_state, &sel->r);
        else if (sel->pad == RKISP1_ISP_PAD_SOURCE_VIDEO)
-               rkisp1_isp_set_src_crop(isp, sd_state, &sel->r, sel->which);
+               rkisp1_isp_set_src_crop(isp, sd_state, &sel->r);
        else
                ret = -EINVAL;
 
-       mutex_unlock(&isp->ops_lock);
        return ret;
 }
 
@@ -824,7 +769,7 @@ static const struct v4l2_subdev_pad_ops rkisp1_isp_pad_ops = {
        .get_selection = rkisp1_isp_get_selection,
        .set_selection = rkisp1_isp_set_selection,
        .init_cfg = rkisp1_isp_init_config,
-       .get_fmt = rkisp1_isp_get_fmt,
+       .get_fmt = v4l2_subdev_get_fmt,
        .set_fmt = rkisp1_isp_set_fmt,
        .link_validate = v4l2_subdev_link_validate_default,
 };
@@ -837,6 +782,7 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
 {
        struct rkisp1_isp *isp = to_rkisp1_isp(sd);
        struct rkisp1_device *rkisp1 = isp->rkisp1;
+       struct v4l2_subdev_state *sd_state;
        struct media_pad *source_pad;
        struct media_pad *sink_pad;
        enum v4l2_mbus_type mbus_type;
@@ -881,21 +827,23 @@ static int rkisp1_isp_s_stream(struct v4l2_subdev *sd, int enable)
        }
 
        isp->frame_sequence = -1;
-       mutex_lock(&isp->ops_lock);
-       ret = rkisp1_config_cif(isp, mbus_type, mbus_flags);
+
+       sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+
+       ret = rkisp1_config_cif(isp, sd_state, mbus_type, mbus_flags);
        if (ret)
-               goto mutex_unlock;
+               goto out_unlock;
 
-       rkisp1_isp_start(isp);
+       rkisp1_isp_start(isp, sd_state);
 
        ret = v4l2_subdev_call(rkisp1->source, video, s_stream, true);
        if (ret) {
                rkisp1_isp_stop(isp);
-               goto mutex_unlock;
+               goto out_unlock;
        }
 
-mutex_unlock:
-       mutex_unlock(&isp->ops_lock);
+out_unlock:
+       v4l2_subdev_unlock_state(sd_state);
        return ret;
 }
 
@@ -933,9 +881,6 @@ static const struct v4l2_subdev_ops rkisp1_isp_ops = {
 
 int rkisp1_isp_register(struct rkisp1_device *rkisp1)
 {
-       struct v4l2_subdev_state state = {
-               .pads = rkisp1->isp.pad_cfg
-       };
        struct rkisp1_isp *isp = &rkisp1->isp;
        struct media_pad *pads = isp->pads;
        struct v4l2_subdev *sd = &isp->sd;
@@ -956,27 +901,26 @@ int rkisp1_isp_register(struct rkisp1_device *rkisp1)
        pads[RKISP1_ISP_PAD_SOURCE_VIDEO].flags = MEDIA_PAD_FL_SOURCE;
        pads[RKISP1_ISP_PAD_SOURCE_STATS].flags = MEDIA_PAD_FL_SOURCE;
 
-       isp->sink_fmt = rkisp1_mbus_info_get_by_code(RKISP1_DEF_SINK_PAD_FMT);
-       isp->src_fmt = rkisp1_mbus_info_get_by_code(RKISP1_DEF_SRC_PAD_FMT);
-
-       mutex_init(&isp->ops_lock);
        ret = media_entity_pads_init(&sd->entity, RKISP1_ISP_PAD_MAX, pads);
        if (ret)
-               goto error;
+               goto err_entity_cleanup;
+
+       ret = v4l2_subdev_init_finalize(sd);
+       if (ret)
+               goto err_subdev_cleanup;
 
        ret = v4l2_device_register_subdev(&rkisp1->v4l2_dev, sd);
        if (ret) {
                dev_err(rkisp1->dev, "Failed to register isp subdev\n");
-               goto error;
+               goto err_subdev_cleanup;
        }
 
-       rkisp1_isp_init_config(sd, &state);
-
        return 0;
 
-error:
+err_subdev_cleanup:
+       v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
        media_entity_cleanup(&sd->entity);
-       mutex_destroy(&isp->ops_lock);
        isp->sd.v4l2_dev = NULL;
        return ret;
 }
@@ -990,7 +934,6 @@ void rkisp1_isp_unregister(struct rkisp1_device *rkisp1)
 
        v4l2_device_unregister_subdev(&isp->sd);
        media_entity_cleanup(&isp->sd.entity);
-       mutex_destroy(&isp->ops_lock);
 }
 
 /* ----------------------------------------------------------------------------
index 3482f7d707b75d0dabcacc4b71bb3348585362cd..173d1ea4187482123232b1540446784f9f198167 100644 (file)
@@ -812,7 +812,7 @@ static void rkisp1_hst_config_v10(struct rkisp1_params *params,
                                                                weight[2], weight[3]));
 
        rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_HIST_WEIGHT_44_V10,
-                    weight[0] & 0x1F);
+                    weight[0] & 0x1f);
 }
 
 static void rkisp1_hst_config_v12(struct rkisp1_params *params,
@@ -1726,7 +1726,7 @@ static const struct rkisp1_params_ops rkisp1_v10_params_ops = {
        .afm_config = rkisp1_afm_config_v10,
 };
 
-static struct rkisp1_params_ops rkisp1_v12_params_ops = {
+static const struct rkisp1_params_ops rkisp1_v12_params_ops = {
        .lsc_matrix_config = rkisp1_lsc_matrix_config_v12,
        .goc_config = rkisp1_goc_config_v12,
        .awb_meas_config = rkisp1_awb_meas_config_v12,
index 421cc73355dbfa3d67da88cdc3cf111ebea3f1af..350f452e676f254aa5b30f17fd74cf6fa3ac848d 100644 (file)
 #define RKISP1_CIF_C_PROC_YOUT_FULL                    BIT(1)
 #define RKISP1_CIF_C_PROC_YIN_FULL                     BIT(2)
 #define RKISP1_CIF_C_PROC_COUT_FULL                    BIT(3)
-#define RKISP1_CIF_C_PROC_CTRL_RESERVED                        0xFFFFFFFE
-#define RKISP1_CIF_C_PROC_CONTRAST_RESERVED            0xFFFFFF00
-#define RKISP1_CIF_C_PROC_BRIGHTNESS_RESERVED          0xFFFFFF00
-#define RKISP1_CIF_C_PROC_HUE_RESERVED                 0xFFFFFF00
-#define RKISP1_CIF_C_PROC_SATURATION_RESERVED          0xFFFFFF00
-#define RKISP1_CIF_C_PROC_MACC_RESERVED                        0xE000E000
-#define RKISP1_CIF_C_PROC_TONE_RESERVED                        0xF000
+#define RKISP1_CIF_C_PROC_CTRL_RESERVED                        0xfffffffe
+#define RKISP1_CIF_C_PROC_CONTRAST_RESERVED            0xffffff00
+#define RKISP1_CIF_C_PROC_BRIGHTNESS_RESERVED          0xffffff00
+#define RKISP1_CIF_C_PROC_HUE_RESERVED                 0xffffff00
+#define RKISP1_CIF_C_PROC_SATURATION_RESERVED          0xffffff00
+#define RKISP1_CIF_C_PROC_MACC_RESERVED                        0xe000e000
+#define RKISP1_CIF_C_PROC_TONE_RESERVED                        0xf000
 /* DUAL_CROP_CTRL */
 #define RKISP1_CIF_DUAL_CROP_MP_MODE_BYPASS            (0 << 0)
 #define RKISP1_CIF_DUAL_CROP_MP_MODE_YUV               (1 << 0)
 #define RKISP1_CIF_IMG_EFF_CTRL_MODE_EMBOSS_SHIFT      4
 #define RKISP1_CIF_IMG_EFF_CTRL_MODE_SKETCH_SHIFT      5
 #define RKISP1_CIF_IMG_EFF_CTRL_MODE_SHARPEN_SHIFT     6
-#define RKISP1_CIF_IMG_EFF_CTRL_MODE_MASK              0xE
+#define RKISP1_CIF_IMG_EFF_CTRL_MODE_MASK              0xe
 
 /* IMG_EFF_COLOR_SEL */
 #define RKISP1_CIF_IMG_EFF_COLOR_RGB                   0
 
 /* MIPI_CTRL */
 #define RKISP1_CIF_MIPI_CTRL_OUTPUT_ENA                        BIT(0)
-#define RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(a)          (((a) & 0xF) << 8)
+#define RKISP1_CIF_MIPI_CTRL_SHUTDOWNLANES(a)          (((a) & 0xf) << 8)
 #define RKISP1_CIF_MIPI_CTRL_NUM_LANES(a)              (((a) & 0x3) << 12)
 #define RKISP1_CIF_MIPI_CTRL_ERR_SOT_HS_SKIP           BIT(16)
 #define RKISP1_CIF_MIPI_CTRL_ERR_SOT_SYNC_HS_SKIP      BIT(17)
 
 /* MIPI_DATA_SEL */
 #define RKISP1_CIF_MIPI_DATA_SEL_VC(a)                 (((a) & 0x3) << 6)
-#define RKISP1_CIF_MIPI_DATA_SEL_DT(a)                 (((a) & 0x3F) << 0)
+#define RKISP1_CIF_MIPI_DATA_SEL_DT(a)                 (((a) & 0x3f) << 0)
 
 /* MIPI_IMSC, MIPI_RIS, MIPI_MIS, MIPI_ICR, MIPI_ISR */
-#define RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(a)             (((a) & 0xF) << 0)
-#define RKISP1_CIF_MIPI_ERR_SOT(a)                     (((a) & 0xF) << 4)
-#define RKISP1_CIF_MIPI_ERR_SOT_SYNC(a)                        (((a) & 0xF) << 8)
-#define RKISP1_CIF_MIPI_ERR_EOT_SYNC(a)                        (((a) & 0xF) << 12)
-#define RKISP1_CIF_MIPI_ERR_CTRL(a)                    (((a) & 0xF) << 16)
+#define RKISP1_CIF_MIPI_SYNC_FIFO_OVFLW(a)             (((a) & 0xf) << 0)
+#define RKISP1_CIF_MIPI_ERR_SOT(a)                     (((a) & 0xf) << 4)
+#define RKISP1_CIF_MIPI_ERR_SOT_SYNC(a)                        (((a) & 0xf) << 8)
+#define RKISP1_CIF_MIPI_ERR_EOT_SYNC(a)                        (((a) & 0xf) << 12)
+#define RKISP1_CIF_MIPI_ERR_CTRL(a)                    (((a) & 0xf) << 16)
 #define RKISP1_CIF_MIPI_ERR_PROTOCOL                   BIT(20)
 #define RKISP1_CIF_MIPI_ERR_ECC1                       BIT(21)
 #define RKISP1_CIF_MIPI_ERR_ECC2                       BIT(22)
 #define RKISP1_CIF_ISP_HIST_PROP_MODE_BLUE_V10         (4 << 0)
 #define RKISP1_CIF_ISP_HIST_PROP_MODE_LUM_V10          (5 << 0)
 #define RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10         0x7
-#define RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(x)          (((x) & 0x7F) << 3)
+#define RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(x)          (((x) & 0x7f) << 3)
 #define RKISP1_CIF_ISP_HIST_WEIGHT_SET_V10(v0, v1, v2, v3)     \
-                                    (((v0) & 0x1F) | (((v1) & 0x1F) << 8)  |\
-                                    (((v2) & 0x1F) << 16) | \
-                                    (((v3) & 0x1F) << 24))
-
-#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED_V10 0xFFFFF000
-#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED_V10   0xFFFFF800
-#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED_V10                0xE0E0E0E0
-#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER_V10         0x0000007F
+                                    (((v0) & 0x1f) | (((v1) & 0x1f) << 8)  |\
+                                    (((v2) & 0x1f) << 16) | \
+                                    (((v3) & 0x1f) << 24))
+
+#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED_V10 0xfffff000
+#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED_V10   0xfffff800
+#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED_V10                0xe0e0e0e0
+#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER_V10         0x0000007f
 #define RKISP1_CIF_ISP_HIST_ROW_NUM_V10                        5
 #define RKISP1_CIF_ISP_HIST_COLUMN_NUM_V10             5
-#define RKISP1_CIF_ISP_HIST_GET_BIN_V10(x)             ((x) & 0x000FFFFF)
+#define RKISP1_CIF_ISP_HIST_GET_BIN_V10(x)             ((x) & 0x000fffff)
 
 /* ISP HISTOGRAM CALCULATION : CIF_ISP_HIST */
 #define RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(x)         (((x) & 0x01) << 0)
 #define RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12           RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(0x01)
-#define RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(x)   (((x) & 0x7F) << 1)
+#define RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(x)   (((x) & 0x7f) << 1)
 #define RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(x)       (((x) & 0x07) << 8)
 #define RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12         RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(0x07)
 #define RKISP1_CIF_ISP_HIST_CTRL_AUTOSTOP_SET_V12(x)   (((x) & 0x01) << 11)
-#define RKISP1_CIF_ISP_HIST_CTRL_WATERLINE_SET_V12(x)  (((x) & 0xFFF) << 12)
+#define RKISP1_CIF_ISP_HIST_CTRL_WATERLINE_SET_V12(x)  (((x) & 0xfff) << 12)
 #define RKISP1_CIF_ISP_HIST_CTRL_DATASEL_SET_V12(x)    (((x) & 0x07) << 24)
 #define RKISP1_CIF_ISP_HIST_CTRL_INTRSEL_SET_V12(x)    (((x) & 0x01) << 27)
 #define RKISP1_CIF_ISP_HIST_CTRL_WNDNUM_SET_V12(x)     (((x) & 0x03) << 28)
                                (RKISP1_CIF_ISP_HIST_ROW_NUM_V12 * RKISP1_CIF_ISP_HIST_COLUMN_NUM_V12)
 
 #define RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(v0, v1, v2, v3)     \
-                               (((v0) & 0x3F) | (((v1) & 0x3F) << 8) |\
-                               (((v2) & 0x3F) << 16) |\
-                               (((v3) & 0x3F) << 24))
+                               (((v0) & 0x3f) | (((v1) & 0x3f) << 8) |\
+                               (((v2) & 0x3f) << 16) |\
+                               (((v3) & 0x3f) << 24))
 
 #define RKISP1_CIF_ISP_HIST_OFFS_SET_V12(v0, v1)       \
-                               (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 16))
+                               (((v0) & 0x1fff) | (((v1) & 0x1fff) << 16))
 #define RKISP1_CIF_ISP_HIST_SIZE_SET_V12(v0, v1)       \
-                               (((v0) & 0x7FF) | (((v1) & 0x7FF) << 16))
+                               (((v0) & 0x7ff) | (((v1) & 0x7ff) << 16))
 
 #define RKISP1_CIF_ISP_HIST_GET_BIN0_V12(x)    \
-                               ((x) & 0xFFFF)
+                               ((x) & 0xffff)
 #define RKISP1_CIF_ISP_HIST_GET_BIN1_V12(x)    \
-                               (((x) >> 16) & 0xFFFF)
+                               (((x) >> 16) & 0xffff)
 
 /* AUTO FOCUS MEASUREMENT:  ISP_AFM_CTRL */
 #define RKISP1_ISP_AFM_CTRL_ENABLE                     BIT(0)
 #define RKISP1_CIFFLASH_CONFIG_VSYNC_POS               BIT(1)
 #define RKISP1_CIFFLASH_CONFIG_PRELIGHT_LOW            BIT(2)
 #define RKISP1_CIFFLASH_CONFIG_SRC_FL_TRIG             BIT(3)
-#define RKISP1_CIFFLASH_CONFIG_DELAY(a)                        (((a) & 0xF) << 4)
+#define RKISP1_CIFFLASH_CONFIG_DELAY(a)                        (((a) & 0xf) << 4)
 
 /* Demosaic:  ISP_DEMOSAIC */
 #define RKISP1_CIF_ISP_DEMOSAIC_BYPASS                 BIT(10)
-#define RKISP1_CIF_ISP_DEMOSAIC_TH(x)                  ((x) & 0xFF)
+#define RKISP1_CIF_ISP_DEMOSAIC_TH(x)                  ((x) & 0xff)
 
 /* ISP_FLAGS_SHD */
 #define RKISP1_CIF_ISP_FLAGS_SHD_ISP_ENABLE_SHD                BIT(0)
 #define RKISP1_CIF_ISP_AWB_YMAX_READ(x)                        (((x) >> 2) & 1)
 #define RKISP1_CIF_ISP_AWB_MODE_RGB_EN                 ((1 << 31) | (0x2 << 0))
 #define RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN               ((0 << 31) | (0x2 << 0))
-#define RKISP1_CIF_ISP_AWB_MODE_MASK_NONE              0xFFFFFFFC
+#define RKISP1_CIF_ISP_AWB_MODE_MASK_NONE              0xfffffffc
 #define RKISP1_CIF_ISP_AWB_MODE_READ(x)                        ((x) & 3)
 #define RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(x)           (((x) & 0x07) << 28)
 #define RKISP1_CIF_ISP_AWB_SET_FRAMES_MASK_V12         RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(0x07)
 /* ISP_AWB_GAIN_RB, ISP_AWB_GAIN_G  */
-#define RKISP1_CIF_ISP_AWB_GAIN_R_SET(x)               (((x) & 0x3FF) << 16)
-#define RKISP1_CIF_ISP_AWB_GAIN_R_READ(x)              (((x) >> 16) & 0x3FF)
-#define RKISP1_CIF_ISP_AWB_GAIN_B_SET(x)               ((x) & 0x3FFF)
-#define RKISP1_CIF_ISP_AWB_GAIN_B_READ(x)              ((x) & 0x3FFF)
+#define RKISP1_CIF_ISP_AWB_GAIN_R_SET(x)               (((x) & 0x3ff) << 16)
+#define RKISP1_CIF_ISP_AWB_GAIN_R_READ(x)              (((x) >> 16) & 0x3ff)
+#define RKISP1_CIF_ISP_AWB_GAIN_B_SET(x)               ((x) & 0x3fff)
+#define RKISP1_CIF_ISP_AWB_GAIN_B_READ(x)              ((x) & 0x3fff)
 /* ISP_AWB_REF */
-#define RKISP1_CIF_ISP_AWB_REF_CR_SET(x)               (((x) & 0xFF) << 8)
-#define RKISP1_CIF_ISP_AWB_REF_CR_READ(x)              (((x) >> 8) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_REF_CB_READ(x)              ((x) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_REF_CR_SET(x)               (((x) & 0xff) << 8)
+#define RKISP1_CIF_ISP_AWB_REF_CR_READ(x)              (((x) >> 8) & 0xff)
+#define RKISP1_CIF_ISP_AWB_REF_CB_READ(x)              ((x) & 0xff)
 /* ISP_AWB_THRESH */
-#define RKISP1_CIF_ISP_AWB_MAX_CS_SET(x)               (((x) & 0xFF) << 8)
-#define RKISP1_CIF_ISP_AWB_MAX_CS_READ(x)              (((x) >> 8) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_MIN_C_READ(x)               ((x) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_MIN_Y_SET(x)                        (((x) & 0xFF) << 16)
-#define RKISP1_CIF_ISP_AWB_MIN_Y_READ(x)               (((x) >> 16) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_MAX_Y_SET(x)                        (((x) & 0xFF) << 24)
-#define RKISP1_CIF_ISP_AWB_MAX_Y_READ(x)                       (((x) >> 24) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_MAX_CS_SET(x)               (((x) & 0xff) << 8)
+#define RKISP1_CIF_ISP_AWB_MAX_CS_READ(x)              (((x) >> 8) & 0xff)
+#define RKISP1_CIF_ISP_AWB_MIN_C_READ(x)               ((x) & 0xff)
+#define RKISP1_CIF_ISP_AWB_MIN_Y_SET(x)                        (((x) & 0xff) << 16)
+#define RKISP1_CIF_ISP_AWB_MIN_Y_READ(x)               (((x) >> 16) & 0xff)
+#define RKISP1_CIF_ISP_AWB_MAX_Y_SET(x)                        (((x) & 0xff) << 24)
+#define RKISP1_CIF_ISP_AWB_MAX_Y_READ(x)                       (((x) >> 24) & 0xff)
 /* ISP_AWB_MEAN */
-#define RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(x)            ((x) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(x)            (((x) >> 8) & 0xFF)
-#define RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(x)             (((x) >> 16) & 0xFF)
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(x)            ((x) & 0xff)
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(x)            (((x) >> 8) & 0xff)
+#define RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(x)             (((x) >> 16) & 0xff)
 /* ISP_AWB_WHITE_CNT */
-#define RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(x)            ((x) & 0x3FFFFFF)
+#define RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(x)            ((x) & 0x3ffffff)
 
-#define RKISP1_CIF_ISP_AWB_GAINS_MAX_VAL               0x000003FF
-#define RKISP1_CIF_ISP_AWB_WINDOW_OFFSET_MAX           0x00000FFF
-#define RKISP1_CIF_ISP_AWB_WINDOW_MAX_SIZE             0x00001FFF
-#define RKISP1_CIF_ISP_AWB_CBCR_MAX_REF                        0x000000FF
-#define RKISP1_CIF_ISP_AWB_THRES_MAX_YC                        0x000000FF
+#define RKISP1_CIF_ISP_AWB_GAINS_MAX_VAL               0x000003ff
+#define RKISP1_CIF_ISP_AWB_WINDOW_OFFSET_MAX           0x00000fff
+#define RKISP1_CIF_ISP_AWB_WINDOW_MAX_SIZE             0x00001fff
+#define RKISP1_CIF_ISP_AWB_CBCR_MAX_REF                        0x000000ff
+#define RKISP1_CIF_ISP_AWB_THRES_MAX_YC                        0x000000ff
 
 /* AE */
 /* ISP_EXP_CTRL */
 #define RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1             BIT(31)
 
 /* ISP_EXP_H_SIZE */
-#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(x)           ((x) & 0x7FF)
-#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V10                     0x000007FF
-#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(x)           ((x) & 0x7FF)
-#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V12             0x000007FF
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(x)           ((x) & 0x7ff)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V10                     0x000007ff
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(x)           ((x) & 0x7ff)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V12             0x000007ff
 /* ISP_EXP_V_SIZE : vertical size must be a multiple of 2). */
-#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(x)           ((x) & 0x7FE)
-#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(x)           (((x) & 0x7FE) << 16)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(x)           ((x) & 0x7fe)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(x)           (((x) & 0x7fe) << 16)
 
 /* ISP_EXP_H_OFFSET */
-#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V10(x)         ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V10(x)         ((x) & 0x1fff)
 #define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V10               2424
-#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(x)         ((x) & 0x1FFF)
-#define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V12               0x1FFF
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(x)         ((x) & 0x1fff)
+#define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V12               0x1fff
 /* ISP_EXP_V_OFFSET */
-#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V10(x)         ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V10(x)         ((x) & 0x1fff)
 #define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V10               1806
-#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(x)         (((x) & 0x1FFF) << 16)
-#define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V12               0x1FFF
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(x)         (((x) & 0x1fff) << 16)
+#define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V12               0x1fff
 
 #define RKISP1_CIF_ISP_EXP_ROW_NUM_V10                 5
 #define RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10                      5
 #define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS_V12 \
        (RKISP1_CIF_ISP_EXP_ROW_NUM_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12)
 
-#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12         0x7FF
-#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V12         0xE
-#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V12         0x7FE
-#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12         0xE
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12         0x7ff
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V12         0xe
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V12         0x7fe
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12         0xe
 #define RKISP1_CIF_ISP_EXP_MAX_HSIZE_V12       \
        (RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12 + 1)
 #define RKISP1_CIF_ISP_EXP_MIN_HSIZE_V12       \
 #define RKISP1_CIF_ISP_EXP_MIN_VSIZE_V12       \
        (RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12 * RKISP1_CIF_ISP_EXP_ROW_NUM_V12 + 1)
 
-#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(x)         ((x) & 0xFF)
-#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(x)         (((x) >> 8) & 0xFF)
-#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(x)         (((x) >> 16) & 0xFF)
-#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(x)         (((x) >> 24) & 0xFF)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(x)         ((x) & 0xff)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(x)         (((x) >> 8) & 0xff)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(x)         (((x) >> 16) & 0xff)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(x)         (((x) >> 24) & 0xff)
 
 /* LSC: ISP_LSC_CTRL */
 #define RKISP1_CIF_ISP_LSC_CTRL_ENA                    BIT(0)
-#define RKISP1_CIF_ISP_LSC_SECT_SIZE_RESERVED          0xFC00FC00
-#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V10           0xF000F000
-#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V10         0xF000F000
-#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V12           0xE000E000
-#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V12         0xE000E000
+#define RKISP1_CIF_ISP_LSC_SECT_SIZE_RESERVED          0xfc00fc00
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V10           0xf000f000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V10         0xf000f000
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V12           0xe000e000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V12         0xe000e000
 #define RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(v0, v1)     \
-       (((v0) & 0xFFF) | (((v1) & 0xFFF) << 12))
+       (((v0) & 0xfff) | (((v1) & 0xfff) << 12))
 #define RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(v0, v1)     \
-       (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 13))
+       (((v0) & 0x1fff) | (((v1) & 0x1fff) << 13))
 #define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1)      \
-       (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
+       (((v0) & 0xfff) | (((v1) & 0xfff) << 16))
 #define RKISP1_CIF_ISP_LSC_SECT_GRAD(v0, v1)      \
-       (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
+       (((v0) & 0xfff) | (((v1) & 0xfff) << 16))
 
 /* LSC: ISP_LSC_TABLE_SEL */
 #define RKISP1_CIF_ISP_LSC_TABLE_0                     0
 #define RKISP1_CIF_ISP_FLT_CHROMA_V_MODE(x)            (((x) & 0x3) << 4)
 #define RKISP1_CIF_ISP_FLT_CHROMA_H_MODE(x)            (((x) & 0x3) << 6)
 #define RKISP1_CIF_ISP_FLT_CHROMA_MODE_MAX             3
-#define RKISP1_CIF_ISP_FLT_GREEN_STAGE1(x)             (((x) & 0xF) << 8)
+#define RKISP1_CIF_ISP_FLT_GREEN_STAGE1(x)             (((x) & 0xf) << 8)
 #define RKISP1_CIF_ISP_FLT_GREEN_STAGE1_MAX            8
-#define RKISP1_CIF_ISP_FLT_THREAD_RESERVED             0xFFFFFC00
-#define RKISP1_CIF_ISP_FLT_FAC_RESERVED                        0xFFFFFFC0
-#define RKISP1_CIF_ISP_FLT_LUM_WEIGHT_RESERVED         0xFFF80000
+#define RKISP1_CIF_ISP_FLT_THREAD_RESERVED             0xfffffc00
+#define RKISP1_CIF_ISP_FLT_FAC_RESERVED                        0xffffffc0
+#define RKISP1_CIF_ISP_FLT_LUM_WEIGHT_RESERVED         0xfff80000
 
-#define RKISP1_CIF_ISP_CTK_COEFF_RESERVED              0xFFFFF800
-#define RKISP1_CIF_ISP_XTALK_OFFSET_RESERVED           0xFFFFF000
+#define RKISP1_CIF_ISP_CTK_COEFF_RESERVED              0xfffff800
+#define RKISP1_CIF_ISP_XTALK_OFFSET_RESERVED           0xfffff000
 
 /* GOC */
 #define RKISP1_CIF_ISP_GAMMA_OUT_MODE_EQU              BIT(0)
 #define RKISP1_CIF_ISP_GOC_MODE_MAX                    1
-#define RKISP1_CIF_ISP_GOC_RESERVED                    0xFFFFF800
+#define RKISP1_CIF_ISP_GOC_RESERVED                    0xfffff800
 /* ISP_CTRL BIT 11*/
 #define RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA_READ(x)  (((x) >> 11) & 1)
 
 #define RKISP1_CIFISP_DEGAMMA_X_RESERVED       \
        ((1 << 31) | (1 << 27) | (1 << 23) | (1 << 19) |\
        (1 << 15) | (1 << 11) | (1 << 7) | (1 << 3))
-#define RKISP1_CIFISP_DEGAMMA_Y_RESERVED               0xFFFFF000
+#define RKISP1_CIFISP_DEGAMMA_Y_RESERVED               0xfffff000
 
 /* GAMMA-OUT */
 #define RKISP1_CIF_ISP_GAMMA_VALUE_V12(x, y)   \
-       (((x) & 0xFFF) << 16 | ((y) & 0xFFF) << 0)
+       (((x) & 0xfff) << 16 | ((y) & 0xfff) << 0)
 
 /* AFM */
 #define RKISP1_CIF_ISP_AFM_ENA                         BIT(0)
-#define RKISP1_CIF_ISP_AFM_THRES_RESERVED              0xFFFF0000
-#define RKISP1_CIF_ISP_AFM_VAR_SHIFT_RESERVED          0xFFF8FFF8
-#define RKISP1_CIF_ISP_AFM_WINDOW_X_RESERVED           0xE000
-#define RKISP1_CIF_ISP_AFM_WINDOW_Y_RESERVED           0xF000
+#define RKISP1_CIF_ISP_AFM_THRES_RESERVED              0xffff0000
+#define RKISP1_CIF_ISP_AFM_VAR_SHIFT_RESERVED          0xfff8fff8
+#define RKISP1_CIF_ISP_AFM_WINDOW_X_RESERVED           0xe000
+#define RKISP1_CIF_ISP_AFM_WINDOW_Y_RESERVED           0xf000
 #define RKISP1_CIF_ISP_AFM_WINDOW_X_MIN                        0x5
 #define RKISP1_CIF_ISP_AFM_WINDOW_Y_MIN                        0x2
-#define RKISP1_CIF_ISP_AFM_WINDOW_X(x)                 (((x) & 0x1FFF) << 16)
-#define RKISP1_CIF_ISP_AFM_WINDOW_Y(x)                 ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_AFM_WINDOW_X(x)                 (((x) & 0x1fff) << 16)
+#define RKISP1_CIF_ISP_AFM_WINDOW_Y(x)                 ((x) & 0x1fff)
 #define RKISP1_CIF_ISP_AFM_SET_SHIFT_a_V12(x, y)       (((x) & 0x7) << 16 | ((y) & 0x7) << 0)
 #define RKISP1_CIF_ISP_AFM_SET_SHIFT_b_V12(x, y)       (((x) & 0x7) << 20 | ((y) & 0x7) << 4)
 #define RKISP1_CIF_ISP_AFM_SET_SHIFT_c_V12(x, y)       (((x) & 0x7) << 24 | ((y) & 0x7) << 8)
 #define RKISP1_CIF_ISP_DPF_MODE_AWB_GAIN_COMP          BIT(7)
 #define RKISP1_CIF_ISP_DPF_MODE_LSC_GAIN_COMP          BIT(8)
 #define RKISP1_CIF_ISP_DPF_MODE_USE_NF_GAIN            BIT(9)
-#define RKISP1_CIF_ISP_DPF_NF_GAIN_RESERVED            0xFFFFF000
-#define RKISP1_CIF_ISP_DPF_SPATIAL_COEFF_MAX           0x1F
-#define RKISP1_CIF_ISP_DPF_NLL_COEFF_N_MAX             0x3FF
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_RESERVED            0xfffff000
+#define RKISP1_CIF_ISP_DPF_SPATIAL_COEFF_MAX           0x1f
+#define RKISP1_CIF_ISP_DPF_NLL_COEFF_N_MAX             0x3ff
 
 /* =================================================================== */
 /*                            CIF Registers                            */
 #define RKISP1_CIF_CTRL_BASE                   0x00000000
 #define RKISP1_CIF_VI_CCL                      (RKISP1_CIF_CTRL_BASE + 0x00000000)
 #define RKISP1_CIF_VI_ID                       (RKISP1_CIF_CTRL_BASE + 0x00000008)
-#define RKISP1_CIF_VI_ISP_CLK_CTRL_V12         (RKISP1_CIF_CTRL_BASE + 0x0000000C)
+#define RKISP1_CIF_VI_ISP_CLK_CTRL_V12         (RKISP1_CIF_CTRL_BASE + 0x0000000c)
 #define RKISP1_CIF_VI_ICCL                     (RKISP1_CIF_CTRL_BASE + 0x00000010)
 #define RKISP1_CIF_VI_IRCL                     (RKISP1_CIF_CTRL_BASE + 0x00000014)
 #define RKISP1_CIF_VI_DPCL                     (RKISP1_CIF_CTRL_BASE + 0x00000018)
 #define RKISP1_CIF_IMG_EFF_CTRL                        (RKISP1_CIF_IMG_EFF_BASE + 0x00000000)
 #define RKISP1_CIF_IMG_EFF_COLOR_SEL           (RKISP1_CIF_IMG_EFF_BASE + 0x00000004)
 #define RKISP1_CIF_IMG_EFF_MAT_1               (RKISP1_CIF_IMG_EFF_BASE + 0x00000008)
-#define RKISP1_CIF_IMG_EFF_MAT_2               (RKISP1_CIF_IMG_EFF_BASE + 0x0000000C)
+#define RKISP1_CIF_IMG_EFF_MAT_2               (RKISP1_CIF_IMG_EFF_BASE + 0x0000000c)
 #define RKISP1_CIF_IMG_EFF_MAT_3               (RKISP1_CIF_IMG_EFF_BASE + 0x00000010)
 #define RKISP1_CIF_IMG_EFF_MAT_4               (RKISP1_CIF_IMG_EFF_BASE + 0x00000014)
 #define RKISP1_CIF_IMG_EFF_MAT_5               (RKISP1_CIF_IMG_EFF_BASE + 0x00000018)
-#define RKISP1_CIF_IMG_EFF_TINT                        (RKISP1_CIF_IMG_EFF_BASE + 0x0000001C)
+#define RKISP1_CIF_IMG_EFF_TINT                        (RKISP1_CIF_IMG_EFF_BASE + 0x0000001c)
 #define RKISP1_CIF_IMG_EFF_CTRL_SHD            (RKISP1_CIF_IMG_EFF_BASE + 0x00000020)
 #define RKISP1_CIF_IMG_EFF_SHARPEN             (RKISP1_CIF_IMG_EFF_BASE + 0x00000024)
 
 #define RKISP1_CIF_SUPER_IMP_CTRL              (RKISP1_CIF_SUPER_IMP_BASE + 0x00000000)
 #define RKISP1_CIF_SUPER_IMP_OFFSET_X          (RKISP1_CIF_SUPER_IMP_BASE + 0x00000004)
 #define RKISP1_CIF_SUPER_IMP_OFFSET_Y          (RKISP1_CIF_SUPER_IMP_BASE + 0x00000008)
-#define RKISP1_CIF_SUPER_IMP_COLOR_Y           (RKISP1_CIF_SUPER_IMP_BASE + 0x0000000C)
+#define RKISP1_CIF_SUPER_IMP_COLOR_Y           (RKISP1_CIF_SUPER_IMP_BASE + 0x0000000c)
 #define RKISP1_CIF_SUPER_IMP_COLOR_CB          (RKISP1_CIF_SUPER_IMP_BASE + 0x00000010)
 #define RKISP1_CIF_SUPER_IMP_COLOR_CR          (RKISP1_CIF_SUPER_IMP_BASE + 0x00000014)
 
 #define RKISP1_CIF_ISP_CTRL                    (RKISP1_CIF_ISP_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_ACQ_PROP                        (RKISP1_CIF_ISP_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_ACQ_H_OFFS              (RKISP1_CIF_ISP_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_ACQ_V_OFFS              (RKISP1_CIF_ISP_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_ACQ_V_OFFS              (RKISP1_CIF_ISP_BASE + 0x0000000c)
 #define RKISP1_CIF_ISP_ACQ_H_SIZE              (RKISP1_CIF_ISP_BASE + 0x00000010)
 #define RKISP1_CIF_ISP_ACQ_V_SIZE              (RKISP1_CIF_ISP_BASE + 0x00000014)
 #define RKISP1_CIF_ISP_ACQ_NR_FRAMES           (RKISP1_CIF_ISP_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_GAMMA_DX_LO             (RKISP1_CIF_ISP_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_GAMMA_DX_LO             (RKISP1_CIF_ISP_BASE + 0x0000001c)
 #define RKISP1_CIF_ISP_GAMMA_DX_HI             (RKISP1_CIF_ISP_BASE + 0x00000020)
 #define RKISP1_CIF_ISP_GAMMA_R_Y0              (RKISP1_CIF_ISP_BASE + 0x00000024)
 #define RKISP1_CIF_ISP_GAMMA_R_Y1              (RKISP1_CIF_ISP_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_GAMMA_R_Y2              (RKISP1_CIF_ISP_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y2              (RKISP1_CIF_ISP_BASE + 0x0000002c)
 #define RKISP1_CIF_ISP_GAMMA_R_Y3              (RKISP1_CIF_ISP_BASE + 0x00000030)
 #define RKISP1_CIF_ISP_GAMMA_R_Y4              (RKISP1_CIF_ISP_BASE + 0x00000034)
 #define RKISP1_CIF_ISP_GAMMA_R_Y5              (RKISP1_CIF_ISP_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_GAMMA_R_Y6              (RKISP1_CIF_ISP_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y6              (RKISP1_CIF_ISP_BASE + 0x0000003c)
 #define RKISP1_CIF_ISP_GAMMA_R_Y7              (RKISP1_CIF_ISP_BASE + 0x00000040)
 #define RKISP1_CIF_ISP_GAMMA_R_Y8              (RKISP1_CIF_ISP_BASE + 0x00000044)
 #define RKISP1_CIF_ISP_GAMMA_R_Y9              (RKISP1_CIF_ISP_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_GAMMA_R_Y10             (RKISP1_CIF_ISP_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y10             (RKISP1_CIF_ISP_BASE + 0x0000004c)
 #define RKISP1_CIF_ISP_GAMMA_R_Y11             (RKISP1_CIF_ISP_BASE + 0x00000050)
 #define RKISP1_CIF_ISP_GAMMA_R_Y12             (RKISP1_CIF_ISP_BASE + 0x00000054)
 #define RKISP1_CIF_ISP_GAMMA_R_Y13             (RKISP1_CIF_ISP_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_GAMMA_R_Y14             (RKISP1_CIF_ISP_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_GAMMA_R_Y14             (RKISP1_CIF_ISP_BASE + 0x0000005c)
 #define RKISP1_CIF_ISP_GAMMA_R_Y15             (RKISP1_CIF_ISP_BASE + 0x00000060)
 #define RKISP1_CIF_ISP_GAMMA_R_Y16             (RKISP1_CIF_ISP_BASE + 0x00000064)
 #define RKISP1_CIF_ISP_GAMMA_G_Y0              (RKISP1_CIF_ISP_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_GAMMA_G_Y1              (RKISP1_CIF_ISP_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y1              (RKISP1_CIF_ISP_BASE + 0x0000006c)
 #define RKISP1_CIF_ISP_GAMMA_G_Y2              (RKISP1_CIF_ISP_BASE + 0x00000070)
 #define RKISP1_CIF_ISP_GAMMA_G_Y3              (RKISP1_CIF_ISP_BASE + 0x00000074)
 #define RKISP1_CIF_ISP_GAMMA_G_Y4              (RKISP1_CIF_ISP_BASE + 0x00000078)
-#define RKISP1_CIF_ISP_GAMMA_G_Y5              (RKISP1_CIF_ISP_BASE + 0x0000007C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y5              (RKISP1_CIF_ISP_BASE + 0x0000007c)
 #define RKISP1_CIF_ISP_GAMMA_G_Y6              (RKISP1_CIF_ISP_BASE + 0x00000080)
 #define RKISP1_CIF_ISP_GAMMA_G_Y7              (RKISP1_CIF_ISP_BASE + 0x00000084)
 #define RKISP1_CIF_ISP_GAMMA_G_Y8              (RKISP1_CIF_ISP_BASE + 0x00000088)
-#define RKISP1_CIF_ISP_GAMMA_G_Y9              (RKISP1_CIF_ISP_BASE + 0x0000008C)
+#define RKISP1_CIF_ISP_GAMMA_G_Y9              (RKISP1_CIF_ISP_BASE + 0x0000008c)
 #define RKISP1_CIF_ISP_GAMMA_G_Y10             (RKISP1_CIF_ISP_BASE + 0x00000090)
 #define RKISP1_CIF_ISP_GAMMA_G_Y11             (RKISP1_CIF_ISP_BASE + 0x00000094)
 #define RKISP1_CIF_ISP_GAMMA_G_Y12             (RKISP1_CIF_ISP_BASE + 0x00000098)
-#define RKISP1_CIF_ISP_GAMMA_G_Y13             (RKISP1_CIF_ISP_BASE + 0x0000009C)
-#define RKISP1_CIF_ISP_GAMMA_G_Y14             (RKISP1_CIF_ISP_BASE + 0x000000A0)
-#define RKISP1_CIF_ISP_GAMMA_G_Y15             (RKISP1_CIF_ISP_BASE + 0x000000A4)
-#define RKISP1_CIF_ISP_GAMMA_G_Y16             (RKISP1_CIF_ISP_BASE + 0x000000A8)
-#define RKISP1_CIF_ISP_GAMMA_B_Y0              (RKISP1_CIF_ISP_BASE + 0x000000AC)
-#define RKISP1_CIF_ISP_GAMMA_B_Y1              (RKISP1_CIF_ISP_BASE + 0x000000B0)
-#define RKISP1_CIF_ISP_GAMMA_B_Y2              (RKISP1_CIF_ISP_BASE + 0x000000B4)
-#define RKISP1_CIF_ISP_GAMMA_B_Y3              (RKISP1_CIF_ISP_BASE + 0x000000B8)
-#define RKISP1_CIF_ISP_GAMMA_B_Y4              (RKISP1_CIF_ISP_BASE + 0x000000BC)
-#define RKISP1_CIF_ISP_GAMMA_B_Y5              (RKISP1_CIF_ISP_BASE + 0x000000C0)
-#define RKISP1_CIF_ISP_GAMMA_B_Y6              (RKISP1_CIF_ISP_BASE + 0x000000C4)
-#define RKISP1_CIF_ISP_GAMMA_B_Y7              (RKISP1_CIF_ISP_BASE + 0x000000C8)
-#define RKISP1_CIF_ISP_GAMMA_B_Y8              (RKISP1_CIF_ISP_BASE + 0x000000CC)
-#define RKISP1_CIF_ISP_GAMMA_B_Y9              (RKISP1_CIF_ISP_BASE + 0x000000D0)
-#define RKISP1_CIF_ISP_GAMMA_B_Y10             (RKISP1_CIF_ISP_BASE + 0x000000D4)
-#define RKISP1_CIF_ISP_GAMMA_B_Y11             (RKISP1_CIF_ISP_BASE + 0x000000D8)
-#define RKISP1_CIF_ISP_GAMMA_B_Y12             (RKISP1_CIF_ISP_BASE + 0x000000DC)
-#define RKISP1_CIF_ISP_GAMMA_B_Y13             (RKISP1_CIF_ISP_BASE + 0x000000E0)
-#define RKISP1_CIF_ISP_GAMMA_B_Y14             (RKISP1_CIF_ISP_BASE + 0x000000E4)
-#define RKISP1_CIF_ISP_GAMMA_B_Y15             (RKISP1_CIF_ISP_BASE + 0x000000E8)
-#define RKISP1_CIF_ISP_GAMMA_B_Y16             (RKISP1_CIF_ISP_BASE + 0x000000EC)
+#define RKISP1_CIF_ISP_GAMMA_G_Y13             (RKISP1_CIF_ISP_BASE + 0x0000009c)
+#define RKISP1_CIF_ISP_GAMMA_G_Y14             (RKISP1_CIF_ISP_BASE + 0x000000a0)
+#define RKISP1_CIF_ISP_GAMMA_G_Y15             (RKISP1_CIF_ISP_BASE + 0x000000a4)
+#define RKISP1_CIF_ISP_GAMMA_G_Y16             (RKISP1_CIF_ISP_BASE + 0x000000a8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y0              (RKISP1_CIF_ISP_BASE + 0x000000ac)
+#define RKISP1_CIF_ISP_GAMMA_B_Y1              (RKISP1_CIF_ISP_BASE + 0x000000b0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y2              (RKISP1_CIF_ISP_BASE + 0x000000b4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y3              (RKISP1_CIF_ISP_BASE + 0x000000b8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y4              (RKISP1_CIF_ISP_BASE + 0x000000bc)
+#define RKISP1_CIF_ISP_GAMMA_B_Y5              (RKISP1_CIF_ISP_BASE + 0x000000c0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y6              (RKISP1_CIF_ISP_BASE + 0x000000c4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y7              (RKISP1_CIF_ISP_BASE + 0x000000c8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y8              (RKISP1_CIF_ISP_BASE + 0x000000cc)
+#define RKISP1_CIF_ISP_GAMMA_B_Y9              (RKISP1_CIF_ISP_BASE + 0x000000d0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y10             (RKISP1_CIF_ISP_BASE + 0x000000d4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y11             (RKISP1_CIF_ISP_BASE + 0x000000d8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y12             (RKISP1_CIF_ISP_BASE + 0x000000dc)
+#define RKISP1_CIF_ISP_GAMMA_B_Y13             (RKISP1_CIF_ISP_BASE + 0x000000e0)
+#define RKISP1_CIF_ISP_GAMMA_B_Y14             (RKISP1_CIF_ISP_BASE + 0x000000e4)
+#define RKISP1_CIF_ISP_GAMMA_B_Y15             (RKISP1_CIF_ISP_BASE + 0x000000e8)
+#define RKISP1_CIF_ISP_GAMMA_B_Y16             (RKISP1_CIF_ISP_BASE + 0x000000ec)
 #define RKISP1_CIF_ISP_AWB_PROP_V10            (RKISP1_CIF_ISP_BASE + 0x00000110)
 #define RKISP1_CIF_ISP_AWB_WND_H_OFFS_V10      (RKISP1_CIF_ISP_BASE + 0x00000114)
 #define RKISP1_CIF_ISP_AWB_WND_V_OFFS_V10      (RKISP1_CIF_ISP_BASE + 0x00000118)
-#define RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10      (RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10      (RKISP1_CIF_ISP_BASE + 0x0000011c)
 #define RKISP1_CIF_ISP_AWB_WND_V_SIZE_V10      (RKISP1_CIF_ISP_BASE + 0x00000120)
 #define RKISP1_CIF_ISP_AWB_FRAMES_V10          (RKISP1_CIF_ISP_BASE + 0x00000124)
 #define RKISP1_CIF_ISP_AWB_REF_V10             (RKISP1_CIF_ISP_BASE + 0x00000128)
-#define RKISP1_CIF_ISP_AWB_THRESH_V10          (RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_THRESH_V10          (RKISP1_CIF_ISP_BASE + 0x0000012c)
 #define RKISP1_CIF_ISP_AWB_GAIN_G_V10          (RKISP1_CIF_ISP_BASE + 0x00000138)
-#define RKISP1_CIF_ISP_AWB_GAIN_RB_V10         (RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB_V10         (RKISP1_CIF_ISP_BASE + 0x0000013c)
 #define RKISP1_CIF_ISP_AWB_WHITE_CNT_V10       (RKISP1_CIF_ISP_BASE + 0x00000140)
 #define RKISP1_CIF_ISP_AWB_MEAN_V10            (RKISP1_CIF_ISP_BASE + 0x00000144)
 #define RKISP1_CIF_ISP_AWB_PROP_V12            (RKISP1_CIF_ISP_BASE + 0x00000110)
 #define RKISP1_CIF_ISP_AWB_SIZE_V12            (RKISP1_CIF_ISP_BASE + 0x00000114)
 #define RKISP1_CIF_ISP_AWB_OFFS_V12            (RKISP1_CIF_ISP_BASE + 0x00000118)
-#define RKISP1_CIF_ISP_AWB_REF_V12             (RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_REF_V12             (RKISP1_CIF_ISP_BASE + 0x0000011c)
 #define RKISP1_CIF_ISP_AWB_THRESH_V12          (RKISP1_CIF_ISP_BASE + 0x00000120)
 #define RKISP1_CIF_ISP_X_COOR12_V12            (RKISP1_CIF_ISP_BASE + 0x00000124)
 #define RKISP1_CIF_ISP_X_COOR34_V12            (RKISP1_CIF_ISP_BASE + 0x00000128)
-#define RKISP1_CIF_ISP_AWB_WHITE_CNT_V12       (RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_WHITE_CNT_V12       (RKISP1_CIF_ISP_BASE + 0x0000012c)
 #define RKISP1_CIF_ISP_AWB_MEAN_V12            (RKISP1_CIF_ISP_BASE + 0x00000130)
 #define RKISP1_CIF_ISP_DEGAIN_V12              (RKISP1_CIF_ISP_BASE + 0x00000134)
 #define RKISP1_CIF_ISP_AWB_GAIN_G_V12          (RKISP1_CIF_ISP_BASE + 0x00000138)
-#define RKISP1_CIF_ISP_AWB_GAIN_RB_V12         (RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB_V12         (RKISP1_CIF_ISP_BASE + 0x0000013c)
 #define RKISP1_CIF_ISP_REGION_LINE_V12         (RKISP1_CIF_ISP_BASE + 0x00000140)
 #define RKISP1_CIF_ISP_WP_CNT_REGION0_V12      (RKISP1_CIF_ISP_BASE + 0x00000160)
 #define RKISP1_CIF_ISP_WP_CNT_REGION1_V12      (RKISP1_CIF_ISP_BASE + 0x00000164)
 #define RKISP1_CIF_ISP_WP_CNT_REGION2_V12      (RKISP1_CIF_ISP_BASE + 0x00000168)
-#define RKISP1_CIF_ISP_WP_CNT_REGION3_V12      (RKISP1_CIF_ISP_BASE + 0x0000016C)
+#define RKISP1_CIF_ISP_WP_CNT_REGION3_V12      (RKISP1_CIF_ISP_BASE + 0x0000016c)
 #define RKISP1_CIF_ISP_CC_COEFF_0              (RKISP1_CIF_ISP_BASE + 0x00000170)
 #define RKISP1_CIF_ISP_CC_COEFF_1              (RKISP1_CIF_ISP_BASE + 0x00000174)
 #define RKISP1_CIF_ISP_CC_COEFF_2              (RKISP1_CIF_ISP_BASE + 0x00000178)
-#define RKISP1_CIF_ISP_CC_COEFF_3              (RKISP1_CIF_ISP_BASE + 0x0000017C)
+#define RKISP1_CIF_ISP_CC_COEFF_3              (RKISP1_CIF_ISP_BASE + 0x0000017c)
 #define RKISP1_CIF_ISP_CC_COEFF_4              (RKISP1_CIF_ISP_BASE + 0x00000180)
 #define RKISP1_CIF_ISP_CC_COEFF_5              (RKISP1_CIF_ISP_BASE + 0x00000184)
 #define RKISP1_CIF_ISP_CC_COEFF_6              (RKISP1_CIF_ISP_BASE + 0x00000188)
-#define RKISP1_CIF_ISP_CC_COEFF_7              (RKISP1_CIF_ISP_BASE + 0x0000018C)
+#define RKISP1_CIF_ISP_CC_COEFF_7              (RKISP1_CIF_ISP_BASE + 0x0000018c)
 #define RKISP1_CIF_ISP_CC_COEFF_8              (RKISP1_CIF_ISP_BASE + 0x00000190)
 #define RKISP1_CIF_ISP_OUT_H_OFFS              (RKISP1_CIF_ISP_BASE + 0x00000194)
 #define RKISP1_CIF_ISP_OUT_V_OFFS              (RKISP1_CIF_ISP_BASE + 0x00000198)
-#define RKISP1_CIF_ISP_OUT_H_SIZE              (RKISP1_CIF_ISP_BASE + 0x0000019C)
-#define RKISP1_CIF_ISP_OUT_V_SIZE              (RKISP1_CIF_ISP_BASE + 0x000001A0)
-#define RKISP1_CIF_ISP_DEMOSAIC                        (RKISP1_CIF_ISP_BASE + 0x000001A4)
-#define RKISP1_CIF_ISP_FLAGS_SHD               (RKISP1_CIF_ISP_BASE + 0x000001A8)
-#define RKISP1_CIF_ISP_OUT_H_OFFS_SHD          (RKISP1_CIF_ISP_BASE + 0x000001AC)
-#define RKISP1_CIF_ISP_OUT_V_OFFS_SHD          (RKISP1_CIF_ISP_BASE + 0x000001B0)
-#define RKISP1_CIF_ISP_OUT_H_SIZE_SHD          (RKISP1_CIF_ISP_BASE + 0x000001B4)
-#define RKISP1_CIF_ISP_OUT_V_SIZE_SHD          (RKISP1_CIF_ISP_BASE + 0x000001B8)
-#define RKISP1_CIF_ISP_IMSC                    (RKISP1_CIF_ISP_BASE + 0x000001BC)
-#define RKISP1_CIF_ISP_RIS                     (RKISP1_CIF_ISP_BASE + 0x000001C0)
-#define RKISP1_CIF_ISP_MIS                     (RKISP1_CIF_ISP_BASE + 0x000001C4)
-#define RKISP1_CIF_ISP_ICR                     (RKISP1_CIF_ISP_BASE + 0x000001C8)
-#define RKISP1_CIF_ISP_ISR                     (RKISP1_CIF_ISP_BASE + 0x000001CC)
-#define RKISP1_CIF_ISP_CT_COEFF_0              (RKISP1_CIF_ISP_BASE + 0x000001D0)
-#define RKISP1_CIF_ISP_CT_COEFF_1              (RKISP1_CIF_ISP_BASE + 0x000001D4)
-#define RKISP1_CIF_ISP_CT_COEFF_2              (RKISP1_CIF_ISP_BASE + 0x000001D8)
-#define RKISP1_CIF_ISP_CT_COEFF_3              (RKISP1_CIF_ISP_BASE + 0x000001DC)
-#define RKISP1_CIF_ISP_CT_COEFF_4              (RKISP1_CIF_ISP_BASE + 0x000001E0)
-#define RKISP1_CIF_ISP_CT_COEFF_5              (RKISP1_CIF_ISP_BASE + 0x000001E4)
-#define RKISP1_CIF_ISP_CT_COEFF_6              (RKISP1_CIF_ISP_BASE + 0x000001E8)
-#define RKISP1_CIF_ISP_CT_COEFF_7              (RKISP1_CIF_ISP_BASE + 0x000001EC)
-#define RKISP1_CIF_ISP_CT_COEFF_8              (RKISP1_CIF_ISP_BASE + 0x000001F0)
-#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10      (RKISP1_CIF_ISP_BASE + 0x000001F4)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10       (RKISP1_CIF_ISP_BASE + 0x000001F8)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1_V10       (RKISP1_CIF_ISP_BASE + 0x000001FC)
+#define RKISP1_CIF_ISP_OUT_H_SIZE              (RKISP1_CIF_ISP_BASE + 0x0000019c)
+#define RKISP1_CIF_ISP_OUT_V_SIZE              (RKISP1_CIF_ISP_BASE + 0x000001a0)
+#define RKISP1_CIF_ISP_DEMOSAIC                        (RKISP1_CIF_ISP_BASE + 0x000001a4)
+#define RKISP1_CIF_ISP_FLAGS_SHD               (RKISP1_CIF_ISP_BASE + 0x000001a8)
+#define RKISP1_CIF_ISP_OUT_H_OFFS_SHD          (RKISP1_CIF_ISP_BASE + 0x000001ac)
+#define RKISP1_CIF_ISP_OUT_V_OFFS_SHD          (RKISP1_CIF_ISP_BASE + 0x000001b0)
+#define RKISP1_CIF_ISP_OUT_H_SIZE_SHD          (RKISP1_CIF_ISP_BASE + 0x000001b4)
+#define RKISP1_CIF_ISP_OUT_V_SIZE_SHD          (RKISP1_CIF_ISP_BASE + 0x000001b8)
+#define RKISP1_CIF_ISP_IMSC                    (RKISP1_CIF_ISP_BASE + 0x000001bc)
+#define RKISP1_CIF_ISP_RIS                     (RKISP1_CIF_ISP_BASE + 0x000001c0)
+#define RKISP1_CIF_ISP_MIS                     (RKISP1_CIF_ISP_BASE + 0x000001c4)
+#define RKISP1_CIF_ISP_ICR                     (RKISP1_CIF_ISP_BASE + 0x000001c8)
+#define RKISP1_CIF_ISP_ISR                     (RKISP1_CIF_ISP_BASE + 0x000001cc)
+#define RKISP1_CIF_ISP_CT_COEFF_0              (RKISP1_CIF_ISP_BASE + 0x000001d0)
+#define RKISP1_CIF_ISP_CT_COEFF_1              (RKISP1_CIF_ISP_BASE + 0x000001d4)
+#define RKISP1_CIF_ISP_CT_COEFF_2              (RKISP1_CIF_ISP_BASE + 0x000001d8)
+#define RKISP1_CIF_ISP_CT_COEFF_3              (RKISP1_CIF_ISP_BASE + 0x000001dc)
+#define RKISP1_CIF_ISP_CT_COEFF_4              (RKISP1_CIF_ISP_BASE + 0x000001e0)
+#define RKISP1_CIF_ISP_CT_COEFF_5              (RKISP1_CIF_ISP_BASE + 0x000001e4)
+#define RKISP1_CIF_ISP_CT_COEFF_6              (RKISP1_CIF_ISP_BASE + 0x000001e8)
+#define RKISP1_CIF_ISP_CT_COEFF_7              (RKISP1_CIF_ISP_BASE + 0x000001ec)
+#define RKISP1_CIF_ISP_CT_COEFF_8              (RKISP1_CIF_ISP_BASE + 0x000001f0)
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10      (RKISP1_CIF_ISP_BASE + 0x000001f4)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10       (RKISP1_CIF_ISP_BASE + 0x000001f8)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1_V10       (RKISP1_CIF_ISP_BASE + 0x000001fc)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_2_V10       (RKISP1_CIF_ISP_BASE + 0x00000200)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_3_V10       (RKISP1_CIF_ISP_BASE + 0x00000204)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_4_V10       (RKISP1_CIF_ISP_BASE + 0x00000208)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5_V10       (RKISP1_CIF_ISP_BASE + 0x0000020C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5_V10       (RKISP1_CIF_ISP_BASE + 0x0000020c)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_6_V10       (RKISP1_CIF_ISP_BASE + 0x00000210)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_7_V10       (RKISP1_CIF_ISP_BASE + 0x00000214)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_8_V10       (RKISP1_CIF_ISP_BASE + 0x00000218)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9_V10       (RKISP1_CIF_ISP_BASE + 0x0000021C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9_V10       (RKISP1_CIF_ISP_BASE + 0x0000021c)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_10_V10      (RKISP1_CIF_ISP_BASE + 0x00000220)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_11_V10      (RKISP1_CIF_ISP_BASE + 0x00000224)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_12_V10      (RKISP1_CIF_ISP_BASE + 0x00000228)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13_V10      (RKISP1_CIF_ISP_BASE + 0x0000022C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13_V10      (RKISP1_CIF_ISP_BASE + 0x0000022c)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_14_V10      (RKISP1_CIF_ISP_BASE + 0x00000230)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_15_V10      (RKISP1_CIF_ISP_BASE + 0x00000234)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_16_V10      (RKISP1_CIF_ISP_BASE + 0x00000238)
-#define RKISP1_CIF_ISP_ERR                     (RKISP1_CIF_ISP_BASE + 0x0000023C)
+#define RKISP1_CIF_ISP_ERR                     (RKISP1_CIF_ISP_BASE + 0x0000023c)
 #define RKISP1_CIF_ISP_ERR_CLR                 (RKISP1_CIF_ISP_BASE + 0x00000240)
 #define RKISP1_CIF_ISP_FRAME_COUNT             (RKISP1_CIF_ISP_BASE + 0x00000244)
 #define RKISP1_CIF_ISP_CT_OFFSET_R             (RKISP1_CIF_ISP_BASE + 0x00000248)
-#define RKISP1_CIF_ISP_CT_OFFSET_G             (RKISP1_CIF_ISP_BASE + 0x0000024C)
+#define RKISP1_CIF_ISP_CT_OFFSET_G             (RKISP1_CIF_ISP_BASE + 0x0000024c)
 #define RKISP1_CIF_ISP_CT_OFFSET_B             (RKISP1_CIF_ISP_BASE + 0x00000250)
 #define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V12      (RKISP1_CIF_ISP_BASE + 0x00000300)
 #define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V12       (RKISP1_CIF_ISP_BASE + 0x00000304)
 #define RKISP1_CIF_ISP_FLASH_CMD               (RKISP1_CIF_ISP_FLASH_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_FLASH_CONFIG            (RKISP1_CIF_ISP_FLASH_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_FLASH_PREDIV            (RKISP1_CIF_ISP_FLASH_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_FLASH_DELAY             (RKISP1_CIF_ISP_FLASH_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_FLASH_DELAY             (RKISP1_CIF_ISP_FLASH_BASE + 0x0000000c)
 #define RKISP1_CIF_ISP_FLASH_TIME              (RKISP1_CIF_ISP_FLASH_BASE + 0x00000010)
 #define RKISP1_CIF_ISP_FLASH_MAXP              (RKISP1_CIF_ISP_FLASH_BASE + 0x00000014)
 
 #define RKISP1_CIF_ISP_SH_CTRL                 (RKISP1_CIF_ISP_SH_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_SH_PREDIV               (RKISP1_CIF_ISP_SH_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_SH_DELAY                        (RKISP1_CIF_ISP_SH_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_SH_TIME                 (RKISP1_CIF_ISP_SH_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_SH_TIME                 (RKISP1_CIF_ISP_SH_BASE + 0x0000000c)
 
 #define RKISP1_CIF_C_PROC_BASE                 0x00000800
 #define RKISP1_CIF_C_PROC_CTRL                 (RKISP1_CIF_C_PROC_BASE + 0x00000000)
 #define RKISP1_CIF_C_PROC_CONTRAST             (RKISP1_CIF_C_PROC_BASE + 0x00000004)
 #define RKISP1_CIF_C_PROC_BRIGHTNESS           (RKISP1_CIF_C_PROC_BASE + 0x00000008)
-#define RKISP1_CIF_C_PROC_SATURATION           (RKISP1_CIF_C_PROC_BASE + 0x0000000C)
+#define RKISP1_CIF_C_PROC_SATURATION           (RKISP1_CIF_C_PROC_BASE + 0x0000000c)
 #define RKISP1_CIF_C_PROC_HUE                  (RKISP1_CIF_C_PROC_BASE + 0x00000010)
 
 #define RKISP1_CIF_DUAL_CROP_BASE              0x00000880
 #define RKISP1_CIF_DUAL_CROP_CTRL              (RKISP1_CIF_DUAL_CROP_BASE + 0x00000000)
 #define RKISP1_CIF_DUAL_CROP_M_H_OFFS          (RKISP1_CIF_DUAL_CROP_BASE + 0x00000004)
 #define RKISP1_CIF_DUAL_CROP_M_V_OFFS          (RKISP1_CIF_DUAL_CROP_BASE + 0x00000008)
-#define RKISP1_CIF_DUAL_CROP_M_H_SIZE          (RKISP1_CIF_DUAL_CROP_BASE + 0x0000000C)
+#define RKISP1_CIF_DUAL_CROP_M_H_SIZE          (RKISP1_CIF_DUAL_CROP_BASE + 0x0000000c)
 #define RKISP1_CIF_DUAL_CROP_M_V_SIZE          (RKISP1_CIF_DUAL_CROP_BASE + 0x00000010)
 #define RKISP1_CIF_DUAL_CROP_S_H_OFFS          (RKISP1_CIF_DUAL_CROP_BASE + 0x00000014)
 #define RKISP1_CIF_DUAL_CROP_S_V_OFFS          (RKISP1_CIF_DUAL_CROP_BASE + 0x00000018)
-#define RKISP1_CIF_DUAL_CROP_S_H_SIZE          (RKISP1_CIF_DUAL_CROP_BASE + 0x0000001C)
+#define RKISP1_CIF_DUAL_CROP_S_H_SIZE          (RKISP1_CIF_DUAL_CROP_BASE + 0x0000001c)
 #define RKISP1_CIF_DUAL_CROP_S_V_SIZE          (RKISP1_CIF_DUAL_CROP_BASE + 0x00000020)
 #define RKISP1_CIF_DUAL_CROP_M_H_OFFS_SHD      (RKISP1_CIF_DUAL_CROP_BASE + 0x00000024)
 #define RKISP1_CIF_DUAL_CROP_M_V_OFFS_SHD      (RKISP1_CIF_DUAL_CROP_BASE + 0x00000028)
-#define RKISP1_CIF_DUAL_CROP_M_H_SIZE_SHD      (RKISP1_CIF_DUAL_CROP_BASE + 0x0000002C)
+#define RKISP1_CIF_DUAL_CROP_M_H_SIZE_SHD      (RKISP1_CIF_DUAL_CROP_BASE + 0x0000002c)
 #define RKISP1_CIF_DUAL_CROP_M_V_SIZE_SHD      (RKISP1_CIF_DUAL_CROP_BASE + 0x00000030)
 #define RKISP1_CIF_DUAL_CROP_S_H_OFFS_SHD      (RKISP1_CIF_DUAL_CROP_BASE + 0x00000034)
 #define RKISP1_CIF_DUAL_CROP_S_V_OFFS_SHD      (RKISP1_CIF_DUAL_CROP_BASE + 0x00000038)
-#define RKISP1_CIF_DUAL_CROP_S_H_SIZE_SHD      (RKISP1_CIF_DUAL_CROP_BASE + 0x0000003C)
+#define RKISP1_CIF_DUAL_CROP_S_H_SIZE_SHD      (RKISP1_CIF_DUAL_CROP_BASE + 0x0000003c)
 #define RKISP1_CIF_DUAL_CROP_S_V_SIZE_SHD      (RKISP1_CIF_DUAL_CROP_BASE + 0x00000040)
 
-#define RKISP1_CIF_MRSZ_BASE                   0x00000C00
+#define RKISP1_CIF_MRSZ_BASE                   0x00000c00
 #define RKISP1_CIF_SRSZ_BASE                   0x00001000
 #define RKISP1_CIF_RSZ_CTRL                    0x0000
 #define RKISP1_CIF_RSZ_SCALE_HY                        0x0004
 #define RKISP1_CIF_RSZ_SCALE_HCB               0x0008
-#define RKISP1_CIF_RSZ_SCALE_HCR               0x000C
+#define RKISP1_CIF_RSZ_SCALE_HCR               0x000c
 #define RKISP1_CIF_RSZ_SCALE_VY                        0x0010
 #define RKISP1_CIF_RSZ_SCALE_VC                        0x0014
 #define RKISP1_CIF_RSZ_PHASE_HY                        0x0018
-#define RKISP1_CIF_RSZ_PHASE_HC                        0x001C
+#define RKISP1_CIF_RSZ_PHASE_HC                        0x001c
 #define RKISP1_CIF_RSZ_PHASE_VY                        0x0020
 #define RKISP1_CIF_RSZ_PHASE_VC                        0x0024
 #define RKISP1_CIF_RSZ_SCALE_LUT_ADDR          0x0028
-#define RKISP1_CIF_RSZ_SCALE_LUT               0x002C
+#define RKISP1_CIF_RSZ_SCALE_LUT               0x002c
 #define RKISP1_CIF_RSZ_CTRL_SHD                        0x0030
 #define RKISP1_CIF_RSZ_SCALE_HY_SHD            0x0034
 #define RKISP1_CIF_RSZ_SCALE_HCB_SHD           0x0038
-#define RKISP1_CIF_RSZ_SCALE_HCR_SHD           0x003C
+#define RKISP1_CIF_RSZ_SCALE_HCR_SHD           0x003c
 #define RKISP1_CIF_RSZ_SCALE_VY_SHD            0x0040
 #define RKISP1_CIF_RSZ_SCALE_VC_SHD            0x0044
 #define RKISP1_CIF_RSZ_PHASE_HY_SHD            0x0048
-#define RKISP1_CIF_RSZ_PHASE_HC_SHD            0x004C
+#define RKISP1_CIF_RSZ_PHASE_HC_SHD            0x004c
 #define RKISP1_CIF_RSZ_PHASE_VY_SHD            0x0050
 #define RKISP1_CIF_RSZ_PHASE_VC_SHD            0x0054
 
 #define RKISP1_CIF_MI_CTRL                     (RKISP1_CIF_MI_BASE + 0x00000000)
 #define RKISP1_CIF_MI_INIT                     (RKISP1_CIF_MI_BASE + 0x00000004)
 #define RKISP1_CIF_MI_MP_Y_BASE_AD_INIT                (RKISP1_CIF_MI_BASE + 0x00000008)
-#define RKISP1_CIF_MI_MP_Y_SIZE_INIT           (RKISP1_CIF_MI_BASE + 0x0000000C)
+#define RKISP1_CIF_MI_MP_Y_SIZE_INIT           (RKISP1_CIF_MI_BASE + 0x0000000c)
 #define RKISP1_CIF_MI_MP_Y_OFFS_CNT_INIT       (RKISP1_CIF_MI_BASE + 0x00000010)
 #define RKISP1_CIF_MI_MP_Y_OFFS_CNT_START      (RKISP1_CIF_MI_BASE + 0x00000014)
 #define RKISP1_CIF_MI_MP_Y_IRQ_OFFS_INIT       (RKISP1_CIF_MI_BASE + 0x00000018)
-#define RKISP1_CIF_MI_MP_CB_BASE_AD_INIT       (RKISP1_CIF_MI_BASE + 0x0000001C)
+#define RKISP1_CIF_MI_MP_CB_BASE_AD_INIT       (RKISP1_CIF_MI_BASE + 0x0000001c)
 #define RKISP1_CIF_MI_MP_CB_SIZE_INIT          (RKISP1_CIF_MI_BASE + 0x00000020)
 #define RKISP1_CIF_MI_MP_CB_OFFS_CNT_INIT      (RKISP1_CIF_MI_BASE + 0x00000024)
 #define RKISP1_CIF_MI_MP_CB_OFFS_CNT_START     (RKISP1_CIF_MI_BASE + 0x00000028)
-#define RKISP1_CIF_MI_MP_CR_BASE_AD_INIT       (RKISP1_CIF_MI_BASE + 0x0000002C)
+#define RKISP1_CIF_MI_MP_CR_BASE_AD_INIT       (RKISP1_CIF_MI_BASE + 0x0000002c)
 #define RKISP1_CIF_MI_MP_CR_SIZE_INIT          (RKISP1_CIF_MI_BASE + 0x00000030)
 #define RKISP1_CIF_MI_MP_CR_OFFS_CNT_INIT      (RKISP1_CIF_MI_BASE + 0x00000034)
 #define RKISP1_CIF_MI_MP_CR_OFFS_CNT_START     (RKISP1_CIF_MI_BASE + 0x00000038)
-#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT                (RKISP1_CIF_MI_BASE + 0x0000003C)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT                (RKISP1_CIF_MI_BASE + 0x0000003c)
 #define RKISP1_CIF_MI_SP_Y_SIZE_INIT           (RKISP1_CIF_MI_BASE + 0x00000040)
 #define RKISP1_CIF_MI_SP_Y_OFFS_CNT_INIT       (RKISP1_CIF_MI_BASE + 0x00000044)
 #define RKISP1_CIF_MI_SP_Y_OFFS_CNT_START      (RKISP1_CIF_MI_BASE + 0x00000048)
-#define RKISP1_CIF_MI_SP_Y_LLENGTH             (RKISP1_CIF_MI_BASE + 0x0000004C)
+#define RKISP1_CIF_MI_SP_Y_LLENGTH             (RKISP1_CIF_MI_BASE + 0x0000004c)
 #define RKISP1_CIF_MI_SP_CB_BASE_AD_INIT       (RKISP1_CIF_MI_BASE + 0x00000050)
 #define RKISP1_CIF_MI_SP_CB_SIZE_INIT          (RKISP1_CIF_MI_BASE + 0x00000054)
 #define RKISP1_CIF_MI_SP_CB_OFFS_CNT_INIT      (RKISP1_CIF_MI_BASE + 0x00000058)
-#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_START     (RKISP1_CIF_MI_BASE + 0x0000005C)
+#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_START     (RKISP1_CIF_MI_BASE + 0x0000005c)
 #define RKISP1_CIF_MI_SP_CR_BASE_AD_INIT       (RKISP1_CIF_MI_BASE + 0x00000060)
 #define RKISP1_CIF_MI_SP_CR_SIZE_INIT          (RKISP1_CIF_MI_BASE + 0x00000064)
 #define RKISP1_CIF_MI_SP_CR_OFFS_CNT_INIT      (RKISP1_CIF_MI_BASE + 0x00000068)
-#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_START     (RKISP1_CIF_MI_BASE + 0x0000006C)
+#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_START     (RKISP1_CIF_MI_BASE + 0x0000006c)
 #define RKISP1_CIF_MI_BYTE_CNT                 (RKISP1_CIF_MI_BASE + 0x00000070)
 #define RKISP1_CIF_MI_CTRL_SHD                 (RKISP1_CIF_MI_BASE + 0x00000074)
 #define RKISP1_CIF_MI_MP_Y_BASE_AD_SHD         (RKISP1_CIF_MI_BASE + 0x00000078)
-#define RKISP1_CIF_MI_MP_Y_SIZE_SHD            (RKISP1_CIF_MI_BASE + 0x0000007C)
+#define RKISP1_CIF_MI_MP_Y_SIZE_SHD            (RKISP1_CIF_MI_BASE + 0x0000007c)
 #define RKISP1_CIF_MI_MP_Y_OFFS_CNT_SHD                (RKISP1_CIF_MI_BASE + 0x00000080)
 #define RKISP1_CIF_MI_MP_Y_IRQ_OFFS_SHD                (RKISP1_CIF_MI_BASE + 0x00000084)
 #define RKISP1_CIF_MI_MP_CB_BASE_AD_SHD                (RKISP1_CIF_MI_BASE + 0x00000088)
-#define RKISP1_CIF_MI_MP_CB_SIZE_SHD           (RKISP1_CIF_MI_BASE + 0x0000008C)
+#define RKISP1_CIF_MI_MP_CB_SIZE_SHD           (RKISP1_CIF_MI_BASE + 0x0000008c)
 #define RKISP1_CIF_MI_MP_CB_OFFS_CNT_SHD       (RKISP1_CIF_MI_BASE + 0x00000090)
 #define RKISP1_CIF_MI_MP_CR_BASE_AD_SHD                (RKISP1_CIF_MI_BASE + 0x00000094)
 #define RKISP1_CIF_MI_MP_CR_SIZE_SHD           (RKISP1_CIF_MI_BASE + 0x00000098)
-#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_SHD       (RKISP1_CIF_MI_BASE + 0x0000009C)
-#define RKISP1_CIF_MI_SP_Y_BASE_AD_SHD         (RKISP1_CIF_MI_BASE + 0x000000A0)
-#define RKISP1_CIF_MI_SP_Y_SIZE_SHD            (RKISP1_CIF_MI_BASE + 0x000000A4)
-#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_SHD                (RKISP1_CIF_MI_BASE + 0x000000A8)
-#define RKISP1_CIF_MI_SP_CB_BASE_AD_SHD                (RKISP1_CIF_MI_BASE + 0x000000B0)
-#define RKISP1_CIF_MI_SP_CB_SIZE_SHD           (RKISP1_CIF_MI_BASE + 0x000000B4)
-#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_SHD       (RKISP1_CIF_MI_BASE + 0x000000B8)
-#define RKISP1_CIF_MI_SP_CR_BASE_AD_SHD                (RKISP1_CIF_MI_BASE + 0x000000BC)
-#define RKISP1_CIF_MI_SP_CR_SIZE_SHD           (RKISP1_CIF_MI_BASE + 0x000000C0)
-#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_SHD       (RKISP1_CIF_MI_BASE + 0x000000C4)
-#define RKISP1_CIF_MI_DMA_Y_PIC_START_AD       (RKISP1_CIF_MI_BASE + 0x000000C8)
-#define RKISP1_CIF_MI_DMA_Y_PIC_WIDTH          (RKISP1_CIF_MI_BASE + 0x000000CC)
-#define RKISP1_CIF_MI_DMA_Y_LLENGTH            (RKISP1_CIF_MI_BASE + 0x000000D0)
-#define RKISP1_CIF_MI_DMA_Y_PIC_SIZE           (RKISP1_CIF_MI_BASE + 0x000000D4)
-#define RKISP1_CIF_MI_DMA_CB_PIC_START_AD      (RKISP1_CIF_MI_BASE + 0x000000D8)
-#define RKISP1_CIF_MI_DMA_CR_PIC_START_AD      (RKISP1_CIF_MI_BASE + 0x000000E8)
-#define RKISP1_CIF_MI_IMSC                     (RKISP1_CIF_MI_BASE + 0x000000F8)
-#define RKISP1_CIF_MI_RIS                      (RKISP1_CIF_MI_BASE + 0x000000FC)
+#define RKISP1_CIF_MI_MP_CR_OFFS_CNT_SHD       (RKISP1_CIF_MI_BASE + 0x0000009c)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_SHD         (RKISP1_CIF_MI_BASE + 0x000000a0)
+#define RKISP1_CIF_MI_SP_Y_SIZE_SHD            (RKISP1_CIF_MI_BASE + 0x000000a4)
+#define RKISP1_CIF_MI_SP_Y_OFFS_CNT_SHD                (RKISP1_CIF_MI_BASE + 0x000000a8)
+#define RKISP1_CIF_MI_SP_CB_BASE_AD_SHD                (RKISP1_CIF_MI_BASE + 0x000000b0)
+#define RKISP1_CIF_MI_SP_CB_SIZE_SHD           (RKISP1_CIF_MI_BASE + 0x000000b4)
+#define RKISP1_CIF_MI_SP_CB_OFFS_CNT_SHD       (RKISP1_CIF_MI_BASE + 0x000000b8)
+#define RKISP1_CIF_MI_SP_CR_BASE_AD_SHD                (RKISP1_CIF_MI_BASE + 0x000000bc)
+#define RKISP1_CIF_MI_SP_CR_SIZE_SHD           (RKISP1_CIF_MI_BASE + 0x000000c0)
+#define RKISP1_CIF_MI_SP_CR_OFFS_CNT_SHD       (RKISP1_CIF_MI_BASE + 0x000000c4)
+#define RKISP1_CIF_MI_DMA_Y_PIC_START_AD       (RKISP1_CIF_MI_BASE + 0x000000c8)
+#define RKISP1_CIF_MI_DMA_Y_PIC_WIDTH          (RKISP1_CIF_MI_BASE + 0x000000cc)
+#define RKISP1_CIF_MI_DMA_Y_LLENGTH            (RKISP1_CIF_MI_BASE + 0x000000d0)
+#define RKISP1_CIF_MI_DMA_Y_PIC_SIZE           (RKISP1_CIF_MI_BASE + 0x000000d4)
+#define RKISP1_CIF_MI_DMA_CB_PIC_START_AD      (RKISP1_CIF_MI_BASE + 0x000000d8)
+#define RKISP1_CIF_MI_DMA_CR_PIC_START_AD      (RKISP1_CIF_MI_BASE + 0x000000e8)
+#define RKISP1_CIF_MI_IMSC                     (RKISP1_CIF_MI_BASE + 0x000000f8)
+#define RKISP1_CIF_MI_RIS                      (RKISP1_CIF_MI_BASE + 0x000000fc)
 #define RKISP1_CIF_MI_MIS                      (RKISP1_CIF_MI_BASE + 0x00000100)
 #define RKISP1_CIF_MI_ICR                      (RKISP1_CIF_MI_BASE + 0x00000104)
 #define RKISP1_CIF_MI_ISR                      (RKISP1_CIF_MI_BASE + 0x00000108)
-#define RKISP1_CIF_MI_STATUS                   (RKISP1_CIF_MI_BASE + 0x0000010C)
+#define RKISP1_CIF_MI_STATUS                   (RKISP1_CIF_MI_BASE + 0x0000010c)
 #define RKISP1_CIF_MI_STATUS_CLR               (RKISP1_CIF_MI_BASE + 0x00000110)
 #define RKISP1_CIF_MI_SP_Y_PIC_WIDTH           (RKISP1_CIF_MI_BASE + 0x00000114)
 #define RKISP1_CIF_MI_SP_Y_PIC_HEIGHT          (RKISP1_CIF_MI_BASE + 0x00000118)
-#define RKISP1_CIF_MI_SP_Y_PIC_SIZE            (RKISP1_CIF_MI_BASE + 0x0000011C)
+#define RKISP1_CIF_MI_SP_Y_PIC_SIZE            (RKISP1_CIF_MI_BASE + 0x0000011c)
 #define RKISP1_CIF_MI_DMA_CTRL                 (RKISP1_CIF_MI_BASE + 0x00000120)
 #define RKISP1_CIF_MI_DMA_START                        (RKISP1_CIF_MI_BASE + 0x00000124)
 #define RKISP1_CIF_MI_DMA_STATUS               (RKISP1_CIF_MI_BASE + 0x00000128)
-#define RKISP1_CIF_MI_PIXEL_COUNT              (RKISP1_CIF_MI_BASE + 0x0000012C)
+#define RKISP1_CIF_MI_PIXEL_COUNT              (RKISP1_CIF_MI_BASE + 0x0000012c)
 #define RKISP1_CIF_MI_MP_Y_BASE_AD_INIT2       (RKISP1_CIF_MI_BASE + 0x00000130)
 #define RKISP1_CIF_MI_MP_CB_BASE_AD_INIT2      (RKISP1_CIF_MI_BASE + 0x00000134)
 #define RKISP1_CIF_MI_MP_CR_BASE_AD_INIT2      (RKISP1_CIF_MI_BASE + 0x00000138)
-#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT2       (RKISP1_CIF_MI_BASE + 0x0000013C)
+#define RKISP1_CIF_MI_SP_Y_BASE_AD_INIT2       (RKISP1_CIF_MI_BASE + 0x0000013c)
 #define RKISP1_CIF_MI_SP_CB_BASE_AD_INIT2      (RKISP1_CIF_MI_BASE + 0x00000140)
 #define RKISP1_CIF_MI_SP_CR_BASE_AD_INIT2      (RKISP1_CIF_MI_BASE + 0x00000144)
 #define RKISP1_CIF_MI_XTD_FORMAT_CTRL          (RKISP1_CIF_MI_BASE + 0x00000148)
 
-#define RKISP1_CIF_SMIA_BASE                   0x00001A00
+#define RKISP1_CIF_SMIA_BASE                   0x00001a00
 #define RKISP1_CIF_SMIA_CTRL                   (RKISP1_CIF_SMIA_BASE + 0x00000000)
 #define RKISP1_CIF_SMIA_STATUS                 (RKISP1_CIF_SMIA_BASE + 0x00000004)
 #define RKISP1_CIF_SMIA_IMSC                   (RKISP1_CIF_SMIA_BASE + 0x00000008)
-#define RKISP1_CIF_SMIA_RIS                    (RKISP1_CIF_SMIA_BASE + 0x0000000C)
+#define RKISP1_CIF_SMIA_RIS                    (RKISP1_CIF_SMIA_BASE + 0x0000000c)
 #define RKISP1_CIF_SMIA_MIS                    (RKISP1_CIF_SMIA_BASE + 0x00000010)
 #define RKISP1_CIF_SMIA_ICR                    (RKISP1_CIF_SMIA_BASE + 0x00000014)
 #define RKISP1_CIF_SMIA_ISR                    (RKISP1_CIF_SMIA_BASE + 0x00000018)
-#define RKISP1_CIF_SMIA_DATA_FORMAT_SEL                (RKISP1_CIF_SMIA_BASE + 0x0000001C)
+#define RKISP1_CIF_SMIA_DATA_FORMAT_SEL                (RKISP1_CIF_SMIA_BASE + 0x0000001c)
 #define RKISP1_CIF_SMIA_SOF_EMB_DATA_LINES     (RKISP1_CIF_SMIA_BASE + 0x00000020)
 #define RKISP1_CIF_SMIA_EMB_HSTART             (RKISP1_CIF_SMIA_BASE + 0x00000024)
 #define RKISP1_CIF_SMIA_EMB_HSIZE              (RKISP1_CIF_SMIA_BASE + 0x00000028)
 #define RKISP1_CIF_SMIA_EMB_DATA_FIFO          (RKISP1_CIF_SMIA_BASE + 0x00000034)
 #define RKISP1_CIF_SMIA_EMB_DATA_WATERMARK     (RKISP1_CIF_SMIA_BASE + 0x00000038)
 
-#define RKISP1_CIF_MIPI_BASE                   0x00001C00
+#define RKISP1_CIF_MIPI_BASE                   0x00001c00
 #define RKISP1_CIF_MIPI_CTRL                   (RKISP1_CIF_MIPI_BASE + 0x00000000)
 #define RKISP1_CIF_MIPI_STATUS                 (RKISP1_CIF_MIPI_BASE + 0x00000004)
 #define RKISP1_CIF_MIPI_IMSC                   (RKISP1_CIF_MIPI_BASE + 0x00000008)
-#define RKISP1_CIF_MIPI_RIS                    (RKISP1_CIF_MIPI_BASE + 0x0000000C)
+#define RKISP1_CIF_MIPI_RIS                    (RKISP1_CIF_MIPI_BASE + 0x0000000c)
 #define RKISP1_CIF_MIPI_MIS                    (RKISP1_CIF_MIPI_BASE + 0x00000010)
 #define RKISP1_CIF_MIPI_ICR                    (RKISP1_CIF_MIPI_BASE + 0x00000014)
 #define RKISP1_CIF_MIPI_ISR                    (RKISP1_CIF_MIPI_BASE + 0x00000018)
-#define RKISP1_CIF_MIPI_CUR_DATA_ID            (RKISP1_CIF_MIPI_BASE + 0x0000001C)
+#define RKISP1_CIF_MIPI_CUR_DATA_ID            (RKISP1_CIF_MIPI_BASE + 0x0000001c)
 #define RKISP1_CIF_MIPI_IMG_DATA_SEL           (RKISP1_CIF_MIPI_BASE + 0x00000020)
 #define RKISP1_CIF_MIPI_ADD_DATA_SEL_1         (RKISP1_CIF_MIPI_BASE + 0x00000024)
 #define RKISP1_CIF_MIPI_ADD_DATA_SEL_2         (RKISP1_CIF_MIPI_BASE + 0x00000028)
-#define RKISP1_CIF_MIPI_ADD_DATA_SEL_3         (RKISP1_CIF_MIPI_BASE + 0x0000002C)
+#define RKISP1_CIF_MIPI_ADD_DATA_SEL_3         (RKISP1_CIF_MIPI_BASE + 0x0000002c)
 #define RKISP1_CIF_MIPI_ADD_DATA_SEL_4         (RKISP1_CIF_MIPI_BASE + 0x00000030)
 #define RKISP1_CIF_MIPI_ADD_DATA_FIFO          (RKISP1_CIF_MIPI_BASE + 0x00000034)
 #define RKISP1_CIF_MIPI_FIFO_FILL_LEVEL                (RKISP1_CIF_MIPI_BASE + 0x00000038)
-#define RKISP1_CIF_MIPI_COMPRESSED_MODE                (RKISP1_CIF_MIPI_BASE + 0x0000003C)
+#define RKISP1_CIF_MIPI_COMPRESSED_MODE                (RKISP1_CIF_MIPI_BASE + 0x0000003c)
 #define RKISP1_CIF_MIPI_FRAME                  (RKISP1_CIF_MIPI_BASE + 0x00000040)
 #define RKISP1_CIF_MIPI_GEN_SHORT_DT           (RKISP1_CIF_MIPI_BASE + 0x00000044)
 #define RKISP1_CIF_MIPI_GEN_SHORT_8_9          (RKISP1_CIF_MIPI_BASE + 0x00000048)
-#define RKISP1_CIF_MIPI_GEN_SHORT_A_B          (RKISP1_CIF_MIPI_BASE + 0x0000004C)
+#define RKISP1_CIF_MIPI_GEN_SHORT_A_B          (RKISP1_CIF_MIPI_BASE + 0x0000004c)
 #define RKISP1_CIF_MIPI_GEN_SHORT_C_D          (RKISP1_CIF_MIPI_BASE + 0x00000050)
 #define RKISP1_CIF_MIPI_GEN_SHORT_E_F          (RKISP1_CIF_MIPI_BASE + 0x00000054)
 
 #define RKISP1_CIF_ISP_AFM_CTRL                        (RKISP1_CIF_ISP_AFM_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_AFM_LT_A                        (RKISP1_CIF_ISP_AFM_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_AFM_RB_A                        (RKISP1_CIF_ISP_AFM_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_AFM_LT_B                        (RKISP1_CIF_ISP_AFM_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_AFM_LT_B                        (RKISP1_CIF_ISP_AFM_BASE + 0x0000000c)
 #define RKISP1_CIF_ISP_AFM_RB_B                        (RKISP1_CIF_ISP_AFM_BASE + 0x00000010)
 #define RKISP1_CIF_ISP_AFM_LT_C                        (RKISP1_CIF_ISP_AFM_BASE + 0x00000014)
 #define RKISP1_CIF_ISP_AFM_RB_C                        (RKISP1_CIF_ISP_AFM_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_AFM_THRES               (RKISP1_CIF_ISP_AFM_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_AFM_THRES               (RKISP1_CIF_ISP_AFM_BASE + 0x0000001c)
 #define RKISP1_CIF_ISP_AFM_VAR_SHIFT           (RKISP1_CIF_ISP_AFM_BASE + 0x00000020)
 #define RKISP1_CIF_ISP_AFM_SUM_A               (RKISP1_CIF_ISP_AFM_BASE + 0x00000024)
 #define RKISP1_CIF_ISP_AFM_SUM_B               (RKISP1_CIF_ISP_AFM_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_AFM_SUM_C               (RKISP1_CIF_ISP_AFM_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_AFM_SUM_C               (RKISP1_CIF_ISP_AFM_BASE + 0x0000002c)
 #define RKISP1_CIF_ISP_AFM_LUM_A               (RKISP1_CIF_ISP_AFM_BASE + 0x00000030)
 #define RKISP1_CIF_ISP_AFM_LUM_B               (RKISP1_CIF_ISP_AFM_BASE + 0x00000034)
 #define RKISP1_CIF_ISP_AFM_LUM_C               (RKISP1_CIF_ISP_AFM_BASE + 0x00000038)
 #define RKISP1_CIF_ISP_LSC_CTRL                        (RKISP1_CIF_ISP_LSC_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_LSC_R_TABLE_ADDR                (RKISP1_CIF_ISP_LSC_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR       (RKISP1_CIF_ISP_LSC_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_LSC_B_TABLE_ADDR                (RKISP1_CIF_ISP_LSC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_LSC_B_TABLE_ADDR                (RKISP1_CIF_ISP_LSC_BASE + 0x0000000c)
 #define RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR       (RKISP1_CIF_ISP_LSC_BASE + 0x00000010)
 #define RKISP1_CIF_ISP_LSC_R_TABLE_DATA                (RKISP1_CIF_ISP_LSC_BASE + 0x00000014)
 #define RKISP1_CIF_ISP_LSC_GR_TABLE_DATA       (RKISP1_CIF_ISP_LSC_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_LSC_B_TABLE_DATA                (RKISP1_CIF_ISP_LSC_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_LSC_B_TABLE_DATA                (RKISP1_CIF_ISP_LSC_BASE + 0x0000001c)
 #define RKISP1_CIF_ISP_LSC_GB_TABLE_DATA       (RKISP1_CIF_ISP_LSC_BASE + 0x00000020)
 #define RKISP1_CIF_ISP_LSC_XGRAD(n)            (RKISP1_CIF_ISP_LSC_BASE + 0x00000024 + (n) * 4)
 #define RKISP1_CIF_ISP_LSC_YGRAD(n)            (RKISP1_CIF_ISP_LSC_BASE + 0x00000034 + (n) * 4)
 #define RKISP1_CIF_ISP_IS_CTRL                 (RKISP1_CIF_ISP_IS_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_IS_RECENTER             (RKISP1_CIF_ISP_IS_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_IS_H_OFFS               (RKISP1_CIF_ISP_IS_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_IS_V_OFFS               (RKISP1_CIF_ISP_IS_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_IS_V_OFFS               (RKISP1_CIF_ISP_IS_BASE + 0x0000000c)
 #define RKISP1_CIF_ISP_IS_H_SIZE               (RKISP1_CIF_ISP_IS_BASE + 0x00000010)
 #define RKISP1_CIF_ISP_IS_V_SIZE               (RKISP1_CIF_ISP_IS_BASE + 0x00000014)
 #define RKISP1_CIF_ISP_IS_MAX_DX               (RKISP1_CIF_ISP_IS_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_IS_MAX_DY               (RKISP1_CIF_ISP_IS_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_IS_MAX_DY               (RKISP1_CIF_ISP_IS_BASE + 0x0000001c)
 #define RKISP1_CIF_ISP_IS_DISPLACE             (RKISP1_CIF_ISP_IS_BASE + 0x00000020)
 #define RKISP1_CIF_ISP_IS_H_OFFS_SHD           (RKISP1_CIF_ISP_IS_BASE + 0x00000024)
 #define RKISP1_CIF_ISP_IS_V_OFFS_SHD           (RKISP1_CIF_ISP_IS_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_IS_H_SIZE_SHD           (RKISP1_CIF_ISP_IS_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_IS_H_SIZE_SHD           (RKISP1_CIF_ISP_IS_BASE + 0x0000002c)
 #define RKISP1_CIF_ISP_IS_V_SIZE_SHD           (RKISP1_CIF_ISP_IS_BASE + 0x00000030)
 
 #define RKISP1_CIF_ISP_HIST_BASE_V10           0x00002400
 #define RKISP1_CIF_ISP_HIST_PROP_V10           (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000000)
 #define RKISP1_CIF_ISP_HIST_H_OFFS_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000004)
 #define RKISP1_CIF_ISP_HIST_V_OFFS_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000008)
-#define RKISP1_CIF_ISP_HIST_H_SIZE_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000000C)
+#define RKISP1_CIF_ISP_HIST_H_SIZE_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000000c)
 #define RKISP1_CIF_ISP_HIST_V_SIZE_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000010)
 #define RKISP1_CIF_ISP_HIST_BIN_0_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000014)
 #define RKISP1_CIF_ISP_HIST_BIN_1_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000018)
-#define RKISP1_CIF_ISP_HIST_BIN_2_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000001C)
+#define RKISP1_CIF_ISP_HIST_BIN_2_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000001c)
 #define RKISP1_CIF_ISP_HIST_BIN_3_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000020)
 #define RKISP1_CIF_ISP_HIST_BIN_4_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000024)
 #define RKISP1_CIF_ISP_HIST_BIN_5_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000028)
-#define RKISP1_CIF_ISP_HIST_BIN_6_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000002C)
+#define RKISP1_CIF_ISP_HIST_BIN_6_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000002c)
 #define RKISP1_CIF_ISP_HIST_BIN_7_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000030)
 #define RKISP1_CIF_ISP_HIST_BIN_8_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000034)
 #define RKISP1_CIF_ISP_HIST_BIN_9_V10          (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000038)
-#define RKISP1_CIF_ISP_HIST_BIN_10_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_BIN_10_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000003c)
 #define RKISP1_CIF_ISP_HIST_BIN_11_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000040)
 #define RKISP1_CIF_ISP_HIST_BIN_12_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000044)
 #define RKISP1_CIF_ISP_HIST_BIN_13_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000048)
-#define RKISP1_CIF_ISP_HIST_BIN_14_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000004C)
+#define RKISP1_CIF_ISP_HIST_BIN_14_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000004c)
 #define RKISP1_CIF_ISP_HIST_BIN_15_V10         (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000050)
 #define RKISP1_CIF_ISP_HIST_WEIGHT_00TO30_V10  (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000054)
 #define RKISP1_CIF_ISP_HIST_WEIGHT_40TO21_V10  (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000058)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12_V10  (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000005C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12_V10  (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000005c)
 #define RKISP1_CIF_ISP_HIST_WEIGHT_22TO03_V10  (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000060)
 #define RKISP1_CIF_ISP_HIST_WEIGHT_13TO43_V10  (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000064)
 #define RKISP1_CIF_ISP_HIST_WEIGHT_04TO34_V10  (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000068)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_44_V10      (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000006C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_44_V10      (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000006c)
 
 #define RKISP1_CIF_ISP_FILT_BASE               0x00002500
 #define RKISP1_CIF_ISP_FILT_MODE               (RKISP1_CIF_ISP_FILT_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_FILT_FAC_SH0            (RKISP1_CIF_ISP_FILT_BASE + 0x00000040)
 #define RKISP1_CIF_ISP_FILT_FAC_MID            (RKISP1_CIF_ISP_FILT_BASE + 0x00000044)
 #define RKISP1_CIF_ISP_FILT_FAC_BL0            (RKISP1_CIF_ISP_FILT_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_FILT_FAC_BL1            (RKISP1_CIF_ISP_FILT_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_FILT_FAC_BL1            (RKISP1_CIF_ISP_FILT_BASE + 0x0000004c)
 
 #define RKISP1_CIF_ISP_CAC_BASE                        0x00002580
 #define RKISP1_CIF_ISP_CAC_CTRL                        (RKISP1_CIF_ISP_CAC_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_CAC_COUNT_START         (RKISP1_CIF_ISP_CAC_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_CAC_A                   (RKISP1_CIF_ISP_CAC_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_CAC_B                   (RKISP1_CIF_ISP_CAC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_CAC_B                   (RKISP1_CIF_ISP_CAC_BASE + 0x0000000c)
 #define RKISP1_CIF_ISP_CAC_C                   (RKISP1_CIF_ISP_CAC_BASE + 0x00000010)
 #define RKISP1_CIF_ISP_X_NORM                  (RKISP1_CIF_ISP_CAC_BASE + 0x00000014)
 #define RKISP1_CIF_ISP_Y_NORM                  (RKISP1_CIF_ISP_CAC_BASE + 0x00000018)
 #define RKISP1_CIF_ISP_EXP_CTRL                        (RKISP1_CIF_ISP_EXP_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_EXP_H_OFFSET_V10                (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_EXP_V_OFFSET_V10                (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_EXP_H_SIZE_V10          (RKISP1_CIF_ISP_EXP_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_EXP_H_SIZE_V10          (RKISP1_CIF_ISP_EXP_BASE + 0x0000000c)
 #define RKISP1_CIF_ISP_EXP_V_SIZE_V10          (RKISP1_CIF_ISP_EXP_BASE + 0x00000010)
 #define RKISP1_CIF_ISP_EXP_MEAN_00_V10         (RKISP1_CIF_ISP_EXP_BASE + 0x00000014)
 #define RKISP1_CIF_ISP_EXP_MEAN_10_V10         (RKISP1_CIF_ISP_EXP_BASE + 0x00000018)
 #define RKISP1_CIF_ISP_DPF_MODE                        (RKISP1_CIF_ISP_DPF_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_DPF_STRENGTH_R          (RKISP1_CIF_ISP_DPF_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_DPF_STRENGTH_G          (RKISP1_CIF_ISP_DPF_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_DPF_STRENGTH_B          (RKISP1_CIF_ISP_DPF_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_DPF_STRENGTH_B          (RKISP1_CIF_ISP_DPF_BASE + 0x0000000c)
 #define RKISP1_CIF_ISP_DPF_S_WEIGHT_G_1_4      (RKISP1_CIF_ISP_DPF_BASE + 0x00000010)
 #define RKISP1_CIF_ISP_DPF_S_WEIGHT_G_5_6      (RKISP1_CIF_ISP_DPF_BASE + 0x00000014)
 #define RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_1_4     (RKISP1_CIF_ISP_DPF_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_5_6     (RKISP1_CIF_ISP_DPF_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_DPF_S_WEIGHT_RB_5_6     (RKISP1_CIF_ISP_DPF_BASE + 0x0000001c)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_0                (RKISP1_CIF_ISP_DPF_BASE + 0x00000020)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_1                (RKISP1_CIF_ISP_DPF_BASE + 0x00000024)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_2                (RKISP1_CIF_ISP_DPF_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_DPF_NULL_COEFF_3                (RKISP1_CIF_ISP_DPF_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_3                (RKISP1_CIF_ISP_DPF_BASE + 0x0000002c)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_4                (RKISP1_CIF_ISP_DPF_BASE + 0x00000030)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_5                (RKISP1_CIF_ISP_DPF_BASE + 0x00000034)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_6                (RKISP1_CIF_ISP_DPF_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_DPF_NULL_COEFF_7                (RKISP1_CIF_ISP_DPF_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_7                (RKISP1_CIF_ISP_DPF_BASE + 0x0000003c)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_8                (RKISP1_CIF_ISP_DPF_BASE + 0x00000040)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_9                (RKISP1_CIF_ISP_DPF_BASE + 0x00000044)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_10       (RKISP1_CIF_ISP_DPF_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_DPF_NULL_COEFF_11       (RKISP1_CIF_ISP_DPF_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_11       (RKISP1_CIF_ISP_DPF_BASE + 0x0000004c)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_12       (RKISP1_CIF_ISP_DPF_BASE + 0x00000050)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_13       (RKISP1_CIF_ISP_DPF_BASE + 0x00000054)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_14       (RKISP1_CIF_ISP_DPF_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_DPF_NULL_COEFF_15       (RKISP1_CIF_ISP_DPF_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_DPF_NULL_COEFF_15       (RKISP1_CIF_ISP_DPF_BASE + 0x0000005c)
 #define RKISP1_CIF_ISP_DPF_NULL_COEFF_16       (RKISP1_CIF_ISP_DPF_BASE + 0x00000060)
 #define RKISP1_CIF_ISP_DPF_NF_GAIN_R           (RKISP1_CIF_ISP_DPF_BASE + 0x00000064)
 #define RKISP1_CIF_ISP_DPF_NF_GAIN_GR          (RKISP1_CIF_ISP_DPF_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_DPF_NF_GAIN_GB          (RKISP1_CIF_ISP_DPF_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_DPF_NF_GAIN_GB          (RKISP1_CIF_ISP_DPF_BASE + 0x0000006c)
 #define RKISP1_CIF_ISP_DPF_NF_GAIN_B           (RKISP1_CIF_ISP_DPF_BASE + 0x00000070)
 
 #define RKISP1_CIF_ISP_DPCC_BASE               0x00002900
 #define RKISP1_CIF_ISP_DPCC_MODE               (RKISP1_CIF_ISP_DPCC_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE                (RKISP1_CIF_ISP_DPCC_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_DPCC_SET_USE            (RKISP1_CIF_ISP_DPCC_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_DPCC_METHODS_SET_1      (RKISP1_CIF_ISP_DPCC_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_DPCC_METHODS_SET_1      (RKISP1_CIF_ISP_DPCC_BASE + 0x0000000c)
 #define RKISP1_CIF_ISP_DPCC_METHODS_SET_2      (RKISP1_CIF_ISP_DPCC_BASE + 0x00000010)
 #define RKISP1_CIF_ISP_DPCC_METHODS_SET_3      (RKISP1_CIF_ISP_DPCC_BASE + 0x00000014)
 #define RKISP1_CIF_ISP_DPCC_LINE_THRESH_1      (RKISP1_CIF_ISP_DPCC_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_1     (RKISP1_CIF_ISP_DPCC_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_1     (RKISP1_CIF_ISP_DPCC_BASE + 0x0000001c)
 #define RKISP1_CIF_ISP_DPCC_PG_FAC_1           (RKISP1_CIF_ISP_DPCC_BASE + 0x00000020)
 #define RKISP1_CIF_ISP_DPCC_RND_THRESH_1       (RKISP1_CIF_ISP_DPCC_BASE + 0x00000024)
 #define RKISP1_CIF_ISP_DPCC_RG_FAC_1           (RKISP1_CIF_ISP_DPCC_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_2      (RKISP1_CIF_ISP_DPCC_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_2      (RKISP1_CIF_ISP_DPCC_BASE + 0x0000002c)
 #define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_2     (RKISP1_CIF_ISP_DPCC_BASE + 0x00000030)
 #define RKISP1_CIF_ISP_DPCC_PG_FAC_2           (RKISP1_CIF_ISP_DPCC_BASE + 0x00000034)
 #define RKISP1_CIF_ISP_DPCC_RND_THRESH_2       (RKISP1_CIF_ISP_DPCC_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_DPCC_RG_FAC_2           (RKISP1_CIF_ISP_DPCC_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_DPCC_RG_FAC_2           (RKISP1_CIF_ISP_DPCC_BASE + 0x0000003c)
 #define RKISP1_CIF_ISP_DPCC_LINE_THRESH_3      (RKISP1_CIF_ISP_DPCC_BASE + 0x00000040)
 #define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_3     (RKISP1_CIF_ISP_DPCC_BASE + 0x00000044)
 #define RKISP1_CIF_ISP_DPCC_PG_FAC_3           (RKISP1_CIF_ISP_DPCC_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_DPCC_RND_THRESH_3       (RKISP1_CIF_ISP_DPCC_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_DPCC_RND_THRESH_3       (RKISP1_CIF_ISP_DPCC_BASE + 0x0000004c)
 #define RKISP1_CIF_ISP_DPCC_RG_FAC_3           (RKISP1_CIF_ISP_DPCC_BASE + 0x00000050)
 #define RKISP1_CIF_ISP_DPCC_RO_LIMITS          (RKISP1_CIF_ISP_DPCC_BASE + 0x00000054)
 #define RKISP1_CIF_ISP_DPCC_RND_OFFS           (RKISP1_CIF_ISP_DPCC_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_DPCC_BPT_CTRL           (RKISP1_CIF_ISP_DPCC_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_DPCC_BPT_CTRL           (RKISP1_CIF_ISP_DPCC_BASE + 0x0000005c)
 #define RKISP1_CIF_ISP_DPCC_BPT_NUMBER         (RKISP1_CIF_ISP_DPCC_BASE + 0x00000060)
 #define RKISP1_CIF_ISP_DPCC_BPT_ADDR           (RKISP1_CIF_ISP_DPCC_BASE + 0x00000064)
 #define RKISP1_CIF_ISP_DPCC_BPT_DATA           (RKISP1_CIF_ISP_DPCC_BASE + 0x00000068)
 
-#define RKISP1_CIF_ISP_WDR_BASE                        0x00002A00
+#define RKISP1_CIF_ISP_WDR_BASE                        0x00002a00
 #define RKISP1_CIF_ISP_WDR_CTRL                        (RKISP1_CIF_ISP_WDR_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_1         (RKISP1_CIF_ISP_WDR_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_2         (RKISP1_CIF_ISP_WDR_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_3         (RKISP1_CIF_ISP_WDR_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_3         (RKISP1_CIF_ISP_WDR_BASE + 0x0000000c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_4         (RKISP1_CIF_ISP_WDR_BASE + 0x00000010)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0      (RKISP1_CIF_ISP_WDR_BASE + 0x00000014)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1      (RKISP1_CIF_ISP_WDR_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2      (RKISP1_CIF_ISP_WDR_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2      (RKISP1_CIF_ISP_WDR_BASE + 0x0000001c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3      (RKISP1_CIF_ISP_WDR_BASE + 0x00000020)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4      (RKISP1_CIF_ISP_WDR_BASE + 0x00000024)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5      (RKISP1_CIF_ISP_WDR_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6      (RKISP1_CIF_ISP_WDR_BASE + 0x0000002C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6      (RKISP1_CIF_ISP_WDR_BASE + 0x0000002c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7      (RKISP1_CIF_ISP_WDR_BASE + 0x00000030)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8      (RKISP1_CIF_ISP_WDR_BASE + 0x00000034)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9      (RKISP1_CIF_ISP_WDR_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10     (RKISP1_CIF_ISP_WDR_BASE + 0x0000003C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10     (RKISP1_CIF_ISP_WDR_BASE + 0x0000003c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11     (RKISP1_CIF_ISP_WDR_BASE + 0x00000040)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12     (RKISP1_CIF_ISP_WDR_BASE + 0x00000044)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13     (RKISP1_CIF_ISP_WDR_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14     (RKISP1_CIF_ISP_WDR_BASE + 0x0000004C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14     (RKISP1_CIF_ISP_WDR_BASE + 0x0000004c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15     (RKISP1_CIF_ISP_WDR_BASE + 0x00000050)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16     (RKISP1_CIF_ISP_WDR_BASE + 0x00000054)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17     (RKISP1_CIF_ISP_WDR_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18     (RKISP1_CIF_ISP_WDR_BASE + 0x0000005C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18     (RKISP1_CIF_ISP_WDR_BASE + 0x0000005c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19     (RKISP1_CIF_ISP_WDR_BASE + 0x00000060)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_20     (RKISP1_CIF_ISP_WDR_BASE + 0x00000064)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_21     (RKISP1_CIF_ISP_WDR_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22     (RKISP1_CIF_ISP_WDR_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22     (RKISP1_CIF_ISP_WDR_BASE + 0x0000006c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23     (RKISP1_CIF_ISP_WDR_BASE + 0x00000070)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_24     (RKISP1_CIF_ISP_WDR_BASE + 0x00000074)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_25     (RKISP1_CIF_ISP_WDR_BASE + 0x00000078)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26     (RKISP1_CIF_ISP_WDR_BASE + 0x0000007C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26     (RKISP1_CIF_ISP_WDR_BASE + 0x0000007c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27     (RKISP1_CIF_ISP_WDR_BASE + 0x00000080)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_28     (RKISP1_CIF_ISP_WDR_BASE + 0x00000084)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_29     (RKISP1_CIF_ISP_WDR_BASE + 0x00000088)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30     (RKISP1_CIF_ISP_WDR_BASE + 0x0000008C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30     (RKISP1_CIF_ISP_WDR_BASE + 0x0000008c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31     (RKISP1_CIF_ISP_WDR_BASE + 0x00000090)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32     (RKISP1_CIF_ISP_WDR_BASE + 0x00000094)
 #define RKISP1_CIF_ISP_WDR_OFFSET              (RKISP1_CIF_ISP_WDR_BASE + 0x00000098)
-#define RKISP1_CIF_ISP_WDR_DELTAMIN            (RKISP1_CIF_ISP_WDR_BASE + 0x0000009C)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_1_SHD     (RKISP1_CIF_ISP_WDR_BASE + 0x000000A0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_2_SHD     (RKISP1_CIF_ISP_WDR_BASE + 0x000000A4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_3_SHD     (RKISP1_CIF_ISP_WDR_BASE + 0x000000A8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_4_SHD     (RKISP1_CIF_ISP_WDR_BASE + 0x000000AC)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000B0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000B4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000B8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000BC)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000C0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000C4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000C8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000CC)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000D0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000D4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000D8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000DC)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000E0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000E4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000E8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000EC)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000F0)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000F4)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000F8)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000FC)
+#define RKISP1_CIF_ISP_WDR_DELTAMIN            (RKISP1_CIF_ISP_WDR_BASE + 0x0000009c)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_1_SHD     (RKISP1_CIF_ISP_WDR_BASE + 0x000000a0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_2_SHD     (RKISP1_CIF_ISP_WDR_BASE + 0x000000a4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_3_SHD     (RKISP1_CIF_ISP_WDR_BASE + 0x000000a8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_4_SHD     (RKISP1_CIF_ISP_WDR_BASE + 0x000000ac)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_0_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000b0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_1_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000b4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_2_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000b8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_3_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000bc)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_4_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000c0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_5_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000c4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_6_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000c8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_7_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000cc)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_8_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000d0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_9_SHD  (RKISP1_CIF_ISP_WDR_BASE + 0x000000d4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_10_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000d8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_11_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000dc)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_12_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000e0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_13_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000e4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_14_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000e8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_15_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000ec)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_16_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000f0)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_17_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000f4)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_18_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000f8)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_19_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x000000fc)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_20_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000100)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_21_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000104)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_22_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000108)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000010C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_23_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000010c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_24_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000110)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_25_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000114)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_26_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000118)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_27_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000011c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_28_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000120)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_29_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000124)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_30_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000128)
-#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000012c)
 #define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000130)
 
-#define RKISP1_CIF_ISP_HIST_BASE_V12           0x00002C00
+#define RKISP1_CIF_ISP_HIST_BASE_V12           0x00002c00
 #define RKISP1_CIF_ISP_HIST_CTRL_V12           (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000000)
 #define RKISP1_CIF_ISP_HIST_SIZE_V12           (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000004)
 #define RKISP1_CIF_ISP_HIST_OFFS_V12           (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000008)
-#define RKISP1_CIF_ISP_HIST_DBG1_V12           (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000000C)
-#define RKISP1_CIF_ISP_HIST_DBG2_V12           (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000001C)
-#define RKISP1_CIF_ISP_HIST_DBG3_V12           (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000002C)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_V12         (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_DBG1_V12           (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000000c)
+#define RKISP1_CIF_ISP_HIST_DBG2_V12           (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000001c)
+#define RKISP1_CIF_ISP_HIST_DBG3_V12           (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000002c)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_V12         (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000003c)
 #define RKISP1_CIF_ISP_HIST_BIN_V12            (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000120)
 
-#define RKISP1_CIF_ISP_VSM_BASE                        0x00002F00
+#define RKISP1_CIF_ISP_VSM_BASE                        0x00002f00
 #define RKISP1_CIF_ISP_VSM_MODE                        (RKISP1_CIF_ISP_VSM_BASE + 0x00000000)
 #define RKISP1_CIF_ISP_VSM_H_OFFS              (RKISP1_CIF_ISP_VSM_BASE + 0x00000004)
 #define RKISP1_CIF_ISP_VSM_V_OFFS              (RKISP1_CIF_ISP_VSM_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_VSM_H_SIZE              (RKISP1_CIF_ISP_VSM_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_VSM_H_SIZE              (RKISP1_CIF_ISP_VSM_BASE + 0x0000000c)
 #define RKISP1_CIF_ISP_VSM_V_SIZE              (RKISP1_CIF_ISP_VSM_BASE + 0x00000010)
 #define RKISP1_CIF_ISP_VSM_H_SEGMENTS          (RKISP1_CIF_ISP_VSM_BASE + 0x00000014)
 #define RKISP1_CIF_ISP_VSM_V_SEGMENTS          (RKISP1_CIF_ISP_VSM_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_VSM_DELTA_H             (RKISP1_CIF_ISP_VSM_BASE + 0x0000001C)
+#define RKISP1_CIF_ISP_VSM_DELTA_H             (RKISP1_CIF_ISP_VSM_BASE + 0x0000001c)
 #define RKISP1_CIF_ISP_VSM_DELTA_V             (RKISP1_CIF_ISP_VSM_BASE + 0x00000020)
 
 #define RKISP1_CIF_ISP_CSI0_BASE               0x00007000
index c15ae0218118c01e42cd73974c8e65f0e3d3394e..28ecc7347d5433b423649a094ec12dc0d95a3876 100644 (file)
@@ -60,7 +60,6 @@ struct rkisp1_rsz_config {
        const int min_rsz_height;
        /* registers */
        struct {
-               u32 ctrl;
                u32 yuvmode_mask;
                u32 rawmode_mask;
                u32 h_offset;
@@ -78,7 +77,6 @@ static const struct rkisp1_rsz_config rkisp1_rsz_config_mp = {
        .min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
        /* registers */
        .dual_crop = {
-               .ctrl =                 RKISP1_CIF_DUAL_CROP_CTRL,
                .yuvmode_mask =         RKISP1_CIF_DUAL_CROP_MP_MODE_YUV,
                .rawmode_mask =         RKISP1_CIF_DUAL_CROP_MP_MODE_RAW,
                .h_offset =             RKISP1_CIF_DUAL_CROP_M_H_OFFS,
@@ -96,7 +94,6 @@ static const struct rkisp1_rsz_config rkisp1_rsz_config_sp = {
        .min_rsz_height = RKISP1_RSZ_SRC_MIN_HEIGHT,
        /* registers */
        .dual_crop = {
-               .ctrl =                 RKISP1_CIF_DUAL_CROP_CTRL,
                .yuvmode_mask =         RKISP1_CIF_DUAL_CROP_SP_MODE_YUV,
                .rawmode_mask =         RKISP1_CIF_DUAL_CROP_SP_MODE_RAW,
                .h_offset =             RKISP1_CIF_DUAL_CROP_S_H_OFFS,
@@ -117,34 +114,6 @@ static inline void rkisp1_rsz_write(struct rkisp1_resizer *rsz, u32 offset,
        rkisp1_write(rsz->rkisp1, rsz->regs_base + offset, value);
 }
 
-static struct v4l2_mbus_framefmt *
-rkisp1_rsz_get_pad_fmt(struct rkisp1_resizer *rsz,
-                      struct v4l2_subdev_state *sd_state,
-                      unsigned int pad, u32 which)
-{
-       struct v4l2_subdev_state state = {
-               .pads = rsz->pad_cfg,
-       };
-       if (which == V4L2_SUBDEV_FORMAT_TRY)
-               return v4l2_subdev_get_try_format(&rsz->sd, sd_state, pad);
-       else
-               return v4l2_subdev_get_try_format(&rsz->sd, &state, pad);
-}
-
-static struct v4l2_rect *
-rkisp1_rsz_get_pad_crop(struct rkisp1_resizer *rsz,
-                       struct v4l2_subdev_state *sd_state,
-                       unsigned int pad, u32 which)
-{
-       struct v4l2_subdev_state state = {
-               .pads = rsz->pad_cfg,
-       };
-       if (which == V4L2_SUBDEV_FORMAT_TRY)
-               return v4l2_subdev_get_try_crop(&rsz->sd, sd_state, pad);
-       else
-               return v4l2_subdev_get_try_crop(&rsz->sd, &state, pad);
-}
-
 /* ----------------------------------------------------------------------------
  * Dual crop hw configs
  */
@@ -152,7 +121,7 @@ rkisp1_rsz_get_pad_crop(struct rkisp1_resizer *rsz,
 static void rkisp1_dcrop_disable(struct rkisp1_resizer *rsz,
                                 enum rkisp1_shadow_regs_when when)
 {
-       u32 dc_ctrl = rkisp1_read(rsz->rkisp1, rsz->config->dual_crop.ctrl);
+       u32 dc_ctrl = rkisp1_read(rsz->rkisp1, RKISP1_CIF_DUAL_CROP_CTRL);
        u32 mask = ~(rsz->config->dual_crop.yuvmode_mask |
                     rsz->config->dual_crop.rawmode_mask);
 
@@ -161,21 +130,22 @@ static void rkisp1_dcrop_disable(struct rkisp1_resizer *rsz,
                dc_ctrl |= RKISP1_CIF_DUAL_CROP_GEN_CFG_UPD;
        else
                dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
-       rkisp1_write(rsz->rkisp1, rsz->config->dual_crop.ctrl, dc_ctrl);
+       rkisp1_write(rsz->rkisp1, RKISP1_CIF_DUAL_CROP_CTRL, dc_ctrl);
 }
 
 /* configure dual-crop unit */
-static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz)
+static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz,
+                               struct v4l2_subdev_state *sd_state)
 {
        struct rkisp1_device *rkisp1 = rsz->rkisp1;
        struct v4l2_mbus_framefmt *sink_fmt;
        struct v4l2_rect *sink_crop;
        u32 dc_ctrl;
 
-       sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
-                                           V4L2_SUBDEV_FORMAT_ACTIVE);
-       sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SINK,
-                                         V4L2_SUBDEV_FORMAT_ACTIVE);
+       sink_crop = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
+                                            RKISP1_RSZ_PAD_SINK);
+       sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+                                             RKISP1_RSZ_PAD_SINK);
 
        if (sink_crop->width == sink_fmt->width &&
            sink_crop->height == sink_fmt->height &&
@@ -185,14 +155,14 @@ static void rkisp1_dcrop_config(struct rkisp1_resizer *rsz)
                return;
        }
 
-       dc_ctrl = rkisp1_read(rkisp1, rsz->config->dual_crop.ctrl);
+       dc_ctrl = rkisp1_read(rkisp1, RKISP1_CIF_DUAL_CROP_CTRL);
        rkisp1_write(rkisp1, rsz->config->dual_crop.h_offset, sink_crop->left);
        rkisp1_write(rkisp1, rsz->config->dual_crop.v_offset, sink_crop->top);
        rkisp1_write(rkisp1, rsz->config->dual_crop.h_size, sink_crop->width);
        rkisp1_write(rkisp1, rsz->config->dual_crop.v_size, sink_crop->height);
        dc_ctrl |= rsz->config->dual_crop.yuvmode_mask;
        dc_ctrl |= RKISP1_CIF_DUAL_CROP_CFG_UPD;
-       rkisp1_write(rkisp1, rsz->config->dual_crop.ctrl, dc_ctrl);
+       rkisp1_write(rkisp1, RKISP1_CIF_DUAL_CROP_CTRL, dc_ctrl);
 
        dev_dbg(rkisp1->dev, "stream %d crop: %dx%d -> %dx%d\n", rsz->id,
                sink_fmt->width, sink_fmt->height,
@@ -236,10 +206,10 @@ static void rkisp1_rsz_disable(struct rkisp1_resizer *rsz,
 }
 
 static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
-                                  struct v4l2_rect *sink_y,
-                                  struct v4l2_rect *sink_c,
-                                  struct v4l2_rect *src_y,
-                                  struct v4l2_rect *src_c,
+                                  const struct v4l2_rect *sink_y,
+                                  const struct v4l2_rect *sink_c,
+                                  const struct v4l2_area *src_y,
+                                  const struct v4l2_area *src_c,
                                   enum rkisp1_shadow_regs_when when)
 {
        u32 ratio, rsz_ctrl = 0;
@@ -296,61 +266,63 @@ static void rkisp1_rsz_config_regs(struct rkisp1_resizer *rsz,
 }
 
 static void rkisp1_rsz_config(struct rkisp1_resizer *rsz,
+                             struct v4l2_subdev_state *sd_state,
                              enum rkisp1_shadow_regs_when when)
 {
        const struct rkisp1_rsz_yuv_mbus_info *sink_yuv_info, *src_yuv_info;
-       struct v4l2_rect sink_y, sink_c, src_y, src_c;
-       struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
-       struct v4l2_rect *sink_crop;
+       const struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
+       const struct v4l2_rect *sink_y;
+       struct v4l2_area src_y, src_c;
+       struct v4l2_rect sink_c;
+
+       sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+                                             RKISP1_RSZ_PAD_SINK);
+       src_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+                                            RKISP1_RSZ_PAD_SRC);
 
-       sink_crop = rkisp1_rsz_get_pad_crop(rsz, NULL, RKISP1_RSZ_PAD_SINK,
-                                           V4L2_SUBDEV_FORMAT_ACTIVE);
-       src_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SRC,
-                                        V4L2_SUBDEV_FORMAT_ACTIVE);
-       src_yuv_info = rkisp1_rsz_get_yuv_mbus_info(src_fmt->code);
-       sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, NULL, RKISP1_RSZ_PAD_SINK,
-                                         V4L2_SUBDEV_FORMAT_ACTIVE);
        sink_yuv_info = rkisp1_rsz_get_yuv_mbus_info(sink_fmt->code);
+       src_yuv_info = rkisp1_rsz_get_yuv_mbus_info(src_fmt->code);
 
        /*
-        * The resizer only works on yuv formats,
-        * so return if it is bayer format.
+        * The resizer only works on yuv formats, so return if it is bayer
+        * format.
         */
-       if (rsz->pixel_enc == V4L2_PIXEL_ENC_BAYER) {
+       if (!sink_yuv_info) {
                rkisp1_rsz_disable(rsz, when);
                return;
        }
 
-       sink_y.width = sink_crop->width;
-       sink_y.height = sink_crop->height;
+       sink_y = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
+                                         RKISP1_RSZ_PAD_SINK);
+       sink_c.width = sink_y->width / sink_yuv_info->hdiv;
+       sink_c.height = sink_y->height / sink_yuv_info->vdiv;
+
        src_y.width = src_fmt->width;
        src_y.height = src_fmt->height;
-
-       sink_c.width = sink_y.width / sink_yuv_info->hdiv;
-       sink_c.height = sink_y.height / sink_yuv_info->vdiv;
+       src_c.width = src_y.width / src_yuv_info->hdiv;
+       src_c.height = src_y.height / src_yuv_info->vdiv;
 
        /*
         * The resizer is used not only to change the dimensions of the frame
-        * but also to change the scale for YUV formats,
-        * (4:2:2 -> 4:2:0 for example). So the width/height of the CbCr
-        * streams should be set according to the media bus format in the src pad.
+        * but also to change the subsampling for YUV formats (for instance
+        * converting from 4:2:2 to 4:2:0). Check both the luma and chroma
+        * dimensions to decide whether or not to enable the resizer.
         */
-       src_c.width = src_y.width / src_yuv_info->hdiv;
-       src_c.height = src_y.height / src_yuv_info->vdiv;
 
-       if (sink_c.width == src_c.width && sink_c.height == src_c.height) {
+       dev_dbg(rsz->rkisp1->dev,
+               "stream %u rsz/scale: Y %ux%u -> %ux%u, CbCr %ux%u -> %ux%u\n",
+               rsz->id, sink_y->width, sink_y->height,
+               src_fmt->width, src_fmt->height,
+               sink_c.width, sink_c.height, src_c.width, src_c.height);
+
+       if (sink_y->width == src_y.width && sink_y->height == src_y.height &&
+           sink_c.width == src_c.width && sink_c.height == src_c.height) {
                rkisp1_rsz_disable(rsz, when);
                return;
        }
 
-       dev_dbg(rsz->rkisp1->dev, "stream %d rsz/scale: %dx%d -> %dx%d\n",
-               rsz->id, sink_crop->width, sink_crop->height,
-               src_fmt->width, src_fmt->height);
-       dev_dbg(rsz->rkisp1->dev, "chroma scaling %dx%d -> %dx%d\n",
-               sink_c.width, sink_c.height, src_c.width, src_c.height);
-
-       /* set values in the hw */
-       rkisp1_rsz_config_regs(rsz, &sink_y, &sink_c, &src_y, &src_c, when);
+       /* Set values in the hardware. */
+       rkisp1_rsz_config_regs(rsz, sink_y, &sink_c, &src_y, &src_c, when);
 }
 
 /* ----------------------------------------------------------------------------
@@ -405,7 +377,7 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
        struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
        struct v4l2_rect *sink_crop;
 
-       sink_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+       sink_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
                                              RKISP1_RSZ_PAD_SRC);
        sink_fmt->width = RKISP1_DEFAULT_WIDTH;
        sink_fmt->height = RKISP1_DEFAULT_HEIGHT;
@@ -423,7 +395,7 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
        sink_crop->left = 0;
        sink_crop->top = 0;
 
-       src_fmt = v4l2_subdev_get_try_format(sd, sd_state,
+       src_fmt = v4l2_subdev_get_pad_format(sd, sd_state,
                                             RKISP1_RSZ_PAD_SINK);
        *src_fmt = *sink_fmt;
 
@@ -434,16 +406,16 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd,
 
 static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
                                   struct v4l2_subdev_state *sd_state,
-                                  struct v4l2_mbus_framefmt *format,
-                                  unsigned int which)
+                                  struct v4l2_mbus_framefmt *format)
 {
        const struct rkisp1_mbus_info *sink_mbus_info;
        struct v4l2_mbus_framefmt *src_fmt, *sink_fmt;
 
-       sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
-                                         which);
-       src_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SRC,
-                                        which);
+       sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+                                             RKISP1_RSZ_PAD_SINK);
+       src_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+                                            RKISP1_RSZ_PAD_SRC);
+
        sink_mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
 
        /* for YUV formats, userspace can change the mbus code on the src pad if it is supported */
@@ -463,18 +435,16 @@ static void rkisp1_rsz_set_src_fmt(struct rkisp1_resizer *rsz,
 
 static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
                                     struct v4l2_subdev_state *sd_state,
-                                    struct v4l2_rect *r,
-                                    unsigned int which)
+                                    struct v4l2_rect *r)
 {
        const struct rkisp1_mbus_info *mbus_info;
        struct v4l2_mbus_framefmt *sink_fmt;
        struct v4l2_rect *sink_crop;
 
-       sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
-                                         which);
-       sink_crop = rkisp1_rsz_get_pad_crop(rsz, sd_state,
-                                           RKISP1_RSZ_PAD_SINK,
-                                           which);
+       sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+                                             RKISP1_RSZ_PAD_SINK);
+       sink_crop = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
+                                            RKISP1_RSZ_PAD_SINK);
 
        /* Not crop for MP bayer raw data */
        mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
@@ -501,21 +471,20 @@ static void rkisp1_rsz_set_sink_crop(struct rkisp1_resizer *rsz,
 
 static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
                                    struct v4l2_subdev_state *sd_state,
-                                   struct v4l2_mbus_framefmt *format,
-                                   unsigned int which)
+                                   struct v4l2_mbus_framefmt *format)
 {
        const struct rkisp1_mbus_info *mbus_info;
        struct v4l2_mbus_framefmt *sink_fmt, *src_fmt;
        struct v4l2_rect *sink_crop;
        bool is_yuv;
 
-       sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK,
-                                         which);
-       src_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SRC,
-                                        which);
-       sink_crop = rkisp1_rsz_get_pad_crop(rsz, sd_state,
-                                           RKISP1_RSZ_PAD_SINK,
-                                           which);
+       sink_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+                                             RKISP1_RSZ_PAD_SINK);
+       src_fmt = v4l2_subdev_get_pad_format(&rsz->sd, sd_state,
+                                            RKISP1_RSZ_PAD_SRC);
+       sink_crop = v4l2_subdev_get_pad_crop(&rsz->sd, sd_state,
+                                            RKISP1_RSZ_PAD_SINK);
+
        if (rsz->id == RKISP1_SELFPATH)
                sink_fmt->code = MEDIA_BUS_FMT_YUYV8_2X8;
        else
@@ -526,8 +495,6 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
                sink_fmt->code = RKISP1_DEF_FMT;
                mbus_info = rkisp1_mbus_info_get_by_code(sink_fmt->code);
        }
-       if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
-               rsz->pixel_enc = mbus_info->pixel_enc;
 
        sink_fmt->width = clamp_t(u32, format->width,
                                  RKISP1_ISP_MIN_WIDTH,
@@ -576,21 +543,7 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz,
        src_fmt->quantization = sink_fmt->quantization;
 
        /* Update sink crop */
-       rkisp1_rsz_set_sink_crop(rsz, sd_state, sink_crop, which);
-}
-
-static int rkisp1_rsz_get_fmt(struct v4l2_subdev *sd,
-                             struct v4l2_subdev_state *sd_state,
-                             struct v4l2_subdev_format *fmt)
-{
-       struct rkisp1_resizer *rsz =
-               container_of(sd, struct rkisp1_resizer, sd);
-
-       mutex_lock(&rsz->ops_lock);
-       fmt->format = *rkisp1_rsz_get_pad_fmt(rsz, sd_state, fmt->pad,
-                                             fmt->which);
-       mutex_unlock(&rsz->ops_lock);
-       return 0;
+       rkisp1_rsz_set_sink_crop(rsz, sd_state, sink_crop);
 }
 
 static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
@@ -600,15 +553,11 @@ static int rkisp1_rsz_set_fmt(struct v4l2_subdev *sd,
        struct rkisp1_resizer *rsz =
                container_of(sd, struct rkisp1_resizer, sd);
 
-       mutex_lock(&rsz->ops_lock);
        if (fmt->pad == RKISP1_RSZ_PAD_SINK)
-               rkisp1_rsz_set_sink_fmt(rsz, sd_state, &fmt->format,
-                                       fmt->which);
+               rkisp1_rsz_set_sink_fmt(rsz, sd_state, &fmt->format);
        else
-               rkisp1_rsz_set_src_fmt(rsz, sd_state, &fmt->format,
-                                      fmt->which);
+               rkisp1_rsz_set_src_fmt(rsz, sd_state, &fmt->format);
 
-       mutex_unlock(&rsz->ops_lock);
        return 0;
 }
 
@@ -616,35 +565,32 @@ static int rkisp1_rsz_get_selection(struct v4l2_subdev *sd,
                                    struct v4l2_subdev_state *sd_state,
                                    struct v4l2_subdev_selection *sel)
 {
-       struct rkisp1_resizer *rsz =
-               container_of(sd, struct rkisp1_resizer, sd);
        struct v4l2_mbus_framefmt *mf_sink;
        int ret = 0;
 
        if (sel->pad == RKISP1_RSZ_PAD_SRC)
                return -EINVAL;
 
-       mutex_lock(&rsz->ops_lock);
        switch (sel->target) {
        case V4L2_SEL_TGT_CROP_BOUNDS:
-               mf_sink = rkisp1_rsz_get_pad_fmt(rsz, sd_state,
-                                                RKISP1_RSZ_PAD_SINK,
-                                                sel->which);
+               mf_sink = v4l2_subdev_get_pad_format(sd, sd_state,
+                                                    RKISP1_RSZ_PAD_SINK);
                sel->r.height = mf_sink->height;
                sel->r.width = mf_sink->width;
                sel->r.left = 0;
                sel->r.top = 0;
                break;
+
        case V4L2_SEL_TGT_CROP:
-               sel->r = *rkisp1_rsz_get_pad_crop(rsz, sd_state,
-                                                 RKISP1_RSZ_PAD_SINK,
-                                                 sel->which);
+               sel->r = *v4l2_subdev_get_pad_crop(sd, sd_state,
+                                                  RKISP1_RSZ_PAD_SINK);
                break;
+
        default:
                ret = -EINVAL;
+               break;
        }
 
-       mutex_unlock(&rsz->ops_lock);
        return ret;
 }
 
@@ -661,9 +607,7 @@ static int rkisp1_rsz_set_selection(struct v4l2_subdev *sd,
        dev_dbg(rsz->rkisp1->dev, "%s: pad: %d sel(%d,%d)/%dx%d\n", __func__,
                sel->pad, sel->r.left, sel->r.top, sel->r.width, sel->r.height);
 
-       mutex_lock(&rsz->ops_lock);
-       rkisp1_rsz_set_sink_crop(rsz, sd_state, &sel->r, sel->which);
-       mutex_unlock(&rsz->ops_lock);
+       rkisp1_rsz_set_sink_crop(rsz, sd_state, &sel->r);
 
        return 0;
 }
@@ -677,7 +621,7 @@ static const struct v4l2_subdev_pad_ops rkisp1_rsz_pad_ops = {
        .get_selection = rkisp1_rsz_get_selection,
        .set_selection = rkisp1_rsz_set_selection,
        .init_cfg = rkisp1_rsz_init_config,
-       .get_fmt = rkisp1_rsz_get_fmt,
+       .get_fmt = v4l2_subdev_get_fmt,
        .set_fmt = rkisp1_rsz_set_fmt,
        .link_validate = v4l2_subdev_link_validate_default,
 };
@@ -693,6 +637,7 @@ static int rkisp1_rsz_s_stream(struct v4l2_subdev *sd, int enable)
        struct rkisp1_device *rkisp1 = rsz->rkisp1;
        struct rkisp1_capture *other = &rkisp1->capture_devs[rsz->id ^ 1];
        enum rkisp1_shadow_regs_when when = RKISP1_SHADOW_REGS_SYNC;
+       struct v4l2_subdev_state *sd_state;
 
        if (!enable) {
                rkisp1_dcrop_disable(rsz, RKISP1_SHADOW_REGS_ASYNC);
@@ -703,11 +648,13 @@ static int rkisp1_rsz_s_stream(struct v4l2_subdev *sd, int enable)
        if (other->is_streaming)
                when = RKISP1_SHADOW_REGS_ASYNC;
 
-       mutex_lock(&rsz->ops_lock);
-       rkisp1_rsz_config(rsz, when);
-       rkisp1_dcrop_config(rsz);
+       sd_state = v4l2_subdev_lock_and_get_active_state(sd);
+
+       rkisp1_rsz_config(rsz, sd_state, when);
+       rkisp1_dcrop_config(rsz, sd_state);
+
+       v4l2_subdev_unlock_state(sd_state);
 
-       mutex_unlock(&rsz->ops_lock);
        return 0;
 }
 
@@ -726,15 +673,12 @@ static void rkisp1_rsz_unregister(struct rkisp1_resizer *rsz)
                return;
 
        v4l2_device_unregister_subdev(&rsz->sd);
+       v4l2_subdev_cleanup(&rsz->sd);
        media_entity_cleanup(&rsz->sd.entity);
-       mutex_destroy(&rsz->ops_lock);
 }
 
 static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
 {
-       struct v4l2_subdev_state state = {
-               .pads = rsz->pad_cfg,
-       };
        static const char * const dev_names[] = {
                RKISP1_RSZ_MP_DEV_NAME,
                RKISP1_RSZ_SP_DEV_NAME
@@ -763,25 +707,26 @@ static int rkisp1_rsz_register(struct rkisp1_resizer *rsz)
        pads[RKISP1_RSZ_PAD_SRC].flags = MEDIA_PAD_FL_SOURCE |
                                         MEDIA_PAD_FL_MUST_CONNECT;
 
-       rsz->pixel_enc = RKISP1_DEF_PIXEL_ENC;
-
-       mutex_init(&rsz->ops_lock);
        ret = media_entity_pads_init(&sd->entity, RKISP1_RSZ_PAD_MAX, pads);
        if (ret)
-               goto error;
+               goto err_entity_cleanup;
+
+       ret = v4l2_subdev_init_finalize(sd);
+       if (ret)
+               goto err_entity_cleanup;
 
        ret = v4l2_device_register_subdev(&rsz->rkisp1->v4l2_dev, sd);
        if (ret) {
                dev_err(sd->dev, "Failed to register resizer subdev\n");
-               goto error;
+               goto err_subdev_cleanup;
        }
 
-       rkisp1_rsz_init_config(sd, &state);
        return 0;
 
-error:
+err_subdev_cleanup:
+       v4l2_subdev_cleanup(sd);
+err_entity_cleanup:
        media_entity_cleanup(&sd->entity);
-       mutex_destroy(&rsz->ops_lock);
        return ret;
 }
 
index 530a148fe4d3825312af1f4b1e45ef3d61e8b490..a08c87ef6e2d87c8f2adb59442d3f4d68df34403 100644 (file)
@@ -767,12 +767,32 @@ static void fimc_is_debugfs_create(struct fimc_is *is)
 static int fimc_is_runtime_resume(struct device *dev);
 static int fimc_is_runtime_suspend(struct device *dev);
 
+static void __iomem *fimc_is_get_pmu_regs(struct device *dev)
+{
+       struct device_node *node;
+       void __iomem *regs;
+
+       node = of_parse_phandle(dev->of_node, "samsung,pmu-syscon", 0);
+       if (!node) {
+               node = of_get_child_by_name(dev->of_node, "pmu");
+               if (!node)
+                       return IOMEM_ERR_PTR(-ENODEV);
+               dev_warn(dev, "Found PMU node via deprecated method, update your DTB\n");
+       }
+
+       regs = of_iomap(node, 0);
+       of_node_put(node);
+       if (!regs)
+               return IOMEM_ERR_PTR(-ENOMEM);
+
+       return regs;
+}
+
 static int fimc_is_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct fimc_is *is;
        struct resource res;
-       struct device_node *node;
        int ret;
 
        is = devm_kzalloc(&pdev->dev, sizeof(*is), GFP_KERNEL);
@@ -794,14 +814,9 @@ static int fimc_is_probe(struct platform_device *pdev)
        if (IS_ERR(is->regs))
                return PTR_ERR(is->regs);
 
-       node = of_get_child_by_name(dev->of_node, "pmu");
-       if (!node)
-               return -ENODEV;
-
-       is->pmu_regs = of_iomap(node, 0);
-       of_node_put(node);
-       if (!is->pmu_regs)
-               return -ENOMEM;
+       is->pmu_regs = fimc_is_get_pmu_regs(dev);
+       if (IS_ERR(is->pmu_regs))
+               return PTR_ERR(is->pmu_regs);
 
        is->irq = irq_of_parse_and_map(dev->of_node, 0);
        if (!is->irq) {
index 76634d242b10305b7e02cb16a4886e39d5b2afb6..0f5b3845d7b94f6af7c974d416aa589a0e391b1b 100644 (file)
@@ -1133,12 +1133,12 @@ int s3c_camif_register_video_node(struct camif_dev *camif, int idx)
 
        ret = vb2_queue_init(q);
        if (ret)
-               goto err_vd_rel;
+               return ret;
 
        vp->pad.flags = MEDIA_PAD_FL_SINK;
        ret = media_entity_pads_init(&vfd->entity, 1, &vp->pad);
        if (ret)
-               goto err_vd_rel;
+               return ret;
 
        video_set_drvdata(vfd, vp);
 
@@ -1171,8 +1171,6 @@ err_ctrlh_free:
        v4l2_ctrl_handler_free(&vp->ctrl_handler);
 err_me_cleanup:
        media_entity_cleanup(&vfd->entity);
-err_vd_rel:
-       video_device_release(vfd);
        return ret;
 }
 
index f62703cebb77c764d104f1b6bfb0177573a76a15..4b4c129c09e70f3e306f0620cc562512f868e2e7 100644 (file)
@@ -1297,7 +1297,7 @@ static int enc_post_frame_start(struct s5p_mfc_ctx *ctx)
        if (ctx->state == MFCINST_FINISHING && ctx->ref_queue_cnt == 0)
                src_ready = false;
        if (!src_ready || ctx->dst_queue_cnt == 0)
-               clear_work_bit(ctx);
+               clear_work_bit_irqsave(ctx);
 
        return 0;
 }
index 5dc1f908b49bd6d07fecb4fa13d3caa5cbfd5c6c..e4cf27b5a0727351d21e2eca73cbdc378fda8a50 100644 (file)
@@ -695,16 +695,10 @@ static int c8sectpfe_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, fei);
 
-       fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
+       fei->c8sectpfeclk = devm_clk_get_enabled(dev, "c8sectpfe");
        if (IS_ERR(fei->c8sectpfeclk)) {
-               dev_err(dev, "c8sectpfe clk not found\n");
-               return PTR_ERR(fei->c8sectpfeclk);
-       }
-
-       ret = clk_prepare_enable(fei->c8sectpfeclk);
-       if (ret) {
                dev_err(dev, "Failed to enable c8sectpfe clock\n");
-               return ret;
+               return PTR_ERR(fei->c8sectpfeclk);
        }
 
        /* to save power disable all IP's (on by default) */
@@ -722,7 +716,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
                        0, "c8sectpfe-idle-irq", fei);
        if (ret) {
                dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
-               goto err_clk_disable;
+               return ret;
        }
 
        ret = devm_request_irq(dev, fei->error_irq,
@@ -730,7 +724,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
                                "c8sectpfe-error-irq", fei);
        if (ret) {
                dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
-               goto err_clk_disable;
+               return ret;
        }
 
        fei->tsin_count = of_get_child_count(np);
@@ -739,16 +733,14 @@ static int c8sectpfe_probe(struct platform_device *pdev)
                fei->tsin_count > fei->hw_stats.num_ib) {
 
                dev_err(dev, "More tsin declared than exist on SoC!\n");
-               ret = -EINVAL;
-               goto err_clk_disable;
+               return -EINVAL;
        }
 
        fei->pinctrl = devm_pinctrl_get(dev);
 
        if (IS_ERR(fei->pinctrl)) {
                dev_err(dev, "Error getting tsin pins\n");
-               ret = PTR_ERR(fei->pinctrl);
-               goto err_clk_disable;
+               return PTR_ERR(fei->pinctrl);
        }
 
        for_each_child_of_node(np, child) {
@@ -859,7 +851,7 @@ static int c8sectpfe_probe(struct platform_device *pdev)
        if (ret) {
                dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
                        ret);
-               goto err_clk_disable;
+               return ret;
        }
 
        c8sectpfe_debugfs_init(fei);
@@ -868,8 +860,6 @@ static int c8sectpfe_probe(struct platform_device *pdev)
 
 err_node_put:
        of_node_put(child);
-err_clk_disable:
-       clk_disable_unprepare(fei->c8sectpfeclk);
        return ret;
 }
 
@@ -903,8 +893,6 @@ static void c8sectpfe_remove(struct platform_device *pdev)
 
        if (readl(fei->io + SYS_OTHER_CLKEN))
                writel(0, fei->io + SYS_OTHER_CLKEN);
-
-       clk_disable_unprepare(fei->c8sectpfeclk);
 }
 
 
index e1ab56c3be1fecd4f7b0fd986e2fefdd21379901..bab998c4179aca3b07372782b9be7de340cb8d45 100644 (file)
@@ -63,6 +63,18 @@ config VIDEO_TI_VPE_DEBUG
        help
          Enable debug messages on VPE driver.
 
+config VIDEO_TI_J721E_CSI2RX
+       tristate "TI J721E CSI2RX wrapper layer driver"
+       depends on VIDEO_DEV && VIDEO_V4L2_SUBDEV_API
+       depends on MEDIA_SUPPORT && MEDIA_CONTROLLER
+       depends on (PHY_CADENCE_DPHY_RX && VIDEO_CADENCE_CSI2RX) || COMPILE_TEST
+       depends on ARCH_K3 || COMPILE_TEST
+       select VIDEOBUF2_DMA_CONTIG
+       select V4L2_FWNODE
+       help
+         Support for TI CSI2RX wrapper layer. This just enables the wrapper driver.
+         The Cadence CSI2RX bridge driver needs to be enabled separately.
+
 source "drivers/media/platform/ti/am437x/Kconfig"
 source "drivers/media/platform/ti/davinci/Kconfig"
 source "drivers/media/platform/ti/omap/Kconfig"
index 98c5fe5c40d65f0b3ae5beb3ae615b268c4cbf3f..8a2f74c9380eaba2f0a52f39d5acad80027406ff 100644 (file)
@@ -3,5 +3,6 @@ obj-y += am437x/
 obj-y += cal/
 obj-y += vpe/
 obj-y += davinci/
+obj-y += j721e-csi2rx/
 obj-y += omap/
 obj-y += omap3isp/
index 63092013d476ef32e11cce1085eca04fafde9058..5fa2ea9025d96b919fd456cd6b1943dfaff2239a 100644 (file)
@@ -1271,12 +1271,8 @@ static inline void vpfe_attach_irq(struct vpfe_device *vpfe)
 static int vpfe_querycap(struct file *file, void  *priv,
                         struct v4l2_capability *cap)
 {
-       struct vpfe_device *vpfe = video_drvdata(file);
-
        strscpy(cap->driver, VPFE_MODULE_NAME, sizeof(cap->driver));
        strscpy(cap->card, "TI AM437x VPFE", sizeof(cap->card));
-       snprintf(cap->bus_info, sizeof(cap->bus_info),
-                       "platform:%s", vpfe->v4l2_dev.name);
        return 0;
 }
 
diff --git a/drivers/media/platform/ti/j721e-csi2rx/Makefile b/drivers/media/platform/ti/j721e-csi2rx/Makefile
new file mode 100644 (file)
index 0000000..377afc1
--- /dev/null
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_TI_J721E_CSI2RX) += j721e-csi2rx.o
diff --git a/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c b/drivers/media/platform/ti/j721e-csi2rx/j721e-csi2rx.c
new file mode 100644 (file)
index 0000000..ada6139
--- /dev/null
@@ -0,0 +1,1159 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * TI CSI2RX Shim Wrapper Driver
+ *
+ * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ * Author: Pratyush Yadav <p.yadav@ti.com>
+ * Author: Jai Luthra <j-luthra@ti.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+
+#include <media/mipi-csi2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mc.h>
+#include <media/videobuf2-dma-contig.h>
+
+#define TI_CSI2RX_MODULE_NAME          "j721e-csi2rx"
+
+#define SHIM_CNTL                      0x10
+#define SHIM_CNTL_PIX_RST              BIT(0)
+
+#define SHIM_DMACNTX                   0x20
+#define SHIM_DMACNTX_EN                        BIT(31)
+#define SHIM_DMACNTX_YUV422            GENMASK(27, 26)
+#define SHIM_DMACNTX_SIZE              GENMASK(21, 20)
+#define SHIM_DMACNTX_FMT               GENMASK(5, 0)
+#define SHIM_DMACNTX_YUV422_MODE_11    3
+#define SHIM_DMACNTX_SIZE_8            0
+#define SHIM_DMACNTX_SIZE_16           1
+#define SHIM_DMACNTX_SIZE_32           2
+
+#define SHIM_PSI_CFG0                  0x24
+#define SHIM_PSI_CFG0_SRC_TAG          GENMASK(15, 0)
+#define SHIM_PSI_CFG0_DST_TAG          GENMASK(31, 16)
+
+#define PSIL_WORD_SIZE_BYTES           16
+/*
+ * There are no hard limits on the width or height. The DMA engine can handle
+ * all sizes. The max width and height are arbitrary numbers for this driver.
+ * Use 16K * 16K as the arbitrary limit. It is large enough that it is unlikely
+ * the limit will be hit in practice.
+ */
+#define MAX_WIDTH_BYTES                        SZ_16K
+#define MAX_HEIGHT_LINES               SZ_16K
+
+#define DRAIN_TIMEOUT_MS               50
+#define DRAIN_BUFFER_SIZE              SZ_32K
+
+struct ti_csi2rx_fmt {
+       u32                             fourcc; /* Four character code. */
+       u32                             code;   /* Mbus code. */
+       u32                             csi_dt; /* CSI Data type. */
+       u8                              bpp;    /* Bits per pixel. */
+       u8                              size;   /* Data size shift when unpacking. */
+};
+
+struct ti_csi2rx_buffer {
+       /* Common v4l2 buffer. Must be first. */
+       struct vb2_v4l2_buffer          vb;
+       struct list_head                list;
+       struct ti_csi2rx_dev            *csi;
+};
+
+enum ti_csi2rx_dma_state {
+       TI_CSI2RX_DMA_STOPPED,  /* Streaming not started yet. */
+       TI_CSI2RX_DMA_IDLE,     /* Streaming but no pending DMA operation. */
+       TI_CSI2RX_DMA_ACTIVE,   /* Streaming and pending DMA operation. */
+};
+
+struct ti_csi2rx_dma {
+       /* Protects all fields in this struct. */
+       spinlock_t                      lock;
+       struct dma_chan                 *chan;
+       /* Buffers queued to the driver, waiting to be processed by DMA. */
+       struct list_head                queue;
+       enum ti_csi2rx_dma_state        state;
+       /*
+        * Queue of buffers submitted to DMA engine.
+        */
+       struct list_head                submitted;
+       /* Buffer to drain stale data from PSI-L endpoint */
+       struct {
+               void                    *vaddr;
+               dma_addr_t              paddr;
+               size_t                  len;
+       } drain;
+};
+
+struct ti_csi2rx_dev {
+       struct device                   *dev;
+       void __iomem                    *shim;
+       struct v4l2_device              v4l2_dev;
+       struct video_device             vdev;
+       struct media_device             mdev;
+       struct media_pipeline           pipe;
+       struct media_pad                pad;
+       struct v4l2_async_notifier      notifier;
+       struct v4l2_subdev              *source;
+       struct vb2_queue                vidq;
+       struct mutex                    mutex; /* To serialize ioctls. */
+       struct v4l2_format              v_fmt;
+       struct ti_csi2rx_dma            dma;
+       u32                             sequence;
+};
+
+static const struct ti_csi2rx_fmt ti_csi2rx_formats[] = {
+       {
+               .fourcc                 = V4L2_PIX_FMT_YUYV,
+               .code                   = MEDIA_BUS_FMT_YUYV8_1X16,
+               .csi_dt                 = MIPI_CSI2_DT_YUV422_8B,
+               .bpp                    = 16,
+               .size                   = SHIM_DMACNTX_SIZE_8,
+       }, {
+               .fourcc                 = V4L2_PIX_FMT_UYVY,
+               .code                   = MEDIA_BUS_FMT_UYVY8_1X16,
+               .csi_dt                 = MIPI_CSI2_DT_YUV422_8B,
+               .bpp                    = 16,
+               .size                   = SHIM_DMACNTX_SIZE_8,
+       }, {
+               .fourcc                 = V4L2_PIX_FMT_YVYU,
+               .code                   = MEDIA_BUS_FMT_YVYU8_1X16,
+               .csi_dt                 = MIPI_CSI2_DT_YUV422_8B,
+               .bpp                    = 16,
+               .size                   = SHIM_DMACNTX_SIZE_8,
+       }, {
+               .fourcc                 = V4L2_PIX_FMT_VYUY,
+               .code                   = MEDIA_BUS_FMT_VYUY8_1X16,
+               .csi_dt                 = MIPI_CSI2_DT_YUV422_8B,
+               .bpp                    = 16,
+               .size                   = SHIM_DMACNTX_SIZE_8,
+       }, {
+               .fourcc                 = V4L2_PIX_FMT_SBGGR8,
+               .code                   = MEDIA_BUS_FMT_SBGGR8_1X8,
+               .csi_dt                 = MIPI_CSI2_DT_RAW8,
+               .bpp                    = 8,
+               .size                   = SHIM_DMACNTX_SIZE_8,
+       }, {
+               .fourcc                 = V4L2_PIX_FMT_SGBRG8,
+               .code                   = MEDIA_BUS_FMT_SGBRG8_1X8,
+               .csi_dt                 = MIPI_CSI2_DT_RAW8,
+               .bpp                    = 8,
+               .size                   = SHIM_DMACNTX_SIZE_8,
+       }, {
+               .fourcc                 = V4L2_PIX_FMT_SGRBG8,
+               .code                   = MEDIA_BUS_FMT_SGRBG8_1X8,
+               .csi_dt                 = MIPI_CSI2_DT_RAW8,
+               .bpp                    = 8,
+               .size                   = SHIM_DMACNTX_SIZE_8,
+       }, {
+               .fourcc                 = V4L2_PIX_FMT_SRGGB8,
+               .code                   = MEDIA_BUS_FMT_SRGGB8_1X8,
+               .csi_dt                 = MIPI_CSI2_DT_RAW8,
+               .bpp                    = 8,
+               .size                   = SHIM_DMACNTX_SIZE_8,
+       }, {
+               .fourcc                 = V4L2_PIX_FMT_SBGGR10,
+               .code                   = MEDIA_BUS_FMT_SBGGR10_1X10,
+               .csi_dt                 = MIPI_CSI2_DT_RAW10,
+               .bpp                    = 16,
+               .size                   = SHIM_DMACNTX_SIZE_16,
+       }, {
+               .fourcc                 = V4L2_PIX_FMT_SGBRG10,
+               .code                   = MEDIA_BUS_FMT_SGBRG10_1X10,
+               .csi_dt                 = MIPI_CSI2_DT_RAW10,
+               .bpp                    = 16,
+               .size                   = SHIM_DMACNTX_SIZE_16,
+       }, {
+               .fourcc                 = V4L2_PIX_FMT_SGRBG10,
+               .code                   = MEDIA_BUS_FMT_SGRBG10_1X10,
+               .csi_dt                 = MIPI_CSI2_DT_RAW10,
+               .bpp                    = 16,
+               .size                   = SHIM_DMACNTX_SIZE_16,
+       }, {
+               .fourcc                 = V4L2_PIX_FMT_SRGGB10,
+               .code                   = MEDIA_BUS_FMT_SRGGB10_1X10,
+               .csi_dt                 = MIPI_CSI2_DT_RAW10,
+               .bpp                    = 16,
+               .size                   = SHIM_DMACNTX_SIZE_16,
+       },
+
+       /* More formats can be supported but they are not listed for now. */
+};
+
+/* Forward declaration needed by ti_csi2rx_dma_callback. */
+static int ti_csi2rx_start_dma(struct ti_csi2rx_dev *csi,
+                              struct ti_csi2rx_buffer *buf);
+
+static const struct ti_csi2rx_fmt *find_format_by_fourcc(u32 pixelformat)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(ti_csi2rx_formats); i++) {
+               if (ti_csi2rx_formats[i].fourcc == pixelformat)
+                       return &ti_csi2rx_formats[i];
+       }
+
+       return NULL;
+}
+
+static const struct ti_csi2rx_fmt *find_format_by_code(u32 code)
+{
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(ti_csi2rx_formats); i++) {
+               if (ti_csi2rx_formats[i].code == code)
+                       return &ti_csi2rx_formats[i];
+       }
+
+       return NULL;
+}
+
+static void ti_csi2rx_fill_fmt(const struct ti_csi2rx_fmt *csi_fmt,
+                              struct v4l2_format *v4l2_fmt)
+{
+       struct v4l2_pix_format *pix = &v4l2_fmt->fmt.pix;
+       unsigned int pixels_in_word;
+
+       pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / csi_fmt->bpp;
+
+       /* Clamp width and height to sensible maximums (16K x 16K) */
+       pix->width = clamp_t(unsigned int, pix->width,
+                            pixels_in_word,
+                            MAX_WIDTH_BYTES * 8 / csi_fmt->bpp);
+       pix->height = clamp_t(unsigned int, pix->height, 1, MAX_HEIGHT_LINES);
+
+       /* Width should be a multiple of transfer word-size */
+       pix->width = rounddown(pix->width, pixels_in_word);
+
+       v4l2_fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       pix->pixelformat = csi_fmt->fourcc;
+       pix->bytesperline = pix->width * (csi_fmt->bpp / 8);
+       pix->sizeimage = pix->bytesperline * pix->height;
+}
+
+static int ti_csi2rx_querycap(struct file *file, void *priv,
+                             struct v4l2_capability *cap)
+{
+       strscpy(cap->driver, TI_CSI2RX_MODULE_NAME, sizeof(cap->driver));
+       strscpy(cap->card, TI_CSI2RX_MODULE_NAME, sizeof(cap->card));
+
+       return 0;
+}
+
+static int ti_csi2rx_enum_fmt_vid_cap(struct file *file, void *priv,
+                                     struct v4l2_fmtdesc *f)
+{
+       const struct ti_csi2rx_fmt *fmt = NULL;
+
+       if (f->mbus_code) {
+               /* 1-to-1 mapping between bus formats and pixel formats */
+               if (f->index > 0)
+                       return -EINVAL;
+
+               fmt = find_format_by_code(f->mbus_code);
+       } else {
+               if (f->index >= ARRAY_SIZE(ti_csi2rx_formats))
+                       return -EINVAL;
+
+               fmt = &ti_csi2rx_formats[f->index];
+       }
+
+       if (!fmt)
+               return -EINVAL;
+
+       f->pixelformat = fmt->fourcc;
+       memset(f->reserved, 0, sizeof(f->reserved));
+       f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+       return 0;
+}
+
+static int ti_csi2rx_g_fmt_vid_cap(struct file *file, void *prov,
+                                  struct v4l2_format *f)
+{
+       struct ti_csi2rx_dev *csi = video_drvdata(file);
+
+       *f = csi->v_fmt;
+
+       return 0;
+}
+
+static int ti_csi2rx_try_fmt_vid_cap(struct file *file, void *priv,
+                                    struct v4l2_format *f)
+{
+       const struct ti_csi2rx_fmt *fmt;
+
+       /*
+        * Default to the first format if the requested pixel format code isn't
+        * supported.
+        */
+       fmt = find_format_by_fourcc(f->fmt.pix.pixelformat);
+       if (!fmt)
+               fmt = &ti_csi2rx_formats[0];
+
+       /* Interlaced formats are not supported. */
+       f->fmt.pix.field = V4L2_FIELD_NONE;
+
+       ti_csi2rx_fill_fmt(fmt, f);
+
+       return 0;
+}
+
+static int ti_csi2rx_s_fmt_vid_cap(struct file *file, void *priv,
+                                  struct v4l2_format *f)
+{
+       struct ti_csi2rx_dev *csi = video_drvdata(file);
+       struct vb2_queue *q = &csi->vidq;
+       int ret;
+
+       if (vb2_is_busy(q))
+               return -EBUSY;
+
+       ret = ti_csi2rx_try_fmt_vid_cap(file, priv, f);
+       if (ret < 0)
+               return ret;
+
+       csi->v_fmt = *f;
+
+       return 0;
+}
+
+static int ti_csi2rx_enum_framesizes(struct file *file, void *fh,
+                                    struct v4l2_frmsizeenum *fsize)
+{
+       const struct ti_csi2rx_fmt *fmt;
+       unsigned int pixels_in_word;
+
+       fmt = find_format_by_fourcc(fsize->pixel_format);
+       if (!fmt || fsize->index != 0)
+               return -EINVAL;
+
+       /*
+        * Number of pixels in one PSI-L word. The transfer happens in multiples
+        * of PSI-L word sizes.
+        */
+       pixels_in_word = PSIL_WORD_SIZE_BYTES * 8 / fmt->bpp;
+
+       fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+       fsize->stepwise.min_width = pixels_in_word;
+       fsize->stepwise.max_width = rounddown(MAX_WIDTH_BYTES * 8 / fmt->bpp,
+                                             pixels_in_word);
+       fsize->stepwise.step_width = pixels_in_word;
+       fsize->stepwise.min_height = 1;
+       fsize->stepwise.max_height = MAX_HEIGHT_LINES;
+       fsize->stepwise.step_height = 1;
+
+       return 0;
+}
+
+static const struct v4l2_ioctl_ops csi_ioctl_ops = {
+       .vidioc_querycap      = ti_csi2rx_querycap,
+       .vidioc_enum_fmt_vid_cap = ti_csi2rx_enum_fmt_vid_cap,
+       .vidioc_try_fmt_vid_cap = ti_csi2rx_try_fmt_vid_cap,
+       .vidioc_g_fmt_vid_cap = ti_csi2rx_g_fmt_vid_cap,
+       .vidioc_s_fmt_vid_cap = ti_csi2rx_s_fmt_vid_cap,
+       .vidioc_enum_framesizes = ti_csi2rx_enum_framesizes,
+       .vidioc_reqbufs       = vb2_ioctl_reqbufs,
+       .vidioc_create_bufs   = vb2_ioctl_create_bufs,
+       .vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
+       .vidioc_querybuf      = vb2_ioctl_querybuf,
+       .vidioc_qbuf          = vb2_ioctl_qbuf,
+       .vidioc_dqbuf         = vb2_ioctl_dqbuf,
+       .vidioc_expbuf        = vb2_ioctl_expbuf,
+       .vidioc_streamon      = vb2_ioctl_streamon,
+       .vidioc_streamoff     = vb2_ioctl_streamoff,
+};
+
+static const struct v4l2_file_operations csi_fops = {
+       .owner = THIS_MODULE,
+       .open = v4l2_fh_open,
+       .release = vb2_fop_release,
+       .read = vb2_fop_read,
+       .poll = vb2_fop_poll,
+       .unlocked_ioctl = video_ioctl2,
+       .mmap = vb2_fop_mmap,
+};
+
+static int csi_async_notifier_bound(struct v4l2_async_notifier *notifier,
+                                   struct v4l2_subdev *subdev,
+                                   struct v4l2_async_connection *asc)
+{
+       struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
+
+       csi->source = subdev;
+
+       return 0;
+}
+
+static int csi_async_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+       struct ti_csi2rx_dev *csi = dev_get_drvdata(notifier->v4l2_dev->dev);
+       struct video_device *vdev = &csi->vdev;
+       int ret;
+
+       ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
+       if (ret)
+               return ret;
+
+       ret = v4l2_create_fwnode_links_to_pad(csi->source, &csi->pad,
+                                             MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
+
+       if (ret) {
+               video_unregister_device(vdev);
+               return ret;
+       }
+
+       ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev);
+       if (ret)
+               video_unregister_device(vdev);
+
+       return ret;
+}
+
+static const struct v4l2_async_notifier_operations csi_async_notifier_ops = {
+       .bound = csi_async_notifier_bound,
+       .complete = csi_async_notifier_complete,
+};
+
+static int ti_csi2rx_notifier_register(struct ti_csi2rx_dev *csi)
+{
+       struct fwnode_handle *fwnode;
+       struct v4l2_async_connection *asc;
+       struct device_node *node;
+       int ret;
+
+       node = of_get_child_by_name(csi->dev->of_node, "csi-bridge");
+       if (!node)
+               return -EINVAL;
+
+       fwnode = of_fwnode_handle(node);
+       if (!fwnode) {
+               of_node_put(node);
+               return -EINVAL;
+       }
+
+       v4l2_async_nf_init(&csi->notifier, &csi->v4l2_dev);
+       csi->notifier.ops = &csi_async_notifier_ops;
+
+       asc = v4l2_async_nf_add_fwnode(&csi->notifier, fwnode,
+                                      struct v4l2_async_connection);
+       of_node_put(node);
+       if (IS_ERR(asc)) {
+               v4l2_async_nf_cleanup(&csi->notifier);
+               return PTR_ERR(asc);
+       }
+
+       ret = v4l2_async_nf_register(&csi->notifier);
+       if (ret) {
+               v4l2_async_nf_cleanup(&csi->notifier);
+               return ret;
+       }
+
+       return 0;
+}
+
+static void ti_csi2rx_setup_shim(struct ti_csi2rx_dev *csi)
+{
+       const struct ti_csi2rx_fmt *fmt;
+       unsigned int reg;
+
+       fmt = find_format_by_fourcc(csi->v_fmt.fmt.pix.pixelformat);
+
+       /* De-assert the pixel interface reset. */
+       reg = SHIM_CNTL_PIX_RST;
+       writel(reg, csi->shim + SHIM_CNTL);
+
+       reg = SHIM_DMACNTX_EN;
+       reg |= FIELD_PREP(SHIM_DMACNTX_FMT, fmt->csi_dt);
+
+       /*
+        * The hardware assumes incoming YUV422 8-bit data on MIPI CSI2 bus
+        * follows the spec and is packed in the order U0 -> Y0 -> V0 -> Y1 ->
+        * ...
+        *
+        * There is an option to swap the bytes around before storing in
+        * memory, to achieve different pixel formats:
+        *
+        * Byte3 <----------- Byte0
+        * [ Y1 ][ V0 ][ Y0 ][ U0 ]     MODE 11
+        * [ Y1 ][ U0 ][ Y0 ][ V0 ]     MODE 10
+        * [ V0 ][ Y1 ][ U0 ][ Y0 ]     MODE 01
+        * [ U0 ][ Y1 ][ V0 ][ Y0 ]     MODE 00
+        *
+        * We don't have any requirement to change pixelformat from what is
+        * coming from the source, so we keep it in MODE 11, which does not
+        * swap any bytes when storing in memory.
+        */
+       switch (fmt->fourcc) {
+       case V4L2_PIX_FMT_UYVY:
+       case V4L2_PIX_FMT_VYUY:
+       case V4L2_PIX_FMT_YUYV:
+       case V4L2_PIX_FMT_YVYU:
+               reg |= FIELD_PREP(SHIM_DMACNTX_YUV422,
+                                 SHIM_DMACNTX_YUV422_MODE_11);
+               break;
+       default:
+               /* Ignore if not YUV 4:2:2 */
+               break;
+       }
+
+       reg |= FIELD_PREP(SHIM_DMACNTX_SIZE, fmt->size);
+
+       writel(reg, csi->shim + SHIM_DMACNTX);
+
+       reg = FIELD_PREP(SHIM_PSI_CFG0_SRC_TAG, 0) |
+             FIELD_PREP(SHIM_PSI_CFG0_DST_TAG, 0);
+       writel(reg, csi->shim + SHIM_PSI_CFG0);
+}
+
+static void ti_csi2rx_drain_callback(void *param)
+{
+       struct completion *drain_complete = param;
+
+       complete(drain_complete);
+}
+
+/*
+ * Drain the stale data left at the PSI-L endpoint.
+ *
+ * This might happen if no buffers are queued in time but source is still
+ * streaming. In multi-stream scenarios this can happen when one stream is
+ * stopped but other is still streaming, and thus module-level pixel reset is
+ * not asserted.
+ *
+ * To prevent that stale data corrupting the subsequent transactions, it is
+ * required to issue DMA requests to drain it out.
+ */
+static int ti_csi2rx_drain_dma(struct ti_csi2rx_dev *csi)
+{
+       struct dma_async_tx_descriptor *desc;
+       struct completion drain_complete;
+       dma_cookie_t cookie;
+       int ret;
+
+       init_completion(&drain_complete);
+
+       desc = dmaengine_prep_slave_single(csi->dma.chan, csi->dma.drain.paddr,
+                                          csi->dma.drain.len, DMA_DEV_TO_MEM,
+                                          DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!desc) {
+               ret = -EIO;
+               goto out;
+       }
+
+       desc->callback = ti_csi2rx_drain_callback;
+       desc->callback_param = &drain_complete;
+
+       cookie = dmaengine_submit(desc);
+       ret = dma_submit_error(cookie);
+       if (ret)
+               goto out;
+
+       dma_async_issue_pending(csi->dma.chan);
+
+       if (!wait_for_completion_timeout(&drain_complete,
+                                        msecs_to_jiffies(DRAIN_TIMEOUT_MS))) {
+               dmaengine_terminate_sync(csi->dma.chan);
+               dev_dbg(csi->dev, "DMA transfer timed out for drain buffer\n");
+               ret = -ETIMEDOUT;
+               goto out;
+       }
+out:
+       return ret;
+}
+
+static void ti_csi2rx_dma_callback(void *param)
+{
+       struct ti_csi2rx_buffer *buf = param;
+       struct ti_csi2rx_dev *csi = buf->csi;
+       struct ti_csi2rx_dma *dma = &csi->dma;
+       unsigned long flags;
+
+       /*
+        * TODO: Derive the sequence number from the CSI2RX frame number
+        * hardware monitor registers.
+        */
+       buf->vb.vb2_buf.timestamp = ktime_get_ns();
+       buf->vb.sequence = csi->sequence++;
+
+       spin_lock_irqsave(&dma->lock, flags);
+
+       WARN_ON(!list_is_first(&buf->list, &dma->submitted));
+       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
+       list_del(&buf->list);
+
+       /* If there are more buffers to process then start their transfer. */
+       while (!list_empty(&dma->queue)) {
+               buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);
+
+               if (ti_csi2rx_start_dma(csi, buf)) {
+                       dev_err(csi->dev, "Failed to queue the next buffer for DMA\n");
+                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+               } else {
+                       list_move_tail(&buf->list, &dma->submitted);
+               }
+       }
+
+       if (list_empty(&dma->submitted))
+               dma->state = TI_CSI2RX_DMA_IDLE;
+
+       spin_unlock_irqrestore(&dma->lock, flags);
+}
+
+static int ti_csi2rx_start_dma(struct ti_csi2rx_dev *csi,
+                              struct ti_csi2rx_buffer *buf)
+{
+       unsigned long addr;
+       struct dma_async_tx_descriptor *desc;
+       size_t len = csi->v_fmt.fmt.pix.sizeimage;
+       dma_cookie_t cookie;
+       int ret = 0;
+
+       addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+       desc = dmaengine_prep_slave_single(csi->dma.chan, addr, len,
+                                          DMA_DEV_TO_MEM,
+                                          DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       if (!desc)
+               return -EIO;
+
+       desc->callback = ti_csi2rx_dma_callback;
+       desc->callback_param = buf;
+
+       cookie = dmaengine_submit(desc);
+       ret = dma_submit_error(cookie);
+       if (ret)
+               return ret;
+
+       dma_async_issue_pending(csi->dma.chan);
+
+       return 0;
+}
+
+static void ti_csi2rx_stop_dma(struct ti_csi2rx_dev *csi)
+{
+       struct ti_csi2rx_dma *dma = &csi->dma;
+       enum ti_csi2rx_dma_state state;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dma->lock, flags);
+       state = csi->dma.state;
+       dma->state = TI_CSI2RX_DMA_STOPPED;
+       spin_unlock_irqrestore(&dma->lock, flags);
+
+       if (state != TI_CSI2RX_DMA_STOPPED) {
+               /*
+                * Normal DMA termination does not clean up pending data on
+                * the endpoint if multiple streams are running and only one
+                * is stopped, as the module-level pixel reset cannot be
+                * enforced before terminating DMA.
+                */
+               ret = ti_csi2rx_drain_dma(csi);
+               if (ret && ret != -ETIMEDOUT)
+                       dev_warn(csi->dev,
+                                "Failed to drain DMA. Next frame might be bogus\n");
+       }
+
+       ret = dmaengine_terminate_sync(csi->dma.chan);
+       if (ret)
+               dev_err(csi->dev, "Failed to stop DMA: %d\n", ret);
+}
+
+static void ti_csi2rx_cleanup_buffers(struct ti_csi2rx_dev *csi,
+                                     enum vb2_buffer_state state)
+{
+       struct ti_csi2rx_dma *dma = &csi->dma;
+       struct ti_csi2rx_buffer *buf, *tmp;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dma->lock, flags);
+       list_for_each_entry_safe(buf, tmp, &csi->dma.queue, list) {
+               list_del(&buf->list);
+               vb2_buffer_done(&buf->vb.vb2_buf, state);
+       }
+       list_for_each_entry_safe(buf, tmp, &csi->dma.submitted, list) {
+               list_del(&buf->list);
+               vb2_buffer_done(&buf->vb.vb2_buf, state);
+       }
+       spin_unlock_irqrestore(&dma->lock, flags);
+}
+
+static int ti_csi2rx_queue_setup(struct vb2_queue *q, unsigned int *nbuffers,
+                                unsigned int *nplanes, unsigned int sizes[],
+                                struct device *alloc_devs[])
+{
+       struct ti_csi2rx_dev *csi = vb2_get_drv_priv(q);
+       unsigned int size = csi->v_fmt.fmt.pix.sizeimage;
+
+       if (*nplanes) {
+               if (sizes[0] < size)
+                       return -EINVAL;
+               size = sizes[0];
+       }
+
+       *nplanes = 1;
+       sizes[0] = size;
+
+       return 0;
+}
+
+static int ti_csi2rx_buffer_prepare(struct vb2_buffer *vb)
+{
+       struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vb->vb2_queue);
+       unsigned long size = csi->v_fmt.fmt.pix.sizeimage;
+
+       if (vb2_plane_size(vb, 0) < size) {
+               dev_err(csi->dev, "Data will not fit into plane\n");
+               return -EINVAL;
+       }
+
+       vb2_set_plane_payload(vb, 0, size);
+       return 0;
+}
+
+static void ti_csi2rx_buffer_queue(struct vb2_buffer *vb)
+{
+       struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vb->vb2_queue);
+       struct ti_csi2rx_buffer *buf;
+       struct ti_csi2rx_dma *dma = &csi->dma;
+       bool restart_dma = false;
+       unsigned long flags = 0;
+       int ret;
+
+       buf = container_of(vb, struct ti_csi2rx_buffer, vb.vb2_buf);
+       buf->csi = csi;
+
+       spin_lock_irqsave(&dma->lock, flags);
+       /*
+        * Usually the DMA callback takes care of queueing the pending buffers.
+        * But if DMA has stalled due to lack of buffers, restart it now.
+        */
+       if (dma->state == TI_CSI2RX_DMA_IDLE) {
+               /*
+                * Do not restart DMA with the lock held because
+                * ti_csi2rx_drain_dma() might block for completion.
+                * There won't be a race on queueing DMA anyway since the
+                * callback is not being fired.
+                */
+               restart_dma = true;
+               dma->state = TI_CSI2RX_DMA_ACTIVE;
+       } else {
+               list_add_tail(&buf->list, &dma->queue);
+       }
+       spin_unlock_irqrestore(&dma->lock, flags);
+
+       if (restart_dma) {
+               /*
+                * Once frames start dropping, some data gets stuck in the DMA
+                * pipeline somewhere. So the first DMA transfer after frame
+                * drops gives a partial frame. This is obviously not useful to
+                * the application and will only confuse it. Issue a DMA
+                * transaction to drain that up.
+                */
+               ret = ti_csi2rx_drain_dma(csi);
+               if (ret && ret != -ETIMEDOUT)
+                       dev_warn(csi->dev,
+                                "Failed to drain DMA. Next frame might be bogus\n");
+
+               ret = ti_csi2rx_start_dma(csi, buf);
+               if (ret) {
+                       dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
+                       spin_lock_irqsave(&dma->lock, flags);
+                       vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+                       dma->state = TI_CSI2RX_DMA_IDLE;
+                       spin_unlock_irqrestore(&dma->lock, flags);
+               } else {
+                       spin_lock_irqsave(&dma->lock, flags);
+                       list_add_tail(&buf->list, &dma->submitted);
+                       spin_unlock_irqrestore(&dma->lock, flags);
+               }
+       }
+}
+
+static int ti_csi2rx_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+       struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vq);
+       struct ti_csi2rx_dma *dma = &csi->dma;
+       struct ti_csi2rx_buffer *buf;
+       unsigned long flags;
+       int ret = 0;
+
+       spin_lock_irqsave(&dma->lock, flags);
+       if (list_empty(&dma->queue))
+               ret = -EIO;
+       spin_unlock_irqrestore(&dma->lock, flags);
+       if (ret)
+               return ret;
+
+       ret = video_device_pipeline_start(&csi->vdev, &csi->pipe);
+       if (ret)
+               goto err;
+
+       ti_csi2rx_setup_shim(csi);
+
+       csi->sequence = 0;
+
+       spin_lock_irqsave(&dma->lock, flags);
+       buf = list_entry(dma->queue.next, struct ti_csi2rx_buffer, list);
+
+       ret = ti_csi2rx_start_dma(csi, buf);
+       if (ret) {
+               dev_err(csi->dev, "Failed to start DMA: %d\n", ret);
+               spin_unlock_irqrestore(&dma->lock, flags);
+               goto err_pipeline;
+       }
+
+       list_move_tail(&buf->list, &dma->submitted);
+       dma->state = TI_CSI2RX_DMA_ACTIVE;
+       spin_unlock_irqrestore(&dma->lock, flags);
+
+       ret = v4l2_subdev_call(csi->source, video, s_stream, 1);
+       if (ret)
+               goto err_dma;
+
+       return 0;
+
+err_dma:
+       ti_csi2rx_stop_dma(csi);
+err_pipeline:
+       video_device_pipeline_stop(&csi->vdev);
+       writel(0, csi->shim + SHIM_CNTL);
+       writel(0, csi->shim + SHIM_DMACNTX);
+err:
+       ti_csi2rx_cleanup_buffers(csi, VB2_BUF_STATE_QUEUED);
+       return ret;
+}
+
+static void ti_csi2rx_stop_streaming(struct vb2_queue *vq)
+{
+       struct ti_csi2rx_dev *csi = vb2_get_drv_priv(vq);
+       int ret;
+
+       video_device_pipeline_stop(&csi->vdev);
+
+       writel(0, csi->shim + SHIM_CNTL);
+       writel(0, csi->shim + SHIM_DMACNTX);
+
+       ret = v4l2_subdev_call(csi->source, video, s_stream, 0);
+       if (ret)
+               dev_err(csi->dev, "Failed to stop subdev stream\n");
+
+       ti_csi2rx_stop_dma(csi);
+       ti_csi2rx_cleanup_buffers(csi, VB2_BUF_STATE_ERROR);
+}
+
+static const struct vb2_ops csi_vb2_qops = {
+       .queue_setup = ti_csi2rx_queue_setup,
+       .buf_prepare = ti_csi2rx_buffer_prepare,
+       .buf_queue = ti_csi2rx_buffer_queue,
+       .start_streaming = ti_csi2rx_start_streaming,
+       .stop_streaming = ti_csi2rx_stop_streaming,
+       .wait_prepare = vb2_ops_wait_prepare,
+       .wait_finish = vb2_ops_wait_finish,
+};
+
+static int ti_csi2rx_init_vb2q(struct ti_csi2rx_dev *csi)
+{
+       struct vb2_queue *q = &csi->vidq;
+       int ret;
+
+       q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       q->io_modes = VB2_MMAP | VB2_DMABUF;
+       q->drv_priv = csi;
+       q->buf_struct_size = sizeof(struct ti_csi2rx_buffer);
+       q->ops = &csi_vb2_qops;
+       q->mem_ops = &vb2_dma_contig_memops;
+       q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+       q->dev = dmaengine_get_dma_device(csi->dma.chan);
+       q->lock = &csi->mutex;
+       q->min_buffers_needed = 1;
+
+       ret = vb2_queue_init(q);
+       if (ret)
+               return ret;
+
+       csi->vdev.queue = q;
+
+       return 0;
+}
+
+/*
+ * Media link validation for the source-subdev -> video-node link.
+ *
+ * Compares the source pad's active format against the pixel format
+ * configured on the video node: width, height, field and media bus
+ * code (translated to a fourcc via find_format_by_code()) must all be
+ * compatible. Returns 0 on match, -EPIPE on any mismatch, or the
+ * error from querying the source format.
+ */
+static int ti_csi2rx_link_validate(struct media_link *link)
+{
+       struct media_entity *entity = link->sink->entity;
+       struct video_device *vdev = media_entity_to_video_device(entity);
+       struct ti_csi2rx_dev *csi = container_of(vdev, struct ti_csi2rx_dev, vdev);
+       struct v4l2_pix_format *csi_fmt = &csi->v_fmt.fmt.pix;
+       struct v4l2_subdev_format source_fmt = {
+               .which  = V4L2_SUBDEV_FORMAT_ACTIVE,
+               .pad    = link->source->index,
+       };
+       const struct ti_csi2rx_fmt *ti_fmt;
+       int ret;
+
+       ret = v4l2_subdev_call_state_active(csi->source, pad,
+                                           get_fmt, &source_fmt);
+       if (ret)
+               return ret;
+
+       if (source_fmt.format.width != csi_fmt->width) {
+               dev_dbg(csi->dev, "Width does not match (source %u, sink %u)\n",
+                       source_fmt.format.width, csi_fmt->width);
+               return -EPIPE;
+       }
+
+       if (source_fmt.format.height != csi_fmt->height) {
+               dev_dbg(csi->dev, "Height does not match (source %u, sink %u)\n",
+                       source_fmt.format.height, csi_fmt->height);
+               return -EPIPE;
+       }
+
+       /* A V4L2_FIELD_NONE sink accepts any source field setting. */
+       if (source_fmt.format.field != csi_fmt->field &&
+           csi_fmt->field != V4L2_FIELD_NONE) {
+               dev_dbg(csi->dev, "Field does not match (source %u, sink %u)\n",
+                       source_fmt.format.field, csi_fmt->field);
+               return -EPIPE;
+       }
+
+       ti_fmt = find_format_by_code(source_fmt.format.code);
+       if (!ti_fmt) {
+               dev_dbg(csi->dev, "Media bus format 0x%x not supported\n",
+                       source_fmt.format.code);
+               return -EPIPE;
+       }
+
+       if (ti_fmt->fourcc != csi_fmt->pixelformat) {
+               dev_dbg(csi->dev,
+                       "Cannot transform source fmt 0x%x to sink fmt 0x%x\n",
+                       ti_fmt->fourcc, csi_fmt->pixelformat);
+               return -EPIPE;
+       }
+
+       return 0;
+}
+
+/* Entity ops for the video node; only link validation is needed. */
+static const struct media_entity_operations ti_csi2rx_video_entity_ops = {
+       .link_validate = ti_csi2rx_link_validate,
+};
+
+/*
+ * Acquire and configure the "rx0" DMA channel and allocate the
+ * coherent drain buffer used to flush in-flight frames on stop.
+ *
+ * On failure everything acquired here is released before returning:
+ * probe unwinds through err_mutex (not err_dma) when this function
+ * fails, so ti_csi2rx_cleanup_dma() will not run for us.
+ */
+static int ti_csi2rx_init_dma(struct ti_csi2rx_dev *csi)
+{
+       struct dma_slave_config cfg = {
+               .src_addr_width = DMA_SLAVE_BUSWIDTH_16_BYTES,
+       };
+       int ret;
+
+       INIT_LIST_HEAD(&csi->dma.queue);
+       INIT_LIST_HEAD(&csi->dma.submitted);
+       spin_lock_init(&csi->dma.lock);
+
+       csi->dma.state = TI_CSI2RX_DMA_STOPPED;
+
+       csi->dma.chan = dma_request_chan(csi->dev, "rx0");
+       if (IS_ERR(csi->dma.chan))
+               return PTR_ERR(csi->dma.chan);
+
+       ret = dmaengine_slave_config(csi->dma.chan, &cfg);
+       if (ret) {
+               dma_release_channel(csi->dma.chan);
+               return ret;
+       }
+
+       csi->dma.drain.len = DRAIN_BUFFER_SIZE;
+       csi->dma.drain.vaddr = dma_alloc_coherent(csi->dev, csi->dma.drain.len,
+                                                 &csi->dma.drain.paddr,
+                                                 GFP_KERNEL);
+       if (!csi->dma.drain.vaddr) {
+               /* Don't leak the channel requested above. */
+               dma_release_channel(csi->dma.chan);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/*
+ * Register the V4L2 device, media device and video-node media entity.
+ *
+ * Sets a default 640x480 UYVY capture format, then initializes the
+ * media device before registering the v4l2_device and media_device.
+ * All error paths undo media_device_init() with media_device_cleanup()
+ * (the original code skipped it when media_entity_pads_init() or
+ * v4l2_device_register() failed).
+ */
+static int ti_csi2rx_v4l2_init(struct ti_csi2rx_dev *csi)
+{
+       struct media_device *mdev = &csi->mdev;
+       struct video_device *vdev = &csi->vdev;
+       const struct ti_csi2rx_fmt *fmt;
+       struct v4l2_pix_format *pix_fmt = &csi->v_fmt.fmt.pix;
+       int ret;
+
+       fmt = find_format_by_fourcc(V4L2_PIX_FMT_UYVY);
+       if (!fmt)
+               return -EINVAL;
+
+       /* Default format until userspace configures one. */
+       pix_fmt->width = 640;
+       pix_fmt->height = 480;
+       pix_fmt->field = V4L2_FIELD_NONE;
+       pix_fmt->colorspace = V4L2_COLORSPACE_SRGB;
+       /* Statement terminators: the original used ',' (comma operator). */
+       pix_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601;
+       pix_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE;
+       pix_fmt->xfer_func = V4L2_XFER_FUNC_SRGB;
+
+       ti_csi2rx_fill_fmt(fmt, &csi->v_fmt);
+
+       mdev->dev = csi->dev;
+       mdev->hw_revision = 1;
+       strscpy(mdev->model, "TI-CSI2RX", sizeof(mdev->model));
+
+       media_device_init(mdev);
+
+       strscpy(vdev->name, TI_CSI2RX_MODULE_NAME, sizeof(vdev->name));
+       vdev->v4l2_dev = &csi->v4l2_dev;
+       vdev->vfl_dir = VFL_DIR_RX;
+       vdev->fops = &csi_fops;
+       vdev->ioctl_ops = &csi_ioctl_ops;
+       vdev->release = video_device_release_empty;
+       vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+                           V4L2_CAP_IO_MC;
+       vdev->lock = &csi->mutex;
+       video_set_drvdata(vdev, csi);
+
+       csi->pad.flags = MEDIA_PAD_FL_SINK;
+       vdev->entity.ops = &ti_csi2rx_video_entity_ops;
+       ret = media_entity_pads_init(&csi->vdev.entity, 1, &csi->pad);
+       if (ret)
+               goto err_mdev;
+
+       csi->v4l2_dev.mdev = mdev;
+
+       ret = v4l2_device_register(csi->dev, &csi->v4l2_dev);
+       if (ret)
+               goto err_mdev;
+
+       ret = media_device_register(mdev);
+       if (ret) {
+               v4l2_device_unregister(&csi->v4l2_dev);
+               goto err_mdev;
+       }
+
+       return 0;
+
+err_mdev:
+       media_device_cleanup(mdev);
+       return ret;
+}
+
+/*
+ * Undo ti_csi2rx_init_dma(): free the drain buffer and release the
+ * DMA channel. vaddr is cleared to guard against a stale pointer.
+ */
+static void ti_csi2rx_cleanup_dma(struct ti_csi2rx_dev *csi)
+{
+       dma_free_coherent(csi->dev, csi->dma.drain.len,
+                         csi->dma.drain.vaddr, csi->dma.drain.paddr);
+       csi->dma.drain.vaddr = NULL;
+       dma_release_channel(csi->dma.chan);
+}
+
+/*
+ * Undo ti_csi2rx_v4l2_init(). Order matters: unregister both devices
+ * before media_device_cleanup() releases the media device's resources.
+ */
+static void ti_csi2rx_cleanup_v4l2(struct ti_csi2rx_dev *csi)
+{
+       media_device_unregister(&csi->mdev);
+       v4l2_device_unregister(&csi->v4l2_dev);
+       media_device_cleanup(&csi->mdev);
+}
+
+/* Undo ti_csi2rx_notifier_register(): drop the async notifier. */
+static void ti_csi2rx_cleanup_subdev(struct ti_csi2rx_dev *csi)
+{
+       v4l2_async_nf_unregister(&csi->notifier);
+       v4l2_async_nf_cleanup(&csi->notifier);
+}
+
+/* Undo ti_csi2rx_init_vb2q(): release the vb2 capture queue. */
+static void ti_csi2rx_cleanup_vb2q(struct ti_csi2rx_dev *csi)
+{
+       vb2_queue_release(&csi->vidq);
+}
+
+/*
+ * Platform probe: map the SHIM registers, then bring up DMA, the V4L2
+ * and media devices, the vb2 queue and the async notifier, and finally
+ * populate child devices from DT. Error paths unwind in strict reverse
+ * order of initialization.
+ *
+ * Uses devm_platform_ioremap_resource() instead of the open-coded
+ * platform_get_resource() + devm_ioremap_resource() pair.
+ */
+static int ti_csi2rx_probe(struct platform_device *pdev)
+{
+       struct ti_csi2rx_dev *csi;
+       int ret;
+
+       csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
+       if (!csi)
+               return -ENOMEM;
+
+       csi->dev = &pdev->dev;
+       platform_set_drvdata(pdev, csi);
+
+       mutex_init(&csi->mutex);
+
+       csi->shim = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(csi->shim)) {
+               ret = PTR_ERR(csi->shim);
+               goto err_mutex;
+       }
+
+       ret = ti_csi2rx_init_dma(csi);
+       if (ret)
+               goto err_mutex;
+
+       ret = ti_csi2rx_v4l2_init(csi);
+       if (ret)
+               goto err_dma;
+
+       ret = ti_csi2rx_init_vb2q(csi);
+       if (ret)
+               goto err_v4l2;
+
+       ret = ti_csi2rx_notifier_register(csi);
+       if (ret)
+               goto err_vb2q;
+
+       ret = of_platform_populate(csi->dev->of_node, NULL, NULL, csi->dev);
+       if (ret) {
+               dev_err(csi->dev, "Failed to create children: %d\n", ret);
+               goto err_subdev;
+       }
+
+       return 0;
+
+err_subdev:
+       ti_csi2rx_cleanup_subdev(csi);
+err_vb2q:
+       ti_csi2rx_cleanup_vb2q(csi);
+err_v4l2:
+       ti_csi2rx_cleanup_v4l2(csi);
+err_dma:
+       ti_csi2rx_cleanup_dma(csi);
+err_mutex:
+       mutex_destroy(&csi->mutex);
+       return ret;
+}
+
+/*
+ * Platform remove: unregister the video node first so no new file
+ * handles can appear, then tear everything down in reverse probe
+ * order.
+ */
+static int ti_csi2rx_remove(struct platform_device *pdev)
+{
+       struct ti_csi2rx_dev *csi = platform_get_drvdata(pdev);
+
+       video_unregister_device(&csi->vdev);
+
+       ti_csi2rx_cleanup_vb2q(csi);
+       ti_csi2rx_cleanup_subdev(csi);
+       ti_csi2rx_cleanup_v4l2(csi);
+       ti_csi2rx_cleanup_dma(csi);
+
+       mutex_destroy(&csi->mutex);
+
+       return 0;
+}
+
+/* DT match table: the J721E CSI2RX SHIM wrapper. */
+static const struct of_device_id ti_csi2rx_of_match[] = {
+       { .compatible = "ti,j721e-csi2rx-shim", },
+       { },
+};
+MODULE_DEVICE_TABLE(of, ti_csi2rx_of_match);
+
+static struct platform_driver ti_csi2rx_pdrv = {
+       .probe = ti_csi2rx_probe,
+       .remove = ti_csi2rx_remove,
+       .driver = {
+               .name = TI_CSI2RX_MODULE_NAME,
+               .of_match_table = ti_csi2rx_of_match,
+       },
+};
+
+module_platform_driver(ti_csi2rx_pdrv);
+
+MODULE_DESCRIPTION("TI J721E CSI2 RX Driver");
+MODULE_AUTHOR("Jai Luthra <j-luthra@ti.com>");
+MODULE_LICENSE("GPL");
index 68cf68dbcace281090e81f44ee9ec100a2c6187d..359a846205b0ffe9e736c7ed37c22677991cc9f2 100644 (file)
@@ -1039,7 +1039,7 @@ static int isp_stat_init_entities(struct ispstat *stat, const char *name,
        struct media_entity *me = &subdev->entity;
 
        v4l2_subdev_init(subdev, sd_ops);
-       snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "OMAP3 ISP %s", name);
+       snprintf(subdev->name, sizeof(subdev->name), "OMAP3 ISP %s", name);
        subdev->grp_id = BIT(16);       /* group ID for isp subdevs */
        subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS | V4L2_SUBDEV_FL_HAS_DEVNODE;
        v4l2_set_subdevdata(subdev, stat);
index 423fc85d79ee36de1a30717aeedc25b9b6288a40..a9fa05ac56a9b60367c778ffdb097e8a2a48e6af 100644 (file)
@@ -125,7 +125,8 @@ void hantro_watchdog(struct work_struct *work)
        ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
        if (ctx) {
                vpu_err("frame processing timed out!\n");
-               ctx->codec_ops->reset(ctx);
+               if (ctx->codec_ops->reset)
+                       ctx->codec_ops->reset(ctx);
                hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
        }
 }
@@ -898,8 +899,9 @@ static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
        vfd->vfl_dir = VFL_DIR_M2M;
        vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
        vfd->ioctl_ops = &hantro_ioctl_ops;
-       snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
-                funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");
+       strscpy(vfd->name, match->compatible, sizeof(vfd->name));
+       strlcat(vfd->name, funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ?
+               "-enc" : "-dec", sizeof(vfd->name));
 
        if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
                vpu->encoder = func;
index 0224ff68ab3fcf1a8c14cfdba5d46280870acbef..64d6fb852ae9b0d879b7de5731dee293b40e54cc 100644 (file)
@@ -107,7 +107,7 @@ static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
 
 static int down_scale_factor(struct hantro_ctx *ctx)
 {
-       if (ctx->src_fmt.width == ctx->dst_fmt.width)
+       if (ctx->src_fmt.width <= ctx->dst_fmt.width)
                return 0;
 
        return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
index f2ae84f0b4368e9636eb272f5dfab9d78c3bd9a7..f64dea797eff2720632d1ff5b433e1f3171ea944 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0-only or Apache-2.0
+// SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0
 
 #include "rockchip_av1_filmgrain.h"
 
index 816ffa905a4bb4e690f9b56217e312768a63c70c..f9752767078355ef0f76833c8caf9fba1068202b 100644 (file)
@@ -648,7 +648,7 @@ static const char * const rockchip_vpu_clk_names[] = {
 };
 
 static const char * const rk3588_vpu981_vpu_clk_names[] = {
-       "aclk", "hclk", "aclk_vdpu_root", "hclk_vdpu_root"
+       "aclk", "hclk",
 };
 
 /* VDPU1/VEPU1 */
index 80d6f5b072ea648e029b2fad447664a067159622..a96de5d388a1deec214f9be8ba7e1507a4315519 100644 (file)
@@ -708,9 +708,8 @@ int xvip_dma_init(struct xvip_composite_device *xdev, struct xvip_dma *dma,
        snprintf(name, sizeof(name), "port%u", port);
        dma->dma = dma_request_chan(dma->xdev->dev, name);
        if (IS_ERR(dma->dma)) {
-               ret = PTR_ERR(dma->dma);
-               if (ret != -EPROBE_DEFER)
-                       dev_err(dma->xdev->dev, "no VDMA channel found\n");
+               ret = dev_err_probe(dma->xdev->dev, PTR_ERR(dma->dma),
+                                   "no VDMA channel found\n");
                goto error;
        }
 
index c591c0851fa28bd58087486b855a66900b3fbfe2..ad49151f5ff0943b8c890c4f5327360d042121a2 100644 (file)
@@ -36,7 +36,7 @@ static int radio_isa_querycap(struct file *file, void  *priv,
 
        strscpy(v->driver, isa->drv->driver.driver.name, sizeof(v->driver));
        strscpy(v->card, isa->drv->card, sizeof(v->card));
-       snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", isa->v4l2_dev.name);
+       snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", dev_name(isa->v4l2_dev.dev));
        return 0;
 }
 
index 99788834c6461915f27e1c791712e53ef5937897..08be77b8f3b71cd6010dec327c33b16126865015 100644 (file)
@@ -199,11 +199,9 @@ static int pcm20_setfreq(struct pcm20 *dev, unsigned long freq)
 static int vidioc_querycap(struct file *file, void *priv,
                                struct v4l2_capability *v)
 {
-       struct pcm20 *dev = video_drvdata(file);
-
        strscpy(v->driver, "Miro PCM20", sizeof(v->driver));
        strscpy(v->card, "Miro PCM20", sizeof(v->card));
-       snprintf(v->bus_info, sizeof(v->bus_info), "ISA:%s", dev->v4l2_dev.name);
+       strscpy(v->bus_info, "ISA:radio-miropcm20", sizeof(v->bus_info));
        return 0;
 }
 
index 6061506159f1b4f30e3f61dd37f26364d2dc93eb..b2c5809a8bc71f0da56c110e8878c6415e5a6d4c 100644 (file)
@@ -328,9 +328,7 @@ static int si476x_radio_querycap(struct file *file, void *priv,
 
        strscpy(capability->driver, radio->v4l2dev.name,
                sizeof(capability->driver));
-       strscpy(capability->card,   DRIVER_CARD, sizeof(capability->card));
-       snprintf(capability->bus_info, sizeof(capability->bus_info),
-                "platform:%s", radio->v4l2dev.name);
+       strscpy(capability->card, DRIVER_CARD, sizeof(capability->card));
        return 0;
 }
 
index e8166eac9efe8da2208e47875e1d89d0543d2040..f6b98c304b72c9c9d8dd61a6c855b161a1fb9f35 100644 (file)
@@ -1020,7 +1020,7 @@ static int wl1273_fm_set_rds(struct wl1273_device *radio, unsigned int new_mode)
        }
 
        if (!r)
-               radio->rds_on = (new_mode == WL1273_RDS_ON) ? true : false;
+               radio->rds_on = new_mode == WL1273_RDS_ON;
 
        return r;
 }
index 07bdf649c60dc3c80d0600207d701e1f8a2ce2f1..2afe67ffa285e3755f35c5842827160b93f573b5 100644 (file)
@@ -338,16 +338,6 @@ config IR_REDRAT3
           To compile this driver as a module, choose M here: the
           module will be called redrat3.
 
-config IR_RX51
-       tristate "Nokia N900 IR transmitter diode"
-       depends on (OMAP_DM_TIMER && PWM_OMAP_DMTIMER && ARCH_OMAP2PLUS || COMPILE_TEST) && RC_CORE
-       help
-          Say Y or M here if you want to enable support for the IR
-          transmitter diode built in the Nokia N900 (RX51) device.
-
-          The driver uses omap DM timers for generating the carrier
-          wave and pulses.
-
 config IR_SERIAL
        tristate "Homebrew Serial Port Receiver"
        depends on HAS_IOPORT
index a9285266e94475b7dd67641a575a3a7a08e528be..2bca6f7f07bc987a7b5c61f43f717e2b3cd967c8 100644 (file)
@@ -43,7 +43,6 @@ obj-$(CONFIG_IR_MTK) += mtk-cir.o
 obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o
 obj-$(CONFIG_IR_PWM_TX) += pwm-ir-tx.o
 obj-$(CONFIG_IR_REDRAT3) += redrat3.o
-obj-$(CONFIG_IR_RX51) += ir-rx51.o
 obj-$(CONFIG_IR_SERIAL) += serial_ir.o
 obj-$(CONFIG_IR_SPI) += ir-spi.o
 obj-$(CONFIG_IR_STREAMZAP) += streamzap.o
index fff4dd48eacad2ad208738c2bd23d9b38799420a..d7721e60776edd5ca2cbef5daf3fd4db77e2cb84 100644 (file)
@@ -251,7 +251,7 @@ struct ati_remote {
 
        char rc_name[NAME_BUFSIZE];
        char rc_phys[NAME_BUFSIZE];
-       char mouse_name[NAME_BUFSIZE];
+       char mouse_name[NAME_BUFSIZE + 6];
        char mouse_phys[NAME_BUFSIZE];
 
        wait_queue_head_t wait;
index 74546f7e34691eac2688e892671723f9e18dcf7e..5719dda6e0f0ef5ee964872f4fe08fe4e50348b1 100644 (file)
@@ -2427,6 +2427,12 @@ static int imon_probe(struct usb_interface *interface,
                goto fail;
        }
 
+       if (first_if->dev.driver != interface->dev.driver) {
+               dev_err(&interface->dev, "inconsistent driver matching\n");
+               ret = -EINVAL;
+               goto fail;
+       }
+
        if (ifnum == 0) {
                ictx = imon_init_intf0(interface, id);
                if (!ictx) {
diff --git a/drivers/media/rc/ir-rx51.c b/drivers/media/rc/ir-rx51.c
deleted file mode 100644 (file)
index 13e81bf..0000000
+++ /dev/null
@@ -1,285 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- *  Copyright (C) 2008 Nokia Corporation
- *
- *  Based on lirc_serial.c
- */
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/wait.h>
-#include <linux/pwm.h>
-#include <linux/of.h>
-#include <linux/hrtimer.h>
-
-#include <media/rc-core.h>
-
-#define WBUF_LEN 256
-
-struct ir_rx51 {
-       struct rc_dev *rcdev;
-       struct pwm_device *pwm;
-       struct pwm_state state;
-       struct hrtimer timer;
-       struct device        *dev;
-       wait_queue_head_t     wqueue;
-
-       unsigned int    freq;           /* carrier frequency */
-       unsigned int    duty_cycle;     /* carrier duty cycle */
-       int             wbuf[WBUF_LEN];
-       int             wbuf_index;
-       unsigned long   device_is_open;
-};
-
-static inline void ir_rx51_on(struct ir_rx51 *ir_rx51)
-{
-       ir_rx51->state.enabled = true;
-       pwm_apply_state(ir_rx51->pwm, &ir_rx51->state);
-}
-
-static inline void ir_rx51_off(struct ir_rx51 *ir_rx51)
-{
-       ir_rx51->state.enabled = false;
-       pwm_apply_state(ir_rx51->pwm, &ir_rx51->state);
-}
-
-static int init_timing_params(struct ir_rx51 *ir_rx51)
-{
-       ir_rx51->state.period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, ir_rx51->freq);
-       pwm_set_relative_duty_cycle(&ir_rx51->state, ir_rx51->duty_cycle, 100);
-
-       return 0;
-}
-
-static enum hrtimer_restart ir_rx51_timer_cb(struct hrtimer *timer)
-{
-       struct ir_rx51 *ir_rx51 = container_of(timer, struct ir_rx51, timer);
-       ktime_t now;
-
-       if (ir_rx51->wbuf_index < 0) {
-               dev_err_ratelimited(ir_rx51->dev,
-                                   "BUG wbuf_index has value of %i\n",
-                                   ir_rx51->wbuf_index);
-               goto end;
-       }
-
-       /*
-        * If we happen to hit an odd latency spike, loop through the
-        * pulses until we catch up.
-        */
-       do {
-               u64 ns;
-
-               if (ir_rx51->wbuf_index >= WBUF_LEN)
-                       goto end;
-               if (ir_rx51->wbuf[ir_rx51->wbuf_index] == -1)
-                       goto end;
-
-               if (ir_rx51->wbuf_index % 2)
-                       ir_rx51_off(ir_rx51);
-               else
-                       ir_rx51_on(ir_rx51);
-
-               ns = US_TO_NS(ir_rx51->wbuf[ir_rx51->wbuf_index]);
-               hrtimer_add_expires_ns(timer, ns);
-
-               ir_rx51->wbuf_index++;
-
-               now = timer->base->get_time();
-
-       } while (hrtimer_get_expires_tv64(timer) < now);
-
-       return HRTIMER_RESTART;
-end:
-       /* Stop TX here */
-       ir_rx51_off(ir_rx51);
-       ir_rx51->wbuf_index = -1;
-
-       wake_up_interruptible(&ir_rx51->wqueue);
-
-       return HRTIMER_NORESTART;
-}
-
-static int ir_rx51_tx(struct rc_dev *dev, unsigned int *buffer,
-                     unsigned int count)
-{
-       struct ir_rx51 *ir_rx51 = dev->priv;
-
-       if (count > WBUF_LEN)
-               return -EINVAL;
-
-       memcpy(ir_rx51->wbuf, buffer, count * sizeof(unsigned int));
-
-       /* Wait any pending transfers to finish */
-       wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);
-
-       init_timing_params(ir_rx51);
-       if (count < WBUF_LEN)
-               ir_rx51->wbuf[count] = -1; /* Insert termination mark */
-
-       /*
-        * REVISIT: Adjust latency requirements so the device doesn't go in too
-        * deep sleep states with pm_qos_add_request().
-        */
-
-       ir_rx51_on(ir_rx51);
-       ir_rx51->wbuf_index = 1;
-       hrtimer_start(&ir_rx51->timer,
-                     ns_to_ktime(US_TO_NS(ir_rx51->wbuf[0])),
-                     HRTIMER_MODE_REL);
-       /*
-        * Don't return back to the userspace until the transfer has
-        * finished
-        */
-       wait_event_interruptible(ir_rx51->wqueue, ir_rx51->wbuf_index < 0);
-
-       /* REVISIT: Remove pm_qos constraint, we can sleep again */
-
-       return count;
-}
-
-static int ir_rx51_open(struct rc_dev *dev)
-{
-       struct ir_rx51 *ir_rx51 = dev->priv;
-
-       if (test_and_set_bit(1, &ir_rx51->device_is_open))
-               return -EBUSY;
-
-       ir_rx51->pwm = pwm_get(ir_rx51->dev, NULL);
-       if (IS_ERR(ir_rx51->pwm)) {
-               int res = PTR_ERR(ir_rx51->pwm);
-
-               dev_err(ir_rx51->dev, "pwm_get failed: %d\n", res);
-               return res;
-       }
-
-       return 0;
-}
-
-static void ir_rx51_release(struct rc_dev *dev)
-{
-       struct ir_rx51 *ir_rx51 = dev->priv;
-
-       hrtimer_cancel(&ir_rx51->timer);
-       ir_rx51_off(ir_rx51);
-       pwm_put(ir_rx51->pwm);
-
-       clear_bit(1, &ir_rx51->device_is_open);
-}
-
-static struct ir_rx51 ir_rx51 = {
-       .duty_cycle     = 50,
-       .wbuf_index     = -1,
-};
-
-static int ir_rx51_set_duty_cycle(struct rc_dev *dev, u32 duty)
-{
-       struct ir_rx51 *ir_rx51 = dev->priv;
-
-       ir_rx51->duty_cycle = duty;
-
-       return 0;
-}
-
-static int ir_rx51_set_tx_carrier(struct rc_dev *dev, u32 carrier)
-{
-       struct ir_rx51 *ir_rx51 = dev->priv;
-
-       if (carrier > 500000 || carrier < 20000)
-               return -EINVAL;
-
-       ir_rx51->freq = carrier;
-
-       return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int ir_rx51_suspend(struct platform_device *dev, pm_message_t state)
-{
-       /*
-        * In case the device is still open, do not suspend. Normally
-        * this should not be a problem as lircd only keeps the device
-        * open only for short periods of time. We also don't want to
-        * get involved with race conditions that might happen if we
-        * were in a middle of a transmit. Thus, we defer any suspend
-        * actions until transmit has completed.
-        */
-       if (test_and_set_bit(1, &ir_rx51.device_is_open))
-               return -EAGAIN;
-
-       clear_bit(1, &ir_rx51.device_is_open);
-
-       return 0;
-}
-
-static int ir_rx51_resume(struct platform_device *dev)
-{
-       return 0;
-}
-
-#else
-
-#define ir_rx51_suspend        NULL
-#define ir_rx51_resume NULL
-
-#endif /* CONFIG_PM */
-
-static int ir_rx51_probe(struct platform_device *dev)
-{
-       struct pwm_device *pwm;
-       struct rc_dev *rcdev;
-
-       pwm = pwm_get(&dev->dev, NULL);
-       if (IS_ERR(pwm))
-               return dev_err_probe(&dev->dev, PTR_ERR(pwm), "pwm_get failed\n");
-
-       /* Use default, in case userspace does not set the carrier */
-       ir_rx51.freq = DIV_ROUND_CLOSEST_ULL(pwm_get_period(pwm), NSEC_PER_SEC);
-       pwm_init_state(pwm, &ir_rx51.state);
-       pwm_put(pwm);
-
-       hrtimer_init(&ir_rx51.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       ir_rx51.timer.function = ir_rx51_timer_cb;
-
-       ir_rx51.dev = &dev->dev;
-
-       rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW_TX);
-       if (!rcdev)
-               return -ENOMEM;
-
-       rcdev->priv = &ir_rx51;
-       rcdev->open = ir_rx51_open;
-       rcdev->close = ir_rx51_release;
-       rcdev->tx_ir = ir_rx51_tx;
-       rcdev->s_tx_duty_cycle = ir_rx51_set_duty_cycle;
-       rcdev->s_tx_carrier = ir_rx51_set_tx_carrier;
-       rcdev->driver_name = KBUILD_MODNAME;
-
-       ir_rx51.rcdev = rcdev;
-
-       return devm_rc_register_device(&dev->dev, ir_rx51.rcdev);
-}
-
-static const struct of_device_id ir_rx51_match[] = {
-       {
-               .compatible = "nokia,n900-ir",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, ir_rx51_match);
-
-static struct platform_driver ir_rx51_platform_driver = {
-       .probe          = ir_rx51_probe,
-       .suspend        = ir_rx51_suspend,
-       .resume         = ir_rx51_resume,
-       .driver         = {
-               .name   = KBUILD_MODNAME,
-               .of_match_table = ir_rx51_match,
-       },
-};
-module_platform_driver(ir_rx51_platform_driver);
-
-MODULE_DESCRIPTION("IR TX driver for Nokia RX51");
-MODULE_AUTHOR("Nokia Corporation");
-MODULE_LICENSE("GPL");
index 3d8488c39c5615343c9f3e797a3d143a3604a0a3..3311099cbd573b840e1fce402df9a2c3bc2a0506 100644 (file)
@@ -15,7 +15,9 @@
 #define SHARP_UNIT             40  /* us */
 #define SHARP_BIT_PULSE                (8    * SHARP_UNIT) /* 320us */
 #define SHARP_BIT_0_PERIOD     (25   * SHARP_UNIT) /* 1ms (680us space) */
-#define SHARP_BIT_1_PERIOD     (50   * SHARP_UNIT) /* 2ms (1680ms space) */
+#define SHARP_BIT_1_PERIOD     (50   * SHARP_UNIT) /* 2ms (1680us space) */
+#define SHARP_BIT_0_SPACE      (17   * SHARP_UNIT) /* 680us space */
+#define SHARP_BIT_1_SPACE      (42   * SHARP_UNIT) /* 1680us space */
 #define SHARP_ECHO_SPACE       (1000 * SHARP_UNIT) /* 40 ms */
 #define SHARP_TRAILER_SPACE    (125  * SHARP_UNIT) /* 5 ms (even longer) */
 
@@ -168,8 +170,8 @@ static const struct ir_raw_timings_pd ir_sharp_timings = {
        .header_pulse  = 0,
        .header_space  = 0,
        .bit_pulse     = SHARP_BIT_PULSE,
-       .bit_space[0]  = SHARP_BIT_0_PERIOD,
-       .bit_space[1]  = SHARP_BIT_1_PERIOD,
+       .bit_space[0]  = SHARP_BIT_0_SPACE,
+       .bit_space[1]  = SHARP_BIT_1_SPACE,
        .trailer_pulse = SHARP_BIT_PULSE,
        .trailer_space = SHARP_ECHO_SPACE,
        .msb_first     = 1,
index 0a867ca90038fbba13cf4fe0288a54d84dbe686e..e24946c8fe204810cf25048d729ee3ed08283edd 100644 (file)
@@ -83,3 +83,4 @@ module_exit(exit_rc_map_adstech_dvb_t_pci)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("ADS Tech Instant TV DVB-T PCI Remote");
index 8a2ccaf3b817419fe028b2f959d7e1df17d39bd0..9926259b43ee068d8f45829ca2a97cf9a90a4ad4 100644 (file)
@@ -54,3 +54,4 @@ module_exit(exit_rc_map_alink_dtu_m)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("A-Link DTU(m) slim remote, 6 rows, 3 columns.");
index 34da03c46104b7bfa0688e08cb58b53032d604ec..e4bcbf889fed42d2545e797f8ccd48c9acb1f604 100644 (file)
@@ -79,3 +79,4 @@ module_exit(exit_rc_map_anysee)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Anysee remote keytable");
index bdc47e25d46e89508477474bce2e1e28f14eb5c9..80b096f02e995a8446f55c4bc67fcbabb06058f3 100644 (file)
@@ -74,3 +74,4 @@ module_exit(exit_rc_map_apac_viewcomp)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("apac-viewcomp remote controller keytable");
index 1d322137898e3c578a3fe113fc7087b4ac3e0315..212b0d9209013515b0537f6f2922c1bdd96cb254 100644 (file)
@@ -62,3 +62,4 @@ module_exit(exit_rc_map_t2hybrid)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Oleh Kravchenko <oleg@kaa.org.ua>");
+MODULE_DESCRIPTION("Astrometa T2hybrid remote controller keytable");
index 7a4b3a6e3a49e48266281512911eea26b35742ba..bd55b7c6f82a47a8cfdbe7d1712b4f148e01e629 100644 (file)
@@ -85,3 +85,4 @@ module_exit(exit_rc_map_asus_pc39)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Model PC-39 keytable for asus-pc39 remote controller");
index 09b60fa335e3d56dc1e7eb5d43241e3625dd737e..9d63c1e4a17a28b80173a43345fdddb9af433f5d 100644 (file)
@@ -84,3 +84,4 @@ module_exit(exit_rc_map_asus_ps3_100)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Asus My Cinema PS3-100 remote controller keytable");
index b4b7932c0c5a14ab5bed5fa1f35f6e265d21a82b..063237f0d2e20116ea2596734b9e8c971aa968ff 100644 (file)
@@ -63,3 +63,4 @@ module_exit(exit_rc_map_ati_tv_wonder_hd_600)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("ati-tv-wonder-hd-600 remote controller keytable");
index 31fe1106b70873554fa24704a114a6158c8262e7..9f7cbe9a1ac84624f3ed920da9d2def16c04add5 100644 (file)
@@ -123,3 +123,4 @@ module_exit(exit_rc_map_ati_x10)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("ATI X10 RF remote controller keytable");
index 6467ff6e48d77320870adeda6de1904e009e88e4..98497f4f6f92861f0a7cbbede9e4985485ce3fc7 100644 (file)
@@ -69,3 +69,4 @@ module_exit(exit_rc_map_avermedia_a16d)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("avermedia-a16d remote controller keytable");
index 54fc6d9022c2f924ef5b328bb68a2c13fd52775e..5832c2f8ab3fa0ec2d27806958bb786bbd765334 100644 (file)
@@ -91,3 +91,4 @@ module_exit(exit_rc_map_avermedia_cardbus)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("avermedia-cardbus remote controller keytable");
index 92c6df3360b33d9867e353140222e67b07ed07d5..3157d0c1cee97270ca1fb23f62b0a4df1398f9fe 100644 (file)
@@ -72,3 +72,4 @@ module_exit(exit_rc_map_avermedia_dvbt)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("avermedia-dvbt remote controller keytable");
index 311ddeb061cac5a47998241c0c9c50561bd81f64..cc1318ad09d9e62f764ba0e3df5931cda7e29a10 100644 (file)
@@ -142,3 +142,4 @@ module_exit(exit_rc_map_avermedia_m135a)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Avermedia M135A with RM-JX and RM-K6 remote controller keytable");
index a970ed5a090be57e3980a118d54842a383dc1e83..ec6c866c9f5d9ef4035807c1dbb99bdfc8bf8483 100644 (file)
@@ -90,3 +90,4 @@ module_exit(exit_rc_map_avermedia_m733a_rm_k6)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Avermedia M733A with IR model RM-K6 remote controller keytable");
index cf8a4fd107f432dc2c9b8c2af5a0598d919ecc60..ee4fe5791add58527a19bbf783bdd17731ca3dbd 100644 (file)
@@ -65,3 +65,4 @@ module_exit(exit_rc_map_avermedia_rm_ks)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("AverMedia RM-KS remote controller keytable");
index f96f229b70bb16aa6be461f953f4a78880c5c0ab..b827536a1f5f24cde22f0626ed2230e8aa79cf3e 100644 (file)
@@ -80,3 +80,4 @@ module_exit(exit_rc_map_avermedia)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("avermedia remote controller keytable");
index a3e2e945c769ee65e0526296b9b01eabdfb1d4c2..71d1da42528d344abda4dd3f9a6e32e494e1779b 100644 (file)
@@ -79,3 +79,4 @@ module_exit(exit_rc_map_avertv_303)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("AVERTV STUDIO 303 Remote controller keytable");
index 5fc8e4cd102e73f29ded5e693e6240a7a77bbce2..56f8eb1f0d012378f201147a00de61de13cda1e7 100644 (file)
@@ -88,3 +88,4 @@ module_exit(exit_rc_map_azurewave_ad_tu700)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("TwinHan AzureWave AD-TU700(704J) remote controller keytable");
index cedbd5d20bc76d654f560dd174aebae8867ec4dc..6e767d88c707f42ed169533bc99ab054aad3125a 100644 (file)
@@ -82,3 +82,4 @@ module_exit(exit_rc_map_beelink_gs1)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Clément Péron <peron.clem@gmail.com>");
+MODULE_DESCRIPTION("Beelink GS1 remote controller keytable");
index 01180cd92205367ef0851a194b34e7dd245ea183..88fad9959a86683db9d76c8a186d8eeb1c4c0b7c 100644 (file)
@@ -55,3 +55,4 @@ module_exit(exit_rc_map_beelink_mxiii)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("Beelink Mini MXIII remote controller keytable");
index 8579b3d5128d8629c3c7ffff42d904988ccdc30f..6bdc924ac3cfab981e39402e874701eefc00b96d 100644 (file)
@@ -102,3 +102,4 @@ module_exit(exit_rc_map_behold_columbus)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("BeholdTV Columbus remote controller keytable");
index 28397ce05a7f4bdc1ffc9c5adfa76e50e3adc783..0251ce835f79c85f54c604a6743b234d4bd3e2f7 100644 (file)
@@ -135,3 +135,4 @@ module_exit(exit_rc_map_behold)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("BeholdTV 60x series remote controller keytable");
index 6ca8222568623fcf97ba9f5ee708676a5ad6f1cb..8fda5d1e140b3eb3993f48eeee826e7532d16f8c 100644 (file)
@@ -87,3 +87,4 @@ module_exit(exit_rc_map_budget_ci_old)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("budget-ci-old remote controller keytable");
index 4433d28b219cdd0d45c83485ed72bc686e0bec5b..092c3533d7124bfb204a2d038b2d6969a7289b57 100644 (file)
@@ -78,3 +78,4 @@ module_exit(exit_rc_map_cinergy_1400)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Cinergy 1400 DVB-T remote controller keytable");
index b34a37b8fe610b9da64848b57fe72c919e5ec356..334a290a3b91a74b0dd631a5b2a21ce02e9db2b3 100644 (file)
@@ -72,3 +72,4 @@ module_exit(exit_rc_map_cinergy)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("cinergy remote controller keytable");
index 8914c83c9d9f14681c93e6eea5fe7300daa7da36..d4638df37c53249feb7f32e7f9a7ee2df38739dc 100644 (file)
@@ -84,3 +84,4 @@ module_exit(exit_rc_map_ct_90405)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Alexander Voronov <avv.0@ya.ru>");
+MODULE_DESCRIPTION("Toshiba CT-90405 remote controller keytable");
index d491a5e9750f4d1714e3eb136067c8833b99db1f..7870d36f2c697b1b3da69ba999952c34fa812505 100644 (file)
@@ -70,3 +70,4 @@ module_exit(exit_rc_map_d680_dmb)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("d680-dmb remote controller keytable");
index f1fcdf16f48520a9da6959b06f7e092d689b0964..0323049fd2b18998d0a7cd82186cbb4e34bc271f 100644 (file)
@@ -118,3 +118,4 @@ module_exit(exit_rc_map)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dib0700-nec remote controller keytable");
index 002fffcba95d38f94cb61b087485149471ed4ad9..d34e92eb92be73596b6793b75115c79f0078cda7 100644 (file)
@@ -229,3 +229,4 @@ module_exit(exit_rc_map)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dib0700-rc5 remote controller keytable");
index 2466d8c50226bf8793a3420170dbbeb096ded6db..d18b8f93a018eb8e3eb23e7bb2ec056473366d6f 100644 (file)
@@ -84,3 +84,4 @@ module_exit(exit_rc_map_digitalnow_tinytwin)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("DigitalNow TinyTwin remote controller keytable");
index 65bc8ad7e52ce3c84666b87ceb82a200bff48fb2..129a81f59b4a1163d23f9b1b59c7b80020797132 100644 (file)
@@ -68,3 +68,4 @@ module_exit(exit_rc_map_digittrade)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Digittrade DVB-T USB Stick remote controller keytable");
index cd0b985c994df02f0b32000e4e7e21cbba88fe72..b82290ce925bab90508e3b565f347926f4514419 100644 (file)
@@ -70,3 +70,4 @@ module_exit(exit_rc_map_dm1105_nec)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dm1105-nec remote controller keytable");
index a82f64dc94117901f8790ce4c8a348a936db33f2..4b23335615cf27e5b04b377d9e7439f3942106c6 100644 (file)
@@ -72,3 +72,4 @@ module_exit(exit_rc_map_dntv_live_dvb_t)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dntv-live-dvb-t remote controller keytable");
index d3f5048a0220f6a7af613ab42737cbed2f68f4df..46d8ea1b49a3943a33fcd835995ea8eaae56fa5a 100644 (file)
@@ -91,3 +91,4 @@ module_exit(exit_rc_map_dntv_live_dvbt_pro)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("DigitalNow DNTV Live DVB-T Remote controller keytable");
index dea024fa3a22a6fe3ec2b560a04186f9753f43ee..e1ec99ce3105089c7db79be8b205b742c75f4b0d 100644 (file)
@@ -149,3 +149,4 @@ module_exit(exit_rc_map_dreambox)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Emanuel Strobel <emanuel.strobel@yahoo.com>");
+MODULE_DESCRIPTION("Dreambox RC10/RC0 and RC20/RC-BT remote controller keytable");
index e7f87baa32124e7aeb6b1692b40e0724f88bf8bc..eeb2f6e303d7a429605d66a2dfc5f63140288b3c 100644 (file)
@@ -53,3 +53,4 @@ module_exit(exit_rc_map_dtt200u)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
+MODULE_DESCRIPTION("Wideview WT-220U remote controller keytable");
index f5063af2e5bcc40bf309e0cf0bbc7724dce231fe..1fcd47bd8595b62c7eeba4e4c1274939c73f98ed 100644 (file)
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_rc5_dvbsky)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Nibble Max <nibble.max@gmail.com>");
+MODULE_DESCRIPTION("DVBSky remote controller keytable");
index b1bb8cdb3705d0fcbd8f94ac8d06f0f5bc59925c..4bb4222d259e484480c4f1794f2dd5dc1b49bdb5 100644 (file)
@@ -80,3 +80,4 @@ module_exit(exit_rc_map_dvico_mce)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dvico-mce remote controller keytable");
index ec12ba6995dc17d41d750db10a9f6276b39c26f3..ba9ef9b757775057d2afa6929519bf20d9e65249 100644 (file)
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_dvico_portable)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("dvico-portable remote controller keytable");
index a1f59aa6ff233d6791518e069c518455ce1ade6d..8a51fe63284051327fe821bad97ca01310f7470d 100644 (file)
@@ -63,3 +63,4 @@ module_exit(exit_rc_map_em_terratec)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("em-terratec remote controller keytable");
index 7a00471b6005273c37797b26934fb1d443d0f2ff..320e184f429832bed1e6a2fcb2e7f1d5a04844bc 100644 (file)
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_encore_enltv_fm53)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Encore ENLTV-FM v5.3 remote controller keytable");
index 712210097b4dba5ea890439d475db7cceaac76f8..0b235d72e57d20895893e38019013043e1dfbf1c 100644 (file)
@@ -106,3 +106,4 @@ module_exit(exit_rc_map_encore_enltv)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Encore ENLTV-FM remote controller keytable");
index a08470b4f1871c5f8eb30d04786fc6b3e31867c6..d8057f41252d000400e3fc06d1c7669efffabfe3 100644 (file)
@@ -84,3 +84,4 @@ module_exit(exit_rc_map_encore_enltv2)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Encore ENLTV2-FM remote controller keytable");
index f4398444330b918b67e921b95f03642ff63ba99d..95295f6882b106c59ba57e508eae6c88cdbf6fa8 100644 (file)
@@ -55,3 +55,4 @@ module_exit(exit_rc_map_evga_indtube)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("EVGA inDtube remote controller keytable");
index 4e494d953e33b86cee7d6b4688d376f5aab6d23a..522e772f7c2bafd0667752d0edc995259f2a0366 100644 (file)
@@ -90,3 +90,4 @@ module_exit(exit_rc_map_eztv)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("eztv remote controller keytable");
index 202a1fbd193560183a075fbb165ea538003f2278..fcb3bcadd82d98c0e5d9935b7d8893bee915bc02 100644 (file)
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_flydvb)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("flydvb remote controller keytable");
index a44467fb15cb9325cd6fef297097d9b3e81461c6..fcb70c9507cf4c96fa0aa9c011eec9debc356ecb 100644 (file)
@@ -64,3 +64,4 @@ module_exit(exit_rc_map_flyvideo)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("flyvideo remote controller keytable");
index 253199f5531afd64da7a435ef6ddc37a1ba44134..43f73db91098802d97506c73fe2ac339d2af8251 100644 (file)
@@ -92,3 +92,4 @@ module_exit(exit_rc_map_fusionhdtv_mce)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("DViCO FUSION HDTV MCE remote controller keytable");
index c630ef306f11616281e8661499089e89ca77e2b0..8a446d125789234f0b511e1d529891174d916bb6 100644 (file)
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_gadmei_rm008z)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("GADMEI UTV330+ RM008Z remote controller keytable");
index 11735ad36c6aa9cf8d3a6801b9aa5c6149132804..d3f2e960c925329536383b5486365f6835c96985 100644 (file)
@@ -47,3 +47,4 @@ module_exit(exit_rc_map_geekbox)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("GeekBox remote controller keytable");
index c966c130b05d05b06b4aadd0022833327846e2ff..e49828ea2b80eea4ae70bb2694e3d22630b6b007 100644 (file)
@@ -78,3 +78,4 @@ module_exit(exit_rc_map_genius_tvgo_a11mce)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Genius TVGO A11MCE remote controller keytable");
index 0dc4ef36d76f801996e6013301778771eb98f143..a044991e30bad963d75553d82d2ebc6b2b36ef11 100644 (file)
@@ -73,3 +73,4 @@ module_exit(exit_rc_map_gotview7135)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("gotview7135 remote controller keytable");
index 82552360c3c38f0ce30e61e44bd76d3ace2bb47c..d7156774aa0e1c318c993d73010b9647f59ee720 100644 (file)
@@ -287,3 +287,4 @@ module_exit(exit_rc_map_rc5_hauppauge_new)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Hauppauge remote controllers keytable");
index 49a18e916915481c18db8bae9ded887a4714766b..b10ad674c32a275cc1578b3c1177f74179c395d3 100644 (file)
@@ -63,3 +63,4 @@ module_init(init_rc_map_hisi_poplar)
 module_exit(exit_rc_map_hisi_poplar)
 
 MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon poplar remote controller keytable");
index c73068b653f7c1bdf47eaf15faec0b7d6e2bcdc2..24dcb38df27c9f7e07ead1f1e7287f6397e6349a 100644 (file)
@@ -75,3 +75,4 @@ module_init(init_rc_map_hisi_tv_demo)
 module_exit(exit_rc_map_hisi_tv_demo)
 
 MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("HiSilicon tv demo remote controller keytable");
index b89e3569e76ac5535ef81e36cf3a630291c88164..130f685ae30ea438a9b262b7850f0e2e97c09b38 100644 (file)
@@ -137,3 +137,4 @@ module_exit(exit_rc_map_imon_mce)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("iMON MCE remote controller keytable");
index bceb4e7726b642267f271af951fb1ddc436c1649..cd5ba44d033cb7a4611a6775970d4b1c60644132 100644 (file)
@@ -150,3 +150,4 @@ module_exit(exit_rc_map_imon_pad)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("iMON PAD remote controller keytable");
index 38787dd0e4a0f3249b6e09a09b3e7cbc958f25e9..e4124fadf70519c94fc4ffdd705dd23a3ec07d1e 100644 (file)
@@ -80,3 +80,4 @@ module_exit(exit_rc_map_imon_rsc)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Sean Young <sean@mess.org>");
+MODULE_DESCRIPTION("iMON RSC remote controller keytable");
index 9cc6ea0f4226335e4bed474a6817ae9655c5a6b4..95256e85458a4216dfeac1579e24bdc3c5793228 100644 (file)
@@ -82,3 +82,4 @@ module_exit(exit_rc_map_iodata_bctv7e)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("IO-DATA BCTV7E remote controller keytable");
index 1e049f26a24659e38eb49d26b46f175c13dc396c..d80764c98f44e157d7af95d55f877a69fb5f18f4 100644 (file)
@@ -89,3 +89,4 @@ module_exit(exit_rc_it913x_v1_map)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
+MODULE_DESCRIPTION("it913x-v1 remote controller keytable");
index da3107da26b75f349260edbd8871ef9242a0e35a..c37358cf8b86e1a29f55cc951a6e79ee4b9857fd 100644 (file)
@@ -88,3 +88,4 @@ module_exit(exit_rc_it913x_v2_map)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
+MODULE_DESCRIPTION("it913x-v2 remote controller keytable");
index 548760e86a2d3b2c2106dcc70f6de46000f25a76..bea50c6f7e99c5a83f8570b68bdb0fa2995e6c89 100644 (file)
@@ -81,3 +81,4 @@ module_exit(exit_rc_map_kaiomy)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Kaiomy TVnPC U2 remote controller keytable");
index ce4938444d90cb9b182ac306548102f7d4450953..2b7161e1bc3f2f68e4e0e44c13d9ceb115c512ab 100644 (file)
@@ -52,3 +52,4 @@ module_exit(exit_rc_map_khadas)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
+MODULE_DESCRIPTION("Khadas VIM/EDGE SBC remote controller keytable");
index 0c98c2faacffc7a6bdb885bd269ed31ee07f5014..2121cad8d3df819628dc5daf3bf9eaf5c05b7e8f 100644 (file)
@@ -73,3 +73,4 @@ module_exit(exit_rc_map_khamsin)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
+MODULE_DESCRIPTION("KHAMSIN remote controller keytable");
index f5aed4b9601915d4ae2ee5325ece622c6da61f89..f849dd6b7ef27d4f6a713d8944a4cf48a1c0e195 100644 (file)
@@ -77,3 +77,4 @@ module_exit(exit_rc_map_kworld_315u)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Kworld 315U remote controller keytable");
index 7938761eb9949b42250e0850e8d269413cb35f33..630ef7c330d9670e64201da027cac2c73ed205ae 100644 (file)
@@ -96,3 +96,4 @@ module_exit(exit_rc_map_kworld_pc150u)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kyle Strickland <kyle@kyle.strickland.name>");
+MODULE_DESCRIPTION("Kworld PC150-U remote controller keytable");
index 75389b74e02db2befc0efde823c96b016d3eda71..1fb9dc434685243f1601e47da13a71a9fd425bb0 100644 (file)
@@ -97,3 +97,4 @@ module_exit(exit_rc_map_kworld_plus_tv_analog)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Kworld Plus TV Analog Lite PCI IR remote controller keytable");
index 2f2b981e19955b24deb6cb8b6b21e3bfa1431c70..c637312643b70b54f6bea039104942508ccce218 100644 (file)
@@ -85,3 +85,4 @@ module_exit(exit_rc_map_leadtek_y04g0051)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("LeadTek Y04G0051 remote controller keytable");
index 181e48f0cb679d08bfce97081db39a1df66c48d9..575485655a85f33cfc89f74454db717e9337cd80 100644 (file)
@@ -104,3 +104,4 @@ module_exit(exit_rc_lme2510_map)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Malcolm Priestley tvboxspy@gmail.com");
+MODULE_DESCRIPTION("LME2510 remote controller keytable");
index e884aeb5c3d604a6bc4a1bde07c99aac81b22344..b81149a0dfd8dde3f1ad600474553d159fb59212 100644 (file)
@@ -128,3 +128,4 @@ module_exit(exit_rc_map_manli)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("MANLI MTV00[0x0c] and BeholdTV 40[13] remote controller keytable");
index 77ca8a8fade80e1079aa904369b57afb819b02c7..273fe1a304f01e921728e5b3f3702713070cdc4a 100644 (file)
@@ -89,3 +89,4 @@ module_exit(exit_rc_map_mecool_kii_pro)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("Mecool Kii Pro remote controller keytable");
index 8e99686fd6b166c28ba730a29a478f1163f3f2a8..53fd7c895ddc0e90aaffbee31a53ac276f9fad06 100644 (file)
@@ -86,3 +86,4 @@ module_exit(exit_rc_map_mecool_kiii_pro)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("Mecool Kiii Pro remote controller keytable");
index 843dba3bad73da395377bd5c26a1fa6c6c7eb6ca..3ea8fdbaf18c9847b4e73eff242490b7bb53eeec 100644 (file)
@@ -102,3 +102,4 @@ module_exit(exit_rc_map_medion_x10)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("Medion X10 RF remote controller keytable");
index 9165af548ff1dedf15575d8043ba472b511b824b..ce16e964e72e6d4e577a75f3f12c73af24f1618d 100644 (file)
@@ -53,3 +53,4 @@ module_exit(exit_rc_map_minix_neo)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("Minix NEO remote controller keytable");
index ab001d2dac67d87122cfc36f772b4cdd152ce170..f152626fd802fb280757c4e8c6298991081fac19 100644 (file)
@@ -53,3 +53,4 @@ module_exit(exit_rc_map_msi_digivox_ii)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("MSI DIGIVOX mini II remote controller keytable");
index 6129d3e925e5c1bf8ef78465ac80d60f5cbecbfd..1250cde3367de0e2e50c0b6272946e377a5c3dae 100644 (file)
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_msi_digivox_iii)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("MSI DIGIVOX mini III remote controller keytable");
index 42270a7ef3ee9b3b23d45acd25af916246cea006..648bac448f293f9c256e955001d6e0c1910c156e 100644 (file)
@@ -117,3 +117,4 @@ module_exit(exit_rc_map_msi_tvanywhere_plus)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("MSI TV@nywhere Plus remote controller keytable");
index 45793c641009f9d2b10c44b9ef3e4e7000e6cf3a..b59af39ba0052fea188c9fc467212ec2f8af78df 100644 (file)
@@ -63,3 +63,4 @@ module_exit(exit_rc_map_msi_tvanywhere)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("MSI TV@nywhere MASTER remote controller keytable");
index 2dc6061f69b3743a4b7a237f7b679653376b21c6..23b75269d307061982166de7a9a953ec7908f778 100644 (file)
@@ -90,3 +90,4 @@ module_exit(exit_rc_map_nebula)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("nebula remote controller keytable");
index b12c54d47db3d3fe6dc0e5d18cde8939f1cc67a3..94340a1864a02c18256d2bac2a34b702524a682b 100644 (file)
@@ -151,3 +151,4 @@ module_exit(exit_rc_map_nec_terratec_cinergy_xs)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Terratec Cinergy Hybrid T USB XS FM remote controller keytable");
index acd5b1ccf8d03be2028ea8602f00e1601e170c06..da00003a5e79882c83125052e8847568a0a81a95 100644 (file)
@@ -79,3 +79,4 @@ module_exit(exit_rc_map_norwood)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Norwood Micro (non-Pro) TV Tuner remote controller keytable");
index 98a755e8bc181adaed20ec9105ef0976d1058373..6f4412922e0cf8d3d9ab965ed05ffba5c8605928 100644 (file)
@@ -74,3 +74,4 @@ module_exit(exit_rc_map_npgtech)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("npgtech remote controller keytable");
index c6fbb64b5c4171b76e860d30c0a52a0e60a662e0..0353229a4915b1726e8c7159c5a5f4b3fd0214fc 100644 (file)
@@ -52,3 +52,4 @@ module_exit(exit_rc_map_odroid)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("HardKernel ODROID remote controller keytable");
index c3bb1ecdd0ca117ab81174cdc6f971537f81365d..6583bf4fcb04f2d70488876d11ef3dbdc39b154a 100644 (file)
@@ -74,3 +74,4 @@ module_exit(exit_rc_map_pctv_sedna)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pctv-sedna remote controller keytable");
index 9b2bdbbce04e9a4d94ed21a3099d654b4645a61e..bcdb99997d4d65844320bdf3e406a1524f7acd34 100644 (file)
@@ -63,3 +63,4 @@ module_exit(exit_rc_map_pine64)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jonas Karlman");
+MODULE_DESCRIPTION("Pine64 IR remote controller keytable");
index b862725635b963768861c6317bb1f13b9c261726..f33c38644f83bdd7942cf3cb345f6292eda980fc 100644 (file)
@@ -88,3 +88,4 @@ module_exit(exit_rc_map_pinnacle_color)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pinnacle-color remote controller keytable");
index 3853b653cee6f887dc519a23e7edd744ab9d2260..22ef3d4e2e71b4b413105becf4ab239fe9f463e9 100644 (file)
@@ -83,3 +83,4 @@ module_exit(exit_rc_map_pinnacle_grey)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pinnacle-grey remote controller keytable");
index 96d8112fb46875cd9676d1d83fa27fd782f6e4bb..35f0c790cc0c5b1cb4a725a8b0912097fb701102 100644 (file)
@@ -64,3 +64,4 @@ module_exit(exit_rc_map_pinnacle_pctv_hd)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Pinnacle PCTV HD 800i mini remote controller keytable");
index c3439c46644c5e745a964ab95414e9c601c75131..0966ebf0903afe875f34650779a71b264bc30af6 100644 (file)
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_pixelview)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("002-T IR remote keytable");
index ea11ccde844214c07721be8f5bc84ec8c60f6352..e18774473809b3a933b59335ae8e4d3c4e32a545 100644 (file)
@@ -77,3 +77,4 @@ module_exit(exit_rc_map_pixelview)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("MK-F12 IR remote controller keytable");
index 0259666831b06c9e69f96a55f4699ce35a1f8562..cf7f1cf8c3b53c642b5eccbb5675344ffebe5d68 100644 (file)
@@ -77,3 +77,4 @@ module_exit(exit_rc_map_pixelview_new)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pixelview-new remote controller keytable");
index 29f6d2c013e41c1b6b1431e1cc209c147c6dcaa0..567ad0a076fcc1fa15794b64af34d0696a070e3b 100644 (file)
@@ -76,3 +76,4 @@ module_exit(exit_rc_map_pixelview)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pixelview remote controller keytable");
index 66fe2e52e7c8380988bb64305da356d476c5b451..e7a6add1df26dcb972ad8d3ea34e9b1248504e70 100644 (file)
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_powercolor_real_angel)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Powercolor Real Angel 330 remote controller keytable");
index 36eebefd975cc01dfc6d7d61061e35c33caef3ee..1300482a61190df90c7f2e7169afb1a34bbf8aed 100644 (file)
@@ -63,3 +63,4 @@ module_exit(exit_rc_map_proteus_2309)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("proteus-2309 remote controller keytable");
index bf4543fecb6f926cf26a3936baf05aedfcc28087..9f6ee0be134743ee80d6f1064bb51b06745741c4 100644 (file)
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_purpletv)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("purpletv remote controller keytable");
index 69db55463000b0c0452d92f82382346d653e8a80..539e8573eb191a7f57c8686330b474b010ab995d 100644 (file)
@@ -72,3 +72,4 @@ module_exit(exit_rc_map_pv951)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("pv951 remote controller keytable");
index d491e0fa8605de78c87a358c5f2f4dc1f5af0051..ef1c61eb99b233add77741621ba20216baa3acaa 100644 (file)
@@ -114,3 +114,4 @@ module_exit(exit_rc_map_rc6_mce)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("rc6 MCE remote controller keytable");
index 33bb458b81fd1c3c65d3208475615e18aa2f95e2..088ead8f736bd67b1429e07de5ba25c6e3b44a32 100644 (file)
@@ -72,3 +72,4 @@ module_exit(exit_rc_map_real_audio_220_32_keys)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Zogis Real Audio 220 - 32 keys remote controller keytable");
index b70390d19e78182e3b3838ec43c40ea5a1019d25..af50d1ca4b15d2ba3138d9297f309d2db950eef1 100644 (file)
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_reddo)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("reddo remote controller keytable");
index e3d5bff3bd9e58790f8fd3d231a48b48e94f56c6..826f44595e701b1e5c831403750afce77dcc1983 100644 (file)
@@ -92,3 +92,4 @@ module_exit(exit_rc_map_snapstream_firefly)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
+MODULE_DESCRIPTION("SnapStream Firefly X10 RF remote controller keytable");
index 6684e2e86bc96ff39dc38abb088a79954000c663..b82c3cdfca3b79b20bc1e78c6b8fc0bb7a01302e 100644 (file)
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_streamzap)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("Streamzap remote controller keytable");
index 64cfc01aa48f557373c90b40faa3e844f82f122e..a333ade3b1d2a3868a6fe55ec16b88b2382c91c4 100644 (file)
@@ -69,3 +69,4 @@ module_exit(exit_rc_map_su3000)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Evgeny Plehov <Evgeny Plehov@ukr.net>");
+MODULE_DESCRIPTION("Geniatech HDStar remote controller keytable");
index d486cd69afb2abe26df1783b7ab4675c6a80a0b1..b5d77a0c94ed3de5d5e20102676039234e094e7e 100644 (file)
@@ -75,3 +75,4 @@ module_exit(exit_rc_map_tanix_tx3mini)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
+MODULE_DESCRIPTION("Tanix TX3 mini STB remote controller keytable");
index 59aaabed80dd38f3afbea0e3eacf38420f184f30..91db9017845a4e34940ebf6d8e278f45e39f4556 100644 (file)
@@ -66,3 +66,4 @@ module_exit(exit_rc_map_tanix_tx5max)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
+MODULE_DESCRIPTION("Tanix TX5 max STB remote controller keytable");
index 420980925f29875f1fc50735d658fa7e4ce8e3a4..426c767c907f9d5207d2b502b60f562de5826f60 100644 (file)
@@ -69,3 +69,4 @@ module_exit(exit_rc_map_tbs_nec)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("tbs-nec remote controller keytable");
index 9a917ea0ceba8822a898f2233027243e74182cc5..07d5e0884eb949c5473beb19f5fad9db7b4740ae 100644 (file)
@@ -70,3 +70,4 @@ module_init(init_rc_map)
 module_exit(exit_rc_map)
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TechniSat TS35 remote controller keytable");
index 942100686c826b1c9d2940d77155f74c341378a9..74ac89d37966102bbf3b3ece1e5acf95da4880b5 100644 (file)
@@ -88,3 +88,4 @@ module_exit(exit_rc_map)
 
 MODULE_AUTHOR("Patrick Boettcher <pboettcher@kernellabs.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TechniSat TS35 remote controller keytable");
index da06f844d8fb3ca30828a1e3bddbc95c80606421..d448913081937562da044064de404dc56ffe0e5c 100644 (file)
@@ -82,3 +82,4 @@ module_init(init_rc_map_terratec_cinergy_c_pci);
 module_exit(exit_rc_map_terratec_cinergy_c_pci);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Terratec Cinergy C PCI remote controller keytable");
index a1844b531572d15bf910a5d005e7a56903e00cdc..dbbb1ba0247b23f01ea59d67f7fa6d1cdb5f2bba 100644 (file)
@@ -80,3 +80,4 @@ module_init(init_rc_map_terratec_cinergy_s2_hd);
 module_exit(exit_rc_map_terratec_cinergy_s2_hd);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Terratec Cinergy S2 HD remote controller keytable");
index fe587e3f02400c26f356cf3eda5c57c7a5938982..a9452d581339fd5b716154df84c412a30b187bef 100644 (file)
@@ -86,3 +86,4 @@ module_exit(exit_rc_map_terratec_cinergy_xs)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Terratec Cinergy Hybrid T USB XS remote controller keytable");
index a54a59f9031319596da3ad19cbe0aaaeffe64a75..ea259d88769b99fae8c2076caf5ba8c5de65124a 100644 (file)
@@ -58,3 +58,4 @@ module_exit(exit_rc_map_terratec_slim_2)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("TerraTec slim remote controller keytable");
index 146e3a3480dc4b383c75a5de6e2b30c29a5dc720..bb40cbd58ebe2c20f52cb8d81380e8c3ed56f3c1 100644 (file)
@@ -65,3 +65,4 @@ module_exit(exit_rc_map_terratec_slim)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("TerraTec slim remote controller keytable");
index 5b96e9a38e9d88d9468578a1acbd15728a7776d0..ee7f8014742b296b8e526ce0d63b861adeb97bdf 100644 (file)
@@ -82,3 +82,4 @@ module_exit(exit_rc_map_tevii_nec)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("tevii-nec remote controller keytable");
index c51606a3be68141aa19286871ce9190eee51f80f..c02b8c8abd5cf520d24e8fdc51796c9462f18adf 100644 (file)
@@ -93,3 +93,4 @@ module_exit(exit_rc_map_tivo)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
+MODULE_DESCRIPTION("TiVo remote controller keytable");
index 40b773ba45b956a9627ae3d2bbe7c9143f7c004e..290d1cc8577c869628e33239a07dcf7bdb0dc00a 100644 (file)
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_total_media_in_hand_02)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR(" Alfredo J. Delaiti <alfredodelaiti@netscape.net>");
+MODULE_DESCRIPTION("Total Media In Hand_02 remote controller keytable");
index 2144db485d83ad81d81dc253900736d662d83fdb..7f4b31b98f35072ff3d7ac71734709d337e76969 100644 (file)
@@ -71,3 +71,4 @@ module_exit(exit_rc_map_total_media_in_hand)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Total Media In Hand remote controller keytable");
index e938e0da51a69d075a12293739bb52faadf11f79..ff01de550904273681843f8b36bddb22784ecf75 100644 (file)
@@ -66,3 +66,4 @@ module_exit(exit_rc_map_trekstor)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("TrekStor remote controller keytable");
index ff70aab13b486e0373b373965a7cee14b2f6014d..eb8d7fc5061afa2b6a0dee83f65202146a0521af 100644 (file)
@@ -76,3 +76,4 @@ module_exit(exit_rc_map_tt_1500)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Technotrend 1500 remote controller keytable");
index 5fc696d9e583925977f6d311e529eb09a9d31ac4..8e5cf8eb0db9f59a1a1a4cfcf74a7405affe7b50 100644 (file)
@@ -92,3 +92,4 @@ module_init(init_rc_map_twinhan_dtv_cab_ci);
 module_exit(exit_rc_map_twinhan_dtv_cab_ci);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Twinhan DTV CAB CI remote controller keytable");
index e1cdcfa792dc4f5fe2eaf6a5ba9178b7544602b3..411ce3c8cbd060d0359abfb098aef905db57eba5 100644 (file)
@@ -87,3 +87,4 @@ module_exit(exit_rc_map_twinhan_vp1027)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Sergey Ivanov <123kash@gmail.com>");
+MODULE_DESCRIPTION("twinhan1027 remote controller keytable");
index bf210c4dc5357245fa37036673b11387a8fc0b7e..40fbf408bf65f0b31530ad69d9558787a69c0122 100644 (file)
@@ -52,3 +52,4 @@ module_exit(exit_rc_map_vega_s9x)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("Tronsmart Vega S9x remote controller keytable");
index e16b9b851c72972a853fcb9d3c98db38755cd888..1f9be84ff27b29706bfe614bcc026b55baa69e61 100644 (file)
@@ -87,3 +87,4 @@ module_exit(exit_rc_map_videomate_k100)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pavel Osnova <pvosnova@gmail.com>");
+MODULE_DESCRIPTION("videomate-m1f remote controller keytable");
index a867d7a08055cdee79884ed5876732cae0934447..281cc747229fd6966901e7dfd17dd3077e639667 100644 (file)
@@ -79,3 +79,4 @@ module_exit(exit_rc_map_videomate_s350)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("videomate-s350 remote controller keytable");
index fdc3b0e1350f31c8df7afaabb8f74538451d6074..829842425faeab3c13849b129078891caadd0ca8 100644 (file)
@@ -81,3 +81,4 @@ module_exit(exit_rc_map_videomate_tv_pvr)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("videomate-tv-pvr remote controller keytable");
index 414d4d231e7ed3d039d8d42c71831e4dd1111478..10cbc2c781d217b477af1bd0a890519b844697bb 100644 (file)
@@ -81,3 +81,4 @@ module_exit(exit_rc_map_kii_pro)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mohammad Rasim <mohammad.rasim96@gmail.com>");
+MODULE_DESCRIPTION("Videostrong KII Pro STB remote controller keytable");
index b5a21aff45f5c3fa1b7becd5fad8d40bb1cc6c92..591ec20399f4b6d10e42d364b7c15ba441575b03 100644 (file)
@@ -51,3 +51,4 @@ module_exit(exit_rc_map_wetek_hub)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com>");
+MODULE_DESCRIPTION("WeTek Hub STB remote controller keytable");
index bbbb11fa3c11dfe2ff42b74efb390f99ba9acfa2..ce3b1029df231e66c8f32965bed2cf8caeace4d3 100644 (file)
@@ -91,3 +91,4 @@ module_exit(exit_rc_map_wetek_play2)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("WeTek Play 2 STB remote controller keytable");
index 999ba4e084aeaf014fc0bb51622e3362d1d56ccf..edfba31f9ae6d3dfbe479833f46074b169ed2e05 100644 (file)
@@ -76,3 +76,4 @@ module_exit(exit_rc_map_winfast_usbii_deluxe)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Leadtek Winfast TV USB II Deluxe remote controller keytable");
index be52a3e1f8ae5d58d05c6ff98f3951008e272d48..89649c8cdee518ee18a1a7196728eb2af8191f09 100644 (file)
@@ -96,3 +96,4 @@ module_exit(exit_rc_map_winfast)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Mauro Carvalho Chehab");
+MODULE_DESCRIPTION("Leadtek Winfast remote controller keytable");
index 0998ec3320e4939be5b9f33fec634d8555ab25dc..a22fcbbfa38358d349c45a69579e4fc714cd0df6 100644 (file)
@@ -81,3 +81,4 @@ module_exit(exit_rc_map_x96max)
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Christian Hewitt <christianshewitt@gmail.com");
+MODULE_DESCRIPTION("X96-max STB remote controller keytable");
index 231aa00514af71a3d5af2f3cdfb5f574a3bfae13..1364daf3ae6f1bbd2f82b4b3519cc38919e6f158 100644 (file)
@@ -81,3 +81,4 @@ module_init(init_rc_map)
 module_exit(exit_rc_map)
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xbox 360 Universal Media remote controller keytable");
index 9d656042a81ff66d4e6dc2dc9ce4ca5f98d99bdf..11ab134b05b507ea6df45c180bbe1c4a09d53987 100644 (file)
@@ -61,3 +61,4 @@ module_init(init_rc_map)
 module_exit(exit_rc_map)
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Xbox DVD remote controller keytable");
index 7bb0c05eb75983197b86f03345a505fb7a18f98f..e4bea7b39fd1ececa75c41e79205bf7feb4be5bb 100644 (file)
@@ -74,3 +74,4 @@ module_exit(exit_rc_map_zx_irdec)
 
 MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
 MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("zx-irdec remote controller keytable");
index 043d23aaa3cbcc095b5dfd124a2ed4a06c69909a..a537734832c5080498d263428a96d7b1d13dcb88 100644 (file)
@@ -276,7 +276,11 @@ static ssize_t lirc_transmit(struct file *file, const char __user *buf,
                if (ret < 0)
                        goto out_kfree_raw;
 
-               count = ret;
+               /* drop trailing space */
+               if (!(ret % 2))
+                       count = ret - 1;
+               else
+                       count = ret;
 
                txbuf = kmalloc_array(count, sizeof(unsigned int), GFP_KERNEL);
                if (!txbuf) {
index 70322fab34ac3d2f108b9164d7456fcbe90d4977..5303e6da58095eed1a2dcdded57e48fac3c45bf1 100644 (file)
 
 #define DRIVER_NAME            "meson-ir"
 
-#define IR_DEC_LDR_ACTIVE      0x00
-#define IR_DEC_LDR_IDLE                0x04
-#define IR_DEC_LDR_REPEAT      0x08
-#define IR_DEC_BIT_0           0x0c
-#define IR_DEC_REG0            0x10
-#define IR_DEC_REG0_BASE_TIME  GENMASK(11, 0)
-#define IR_DEC_FRAME           0x14
-#define IR_DEC_STATUS          0x18
-#define IR_DEC_STATUS_PULSE    BIT(8)
-#define IR_DEC_REG1            0x1c
-#define IR_DEC_REG1_TIME_IV    GENMASK(28, 16)
-#define IR_DEC_REG1_ENABLE     BIT(15)
-#define IR_DEC_REG1_MODE       GENMASK(8, 7)
-#define IR_DEC_REG1_IRQSEL     GENMASK(3, 2)
-#define IR_DEC_REG1_RESET      BIT(0)
-/* The following regs are only available on Meson 8b and newer */
-#define IR_DEC_REG2            0x20
-#define IR_DEC_REG2_MODE       GENMASK(3, 0)
-
-#define DEC_MODE_NEC           0x0
-#define DEC_MODE_RAW           0x2
-
-#define IRQSEL_NEC_MODE                0
-#define IRQSEL_RISE_FALL       1
-#define IRQSEL_FALL            2
-#define IRQSEL_RISE            3
-
-#define MESON_RAW_TRATE                10      /* us */
-#define MESON_HW_TRATE         20      /* us */
+#define IR_DEC_LDR_ACTIVE                      0x00
+#define IR_DEC_LDR_ACTIVE_MAX                  GENMASK(28, 16)
+#define IR_DEC_LDR_ACTIVE_MIN                  GENMASK(12, 0)
+#define IR_DEC_LDR_IDLE                                0x04
+#define IR_DEC_LDR_IDLE_MAX                    GENMASK(28, 16)
+#define IR_DEC_LDR_IDLE_MIN                    GENMASK(12, 0)
+#define IR_DEC_LDR_REPEAT                      0x08
+#define IR_DEC_LDR_REPEAT_MAX                  GENMASK(25, 16)
+#define IR_DEC_LDR_REPEAT_MIN                  GENMASK(9, 0)
+#define IR_DEC_BIT_0                           0x0c
+#define IR_DEC_BIT_0_MAX                       GENMASK(25, 16)
+#define IR_DEC_BIT_0_MIN                       GENMASK(9, 0)
+#define IR_DEC_REG0                            0x10
+#define IR_DEC_REG0_FILTER                     GENMASK(30, 28)
+#define IR_DEC_REG0_FRAME_TIME_MAX             GENMASK(24, 12)
+#define IR_DEC_REG0_BASE_TIME                  GENMASK(11, 0)
+#define IR_DEC_FRAME                           0x14
+#define IR_DEC_STATUS                          0x18
+#define IR_DEC_STATUS_BIT_1_ENABLE             BIT(30)
+#define IR_DEC_STATUS_BIT_1_MAX                        GENMASK(29, 20)
+#define IR_DEC_STATUS_BIT_1_MIN                        GENMASK(19, 10)
+#define IR_DEC_STATUS_PULSE                    BIT(8)
+#define IR_DEC_STATUS_BUSY                     BIT(7)
+#define IR_DEC_STATUS_FRAME_STATUS             GENMASK(3, 0)
+#define IR_DEC_REG1                            0x1c
+#define IR_DEC_REG1_TIME_IV                    GENMASK(28, 16)
+#define IR_DEC_REG1_FRAME_LEN                  GENMASK(13, 8)
+#define IR_DEC_REG1_ENABLE                     BIT(15)
+#define IR_DEC_REG1_HOLD_CODE                  BIT(6)
+#define IR_DEC_REG1_IRQSEL                     GENMASK(3, 2)
+#define IR_DEC_REG1_RESET                      BIT(0)
+/* Meson 6b uses REG1 to configure IR mode */
+#define IR_DEC_REG1_MODE                       GENMASK(8, 7)
+
+/* The following registers are only available on Meson 8b and newer */
+#define IR_DEC_REG2                            0x20
+#define IR_DEC_REG2_TICK_MODE                  BIT(15)
+#define IR_DEC_REG2_REPEAT_COUNTER             BIT(13)
+#define IR_DEC_REG2_REPEAT_TIME                        BIT(12)
+#define IR_DEC_REG2_COMPARE_FRAME              BIT(11)
+#define IR_DEC_REG2_BIT_ORDER                  BIT(8)
+/* Meson 8b / GXBB use REG2 to configure IR mode */
+#define IR_DEC_REG2_MODE                       GENMASK(3, 0)
+#define IR_DEC_DURATN2                         0x24
+#define IR_DEC_DURATN2_MAX                     GENMASK(25, 16)
+#define IR_DEC_DURATN2_MIN                     GENMASK(9, 0)
+#define IR_DEC_DURATN3                         0x28
+#define IR_DEC_DURATN3_MAX                     GENMASK(25, 16)
+#define IR_DEC_DURATN3_MIN                     GENMASK(9, 0)
+#define IR_DEC_FRAME1                          0x2c
+
+#define FRAME_MSB_FIRST                                true
+#define FRAME_LSB_FIRST                                false
+
+#define DEC_MODE_NEC                           0x0
+#define DEC_MODE_RAW                           0x2
+#define DEC_MODE_RC6                           0x9
+#define DEC_MODE_XMP                           0xE
+#define DEC_MODE_UNKNOW                                0xFF
+
+#define DEC_STATUS_VALID                       BIT(3)
+#define DEC_STATUS_DATA_CODE_ERR               BIT(2)
+#define DEC_STATUS_CUSTOM_CODE_ERR             BIT(1)
+#define DEC_STATUS_REPEAT                      BIT(0)
+
+#define IRQSEL_DEC_MODE                                0
+#define IRQSEL_RISE_FALL                       1
+#define IRQSEL_FALL                            2
+#define IRQSEL_RISE                            3
+
+#define MESON_RAW_TRATE                                10      /* us */
+#define MESON_HW_TRATE                         20      /* us */
+
+/**
+ * struct meson_ir_protocol - describe IR Protocol parameter
+ *
+ * @hw_protocol: select IR Protocol from IR Controller
+ * @repeat_counter_enable: enable frame-to-frame time counter, it should work
+ *                         with @repeat_compare_enable to detect the repeat frame
+ * @repeat_check_enable: enable repeat time check for repeat detection
+ * @repeat_compare_enable: enable to compare frame for repeat frame detection.
+ *                         Some IR Protocol send the same data as repeat frame.
+ *                         In this case, it should work with
+ *                         @repeat_counter_enable to detect the repeat frame.
+ * @bit_order: bit order, LSB or MSB
+ * @bit1_match_enable: enable to check bit 1
+ * @hold_code_enable: hold frame code in register IR_DEC_FRAME1, the new one
+ *                    frame code will not be store in IR_DEC_FRAME1.
+ *                    until IR_DEC_FRAME1 has been read
+ * @count_tick_mode: increasing time unit of frame-to-frame time counter.
+ *                   0 = 100us, 1 = 10us
+ * @code_length: length (N-1) of data frame
+ * @frame_time_max: max time for whole frame. Unit: MESON_HW_TRATE
+ * @leader_active_max: max time for NEC/RC6 leader active part. Unit: MESON_HW_TRATE
+ * @leader_active_min: min time for NEC/RC6 leader active part. Unit: MESON_HW_TRATE
+ * @leader_idle_max: max time for NEC/RC6 leader idle part. Unit: MESON_HW_TRATE
+ * @leader_idle_min: min time for NEC/RC6 leader idle part. Unit: MESON_HW_TRATE
+ * @repeat_leader_max: max time for NEC repeat leader idle part. Unit: MESON_HW_TRATE
+ * @repeat_leader_min: min time for NEC repeat leader idle part. Unit: MESON_HW_TRATE
+ * @bit0_max: max time for NEC Logic '0', half of RC6 trailer bit, XMP Logic '00'
+ * @bit0_min: min time for NEC Logic '0', half of RC6 trailer bit, XMP Logic '00'
+ * @bit1_max: max time for NEC Logic '1', whole of RC6 trailer bit, XMP Logic '01'
+ * @bit1_min: min time for NEC Logic '1', whole of RC6 trailer bit, XMP Logic '01'
+ * @duration2_max: max time for half of RC6 normal bit, XMP Logic '10'
+ * @duration2_min: min time for half of RC6 normal bit, XMP Logic '10'
+ * @duration3_max: max time for whole of RC6 normal bit, XMP Logic '11'
+ * @duration3_min: min time for whole of RC6 normal bit, XMP Logic '11'
+ */
+
+struct meson_ir_protocol {
+       u8 hw_protocol;
+       bool repeat_counter_enable;
+       bool repeat_check_enable;
+       bool repeat_compare_enable;
+       bool bit_order;
+       bool bit1_match_enable;
+       bool hold_code_enable;
+       bool count_tick_mode;
+       u8 code_length;
+       u16 frame_time_max;
+       u16 leader_active_max;
+       u16 leader_active_min;
+       u16 leader_idle_max;
+       u16 leader_idle_min;
+       u16 repeat_leader_max;
+       u16 repeat_leader_min;
+       u16 bit0_max;
+       u16 bit0_min;
+       u16 bit1_max;
+       u16 bit1_min;
+       u16 duration2_max;
+       u16 duration2_min;
+       u16 duration3_max;
+       u16 duration3_min;
+};
+
+struct meson_ir_param {
+       bool support_hw_decoder;
+       unsigned int max_register;
+};
 
 struct meson_ir {
+       const struct meson_ir_param *param;
        struct regmap   *reg;
        struct rc_dev   *rc;
        spinlock_t      lock;
 };
 
-static const struct regmap_config meson_ir_regmap_config = {
+static struct regmap_config meson_ir_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
 };
 
+static const struct meson_ir_protocol protocol_timings[] = {
+       /* protocol, repeat counter, repeat check, repeat compare, order */
+       {DEC_MODE_NEC, false, false, false, FRAME_LSB_FIRST,
+       /* bit 1 match, hold code, count tick, len, frame time */
+       true, false, false, 32, 4000,
+       /* leader active max/min, leader idle max/min, repeat leader max/min */
+       500, 400, 300, 200, 150, 80,
+       /* bit0 max/min, bit1 max/min, duration2 max/min, duration3 max/min */
+       72, 40, 134, 90, 0, 0, 0, 0}
+};
+
+static void meson_ir_nec_handler(struct meson_ir *ir)
+{
+       u32 code = 0;
+       u32 status = 0;
+       enum rc_proto proto;
+
+       regmap_read(ir->reg, IR_DEC_STATUS, &status);
+
+       if (status & DEC_STATUS_REPEAT) {
+               rc_repeat(ir->rc);
+       } else {
+               regmap_read(ir->reg, IR_DEC_FRAME, &code);
+
+               code = ir_nec_bytes_to_scancode(code, code >> 8,
+                                               code >> 16, code >> 24, &proto);
+               rc_keydown(ir->rc, proto, code, 0);
+       }
+}
+
+static void meson_ir_hw_handler(struct meson_ir *ir)
+{
+       if (ir->rc->enabled_protocols & RC_PROTO_BIT_NEC)
+               meson_ir_nec_handler(ir);
+}
+
 static irqreturn_t meson_ir_irq(int irqno, void *dev_id)
 {
        struct meson_ir *ir = dev_id;
@@ -70,22 +218,232 @@ static irqreturn_t meson_ir_irq(int irqno, void *dev_id)
 
        spin_lock(&ir->lock);
 
-       regmap_read(ir->reg, IR_DEC_REG1, &duration);
-       duration = FIELD_GET(IR_DEC_REG1_TIME_IV, duration);
-       rawir.duration = duration * MESON_RAW_TRATE;
-
        regmap_read(ir->reg, IR_DEC_STATUS, &status);
-       rawir.pulse = !!(status & IR_DEC_STATUS_PULSE);
 
-       ir_raw_event_store_with_timeout(ir->rc, &rawir);
+       if (ir->rc->driver_type == RC_DRIVER_IR_RAW) {
+               rawir.pulse = !!(status & IR_DEC_STATUS_PULSE);
+
+               regmap_read(ir->reg, IR_DEC_REG1, &duration);
+               duration = FIELD_GET(IR_DEC_REG1_TIME_IV, duration);
+               rawir.duration = duration * MESON_RAW_TRATE;
+
+               ir_raw_event_store_with_timeout(ir->rc, &rawir);
+       } else if (ir->rc->driver_type == RC_DRIVER_SCANCODE) {
+               if (status & DEC_STATUS_VALID)
+                       meson_ir_hw_handler(ir);
+       }
 
        spin_unlock(&ir->lock);
 
        return IRQ_HANDLED;
 }
 
+static int meson_ir_hw_decoder_init(struct rc_dev *dev, u64 *rc_type)
+{
+       u8 protocol;
+       u32 regval;
+       int i;
+       unsigned long flags;
+       const struct meson_ir_protocol *timings;
+       struct meson_ir *ir = dev->priv;
+
+       if (*rc_type & RC_PROTO_BIT_NEC)
+               protocol = DEC_MODE_NEC;
+       else
+               return 0;
+
+       for (i = 0; i < ARRAY_SIZE(protocol_timings); i++)
+               if (protocol_timings[i].hw_protocol == protocol)
+                       break;
+
+       if (i == ARRAY_SIZE(protocol_timings)) {
+               dev_err(&dev->dev, "hw protocol isn't supported: %d\n",
+                       protocol);
+               return -EINVAL;
+       }
+       timings = &protocol_timings[i];
+
+       spin_lock_irqsave(&ir->lock, flags);
+
+       /* Clear controller status */
+       regmap_read(ir->reg, IR_DEC_STATUS, &regval);
+       regmap_read(ir->reg, IR_DEC_FRAME, &regval);
+
+       /* Reset ir decoder and disable decoder */
+       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE, 0);
+       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET,
+                          IR_DEC_REG1_RESET);
+
+       /* Base time resolution, (19+1)*1us=20us */
+       regval = FIELD_PREP(IR_DEC_REG0_BASE_TIME, MESON_HW_TRATE - 1);
+       regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_BASE_TIME, regval);
+
+       /* Monitor timing for input filter */
+       regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_FILTER,
+                          FIELD_PREP(IR_DEC_REG0_FILTER, 7));
+
+       /* HW protocol */
+       regval = FIELD_PREP(IR_DEC_REG2_MODE, timings->hw_protocol);
+       regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_MODE, regval);
+
+       /* Hold frame data until register was read */
+       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_HOLD_CODE,
+                          timings->hold_code_enable ?
+                          IR_DEC_REG1_HOLD_CODE : 0);
+
+       /* Bit order */
+       regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_BIT_ORDER,
+                          timings->bit_order ? IR_DEC_REG2_BIT_ORDER : 0);
+
+       /* Select tick mode */
+       regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_TICK_MODE,
+                          timings->count_tick_mode ?
+                          IR_DEC_REG2_TICK_MODE : 0);
+
+       /*
+        * Some protocols transmit the same data frame as repeat frame
+        * when the key is pressing. In this case, it could be detected as
+        * repeat frame if the repeat checker was enabled.
+        */
+       regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_REPEAT_COUNTER,
+                          timings->repeat_counter_enable ?
+                          IR_DEC_REG2_REPEAT_COUNTER : 0);
+       regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_REPEAT_TIME,
+                          timings->repeat_check_enable ?
+                          IR_DEC_REG2_REPEAT_TIME : 0);
+       regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_COMPARE_FRAME,
+                          timings->repeat_compare_enable ?
+                          IR_DEC_REG2_COMPARE_FRAME : 0);
+
+       /*
+        * FRAME_TIME_MAX should be larger than the time between
+        * data frame and repeat frame
+        */
+       regval = FIELD_PREP(IR_DEC_REG0_FRAME_TIME_MAX,
+                           timings->frame_time_max);
+       regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_FRAME_TIME_MAX,
+                          regval);
+
+       /* Length(N-1) of data frame */
+       regval = FIELD_PREP(IR_DEC_REG1_FRAME_LEN, timings->code_length - 1);
+       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_FRAME_LEN, regval);
+
+       /* Time for leader active part */
+       regval = FIELD_PREP(IR_DEC_LDR_ACTIVE_MAX,
+                           timings->leader_active_max) |
+                FIELD_PREP(IR_DEC_LDR_ACTIVE_MIN,
+                           timings->leader_active_min);
+       regmap_update_bits(ir->reg, IR_DEC_LDR_ACTIVE, IR_DEC_LDR_ACTIVE_MAX |
+                          IR_DEC_LDR_ACTIVE_MIN, regval);
+
+       /* Time for leader idle part */
+       regval = FIELD_PREP(IR_DEC_LDR_IDLE_MAX, timings->leader_idle_max) |
+                FIELD_PREP(IR_DEC_LDR_IDLE_MIN, timings->leader_idle_min);
+       regmap_update_bits(ir->reg, IR_DEC_LDR_IDLE,
+                          IR_DEC_LDR_IDLE_MAX | IR_DEC_LDR_IDLE_MIN, regval);
+
+       /* Time for repeat leader idle part */
+       regval = FIELD_PREP(IR_DEC_LDR_REPEAT_MAX, timings->repeat_leader_max) |
+                FIELD_PREP(IR_DEC_LDR_REPEAT_MIN, timings->repeat_leader_min);
+       regmap_update_bits(ir->reg, IR_DEC_LDR_REPEAT, IR_DEC_LDR_REPEAT_MAX |
+                          IR_DEC_LDR_REPEAT_MIN, regval);
+
+       /*
+        * NEC: Time for logic '0'
+        * RC6: Time for half of trailer bit
+        */
+       regval = FIELD_PREP(IR_DEC_BIT_0_MAX, timings->bit0_max) |
+                FIELD_PREP(IR_DEC_BIT_0_MIN, timings->bit0_min);
+       regmap_update_bits(ir->reg, IR_DEC_BIT_0,
+                          IR_DEC_BIT_0_MAX | IR_DEC_BIT_0_MIN, regval);
+
+       /*
+        * NEC: Time for logic '1'
+        * RC6: Time for whole of trailer bit
+        */
+       regval = FIELD_PREP(IR_DEC_STATUS_BIT_1_MAX, timings->bit1_max) |
+                FIELD_PREP(IR_DEC_STATUS_BIT_1_MIN, timings->bit1_min);
+       regmap_update_bits(ir->reg, IR_DEC_STATUS, IR_DEC_STATUS_BIT_1_MAX |
+                          IR_DEC_STATUS_BIT_1_MIN, regval);
+
+       /* Enable to match logic '1' */
+       regmap_update_bits(ir->reg, IR_DEC_STATUS, IR_DEC_STATUS_BIT_1_ENABLE,
+                          timings->bit1_match_enable ?
+                          IR_DEC_STATUS_BIT_1_ENABLE : 0);
+
+       /*
+        * NEC: Unused
+        * RC6: Time for halt of logic 0/1
+        */
+       regval = FIELD_PREP(IR_DEC_DURATN2_MAX, timings->duration2_max) |
+                FIELD_PREP(IR_DEC_DURATN2_MIN, timings->duration2_min);
+       regmap_update_bits(ir->reg, IR_DEC_DURATN2,
+                          IR_DEC_DURATN2_MAX | IR_DEC_DURATN2_MIN, regval);
+
+       /*
+        * NEC: Unused
+        * RC6: Time for whole logic 0/1
+        */
+       regval = FIELD_PREP(IR_DEC_DURATN3_MAX, timings->duration3_max) |
+                FIELD_PREP(IR_DEC_DURATN3_MIN, timings->duration3_min);
+       regmap_update_bits(ir->reg, IR_DEC_DURATN3,
+                          IR_DEC_DURATN3_MAX | IR_DEC_DURATN3_MIN, regval);
+
+       /* Reset ir decoder and enable decode */
+       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET,
+                          IR_DEC_REG1_RESET);
+       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET, 0);
+       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE,
+                          IR_DEC_REG1_ENABLE);
+
+       spin_unlock_irqrestore(&ir->lock, flags);
+
+       dev_info(&dev->dev, "hw decoder init, protocol: %d\n", protocol);
+
+       return 0;
+}
+
+static void meson_ir_sw_decoder_init(struct rc_dev *dev)
+{
+       unsigned long flags;
+       struct meson_ir *ir = dev->priv;
+
+       spin_lock_irqsave(&ir->lock, flags);
+
+       /* Reset the decoder */
+       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET,
+                          IR_DEC_REG1_RESET);
+       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET, 0);
+
+       /* Set general operation mode (= raw/software decoding) */
+       if (of_device_is_compatible(dev->dev.of_node, "amlogic,meson6-ir"))
+               regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_MODE,
+                                  FIELD_PREP(IR_DEC_REG1_MODE,
+                                             DEC_MODE_RAW));
+       else
+               regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_MODE,
+                                  FIELD_PREP(IR_DEC_REG2_MODE,
+                                             DEC_MODE_RAW));
+
+       /* Set rate */
+       regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_BASE_TIME,
+                          FIELD_PREP(IR_DEC_REG0_BASE_TIME,
+                                     MESON_RAW_TRATE - 1));
+       /* IRQ on rising and falling edges */
+       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_IRQSEL,
+                          FIELD_PREP(IR_DEC_REG1_IRQSEL, IRQSEL_RISE_FALL));
+       /* Enable the decoder */
+       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE,
+                          IR_DEC_REG1_ENABLE);
+
+       spin_unlock_irqrestore(&ir->lock, flags);
+
+       dev_info(&dev->dev, "sw decoder init\n");
+}
+
 static int meson_ir_probe(struct platform_device *pdev)
 {
+       const struct meson_ir_param *match_data;
        struct device *dev = &pdev->dev;
        struct device_node *node = dev->of_node;
        void __iomem *res_start;
@@ -97,10 +455,17 @@ static int meson_ir_probe(struct platform_device *pdev)
        if (!ir)
                return -ENOMEM;
 
+       match_data = of_device_get_match_data(dev);
+       if (!match_data)
+               return dev_err_probe(dev, -ENODEV, "failed to get match data\n");
+
+       ir->param = match_data;
+
        res_start = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(res_start))
                return PTR_ERR(res_start);
 
+       meson_ir_regmap_config.max_register = ir->param->max_register;
        ir->reg = devm_regmap_init_mmio(&pdev->dev, res_start,
                                        &meson_ir_regmap_config);
        if (IS_ERR(ir->reg))
@@ -110,23 +475,34 @@ static int meson_ir_probe(struct platform_device *pdev)
        if (irq < 0)
                return irq;
 
-       ir->rc = devm_rc_allocate_device(dev, RC_DRIVER_IR_RAW);
+       if (ir->param->support_hw_decoder)
+               ir->rc = devm_rc_allocate_device(&pdev->dev,
+                                                RC_DRIVER_SCANCODE);
+       else
+               ir->rc = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
+
        if (!ir->rc) {
                dev_err(dev, "failed to allocate rc device\n");
                return -ENOMEM;
        }
 
+       if (ir->rc->driver_type == RC_DRIVER_IR_RAW) {
+               ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
+               ir->rc->rx_resolution = MESON_RAW_TRATE;
+               ir->rc->min_timeout = 1;
+               ir->rc->timeout = IR_DEFAULT_TIMEOUT;
+               ir->rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
+       } else if (ir->rc->driver_type == RC_DRIVER_SCANCODE) {
+               ir->rc->allowed_protocols = RC_PROTO_BIT_NEC;
+               ir->rc->change_protocol = meson_ir_hw_decoder_init;
+       }
+
        ir->rc->priv = ir;
        ir->rc->device_name = DRIVER_NAME;
        ir->rc->input_phys = DRIVER_NAME "/input0";
        ir->rc->input_id.bustype = BUS_HOST;
        map_name = of_get_property(node, "linux,rc-map-name", NULL);
        ir->rc->map_name = map_name ? map_name : RC_MAP_EMPTY;
-       ir->rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
-       ir->rc->rx_resolution = MESON_RAW_TRATE;
-       ir->rc->min_timeout = 1;
-       ir->rc->timeout = IR_DEFAULT_TIMEOUT;
-       ir->rc->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
        ir->rc->driver_name = DRIVER_NAME;
 
        spin_lock_init(&ir->lock);
@@ -138,36 +514,15 @@ static int meson_ir_probe(struct platform_device *pdev)
                return ret;
        }
 
-       ret = devm_request_irq(dev, irq, meson_ir_irq, 0, NULL, ir);
+       if (ir->rc->driver_type == RC_DRIVER_IR_RAW)
+               meson_ir_sw_decoder_init(ir->rc);
+
+       ret = devm_request_irq(dev, irq, meson_ir_irq, 0, "meson_ir", ir);
        if (ret) {
                dev_err(dev, "failed to request irq\n");
                return ret;
        }
 
-       /* Reset the decoder */
-       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET,
-                          IR_DEC_REG1_RESET);
-       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_RESET, 0);
-
-       /* Set general operation mode (= raw/software decoding) */
-       if (of_device_is_compatible(node, "amlogic,meson6-ir"))
-               regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_MODE,
-                                  FIELD_PREP(IR_DEC_REG1_MODE, DEC_MODE_RAW));
-       else
-               regmap_update_bits(ir->reg, IR_DEC_REG2, IR_DEC_REG2_MODE,
-                                  FIELD_PREP(IR_DEC_REG2_MODE, DEC_MODE_RAW));
-
-       /* Set rate */
-       regmap_update_bits(ir->reg, IR_DEC_REG0, IR_DEC_REG0_BASE_TIME,
-                          FIELD_PREP(IR_DEC_REG0_BASE_TIME,
-                                     MESON_RAW_TRATE - 1));
-       /* IRQ on rising and falling edges */
-       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_IRQSEL,
-                          FIELD_PREP(IR_DEC_REG1_IRQSEL, IRQSEL_RISE_FALL));
-       /* Enable the decoder */
-       regmap_update_bits(ir->reg, IR_DEC_REG1, IR_DEC_REG1_ENABLE,
-                          IR_DEC_REG1_ENABLE);
-
        dev_info(dev, "receiver initialized\n");
 
        return 0;
@@ -212,11 +567,36 @@ static void meson_ir_shutdown(struct platform_device *pdev)
        spin_unlock_irqrestore(&ir->lock, flags);
 }
 
+static const struct meson_ir_param meson6_ir_param = {
+       .support_hw_decoder = false,
+       .max_register = IR_DEC_REG1,
+};
+
+static const struct meson_ir_param meson8b_ir_param = {
+       .support_hw_decoder = false,
+       .max_register = IR_DEC_REG2,
+};
+
+static const struct meson_ir_param meson_s4_ir_param = {
+       .support_hw_decoder = true,
+       .max_register = IR_DEC_FRAME1,
+};
+
 static const struct of_device_id meson_ir_match[] = {
-       { .compatible = "amlogic,meson6-ir" },
-       { .compatible = "amlogic,meson8b-ir" },
-       { .compatible = "amlogic,meson-gxbb-ir" },
-       { },
+       {
+               .compatible = "amlogic,meson6-ir",
+               .data = &meson6_ir_param,
+       }, {
+               .compatible = "amlogic,meson8b-ir",
+               .data = &meson8b_ir_param,
+       }, {
+               .compatible = "amlogic,meson-gxbb-ir",
+               .data = &meson8b_ir_param,
+       }, {
+               .compatible = "amlogic,meson-s4-ir",
+               .data = &meson_s4_ir_param,
+       },
+       {},
 };
 MODULE_DEVICE_TABLE(of, meson_ir_match);
 
index 7732054c4621e614fac262fd9da044abf8771e6e..c5f37c03af9c95793be0f708feaa4e6d66982371 100644 (file)
@@ -23,6 +23,7 @@ struct pwm_ir {
 
 static const struct of_device_id pwm_ir_of_match[] = {
        { .compatible = "pwm-ir-tx", },
+       { .compatible = "nokia,n900-ir" },
        { },
 };
 MODULE_DEVICE_TABLE(of, pwm_ir_of_match);
index b51e6a3b8cbeb5652752f8def1dcc01fe7e9efc4..f99878eff7aceac019bed70af1d93dd4582df510 100644 (file)
@@ -504,13 +504,16 @@ struct vidtv_mux *vidtv_mux_init(struct dvb_frontend *fe,
        m->priv = args->priv;
        m->network_id = args->network_id;
        m->network_name = kstrdup(args->network_name, GFP_KERNEL);
+       if (!m->network_name)
+               goto free_mux_buf;
+
        m->timing.current_jiffies = get_jiffies_64();
 
        if (args->channels)
                m->channels = args->channels;
        else
                if (vidtv_channels_init(m) < 0)
-                       goto free_mux_buf;
+                       goto free_mux_network_name;
 
        /* will alloc data for pmt_sections after initializing pat */
        if (vidtv_channel_si_init(m) < 0)
@@ -527,6 +530,8 @@ free_channel_si:
        vidtv_channel_si_destroy(m);
 free_channels:
        vidtv_channels_destroy(m);
+free_mux_network_name:
+       kfree(m->network_name);
 free_mux_buf:
        vfree(m->mux_buf);
 free_mux:
index ce0b7a6e92dc3338e8cb824573bd692cd4bda4ef..2a51c898c11ebdadb4dbfe05ac11170c4a7b12aa 100644 (file)
@@ -301,16 +301,29 @@ struct vidtv_psi_desc_service *vidtv_psi_service_desc_init(struct vidtv_psi_desc
 
        desc->service_name_len = service_name_len;
 
-       if (service_name && service_name_len)
+       if (service_name && service_name_len) {
                desc->service_name = kstrdup(service_name, GFP_KERNEL);
+               if (!desc->service_name)
+                       goto free_desc;
+       }
 
        desc->provider_name_len = provider_name_len;
 
-       if (provider_name && provider_name_len)
+       if (provider_name && provider_name_len) {
                desc->provider_name = kstrdup(provider_name, GFP_KERNEL);
+               if (!desc->provider_name)
+                       goto free_desc_service_name;
+       }
 
        vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
        return desc;
+
+free_desc_service_name:
+       if (service_name && service_name_len)
+               kfree(desc->service_name);
+free_desc:
+       kfree(desc);
+       return NULL;
 }
 
 struct vidtv_psi_desc_registration
@@ -355,8 +368,13 @@ struct vidtv_psi_desc_network_name
 
        desc->length = network_name_len;
 
-       if (network_name && network_name_len)
+       if (network_name && network_name_len) {
                desc->network_name = kstrdup(network_name, GFP_KERNEL);
+               if (!desc->network_name) {
+                       kfree(desc);
+                       return NULL;
+               }
+       }
 
        vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
        return desc;
@@ -442,15 +460,32 @@ struct vidtv_psi_desc_short_event
                iso_language_code = "eng";
 
        desc->iso_language_code = kstrdup(iso_language_code, GFP_KERNEL);
+       if (!desc->iso_language_code)
+               goto free_desc;
 
-       if (event_name && event_name_len)
+       if (event_name && event_name_len) {
                desc->event_name = kstrdup(event_name, GFP_KERNEL);
+               if (!desc->event_name)
+                       goto free_desc_language_code;
+       }
 
-       if (text && text_len)
+       if (text && text_len) {
                desc->text = kstrdup(text, GFP_KERNEL);
+               if (!desc->text)
+                       goto free_desc_event_name;
+       }
 
        vidtv_psi_desc_chain(head, (struct vidtv_psi_desc *)desc);
        return desc;
+
+free_desc_event_name:
+       if (event_name && event_name_len)
+               kfree(desc->event_name);
+free_desc_language_code:
+       kfree(desc->iso_language_code);
+free_desc:
+       kfree(desc);
+       return NULL;
 }
 
 struct vidtv_psi_desc *vidtv_psi_desc_clone(struct vidtv_psi_desc *desc)
index e95bdccfc18e9ff7f244b7bd68099af0ebb82454..394c9f81ea727db2cb399b81011916efb08f0905 100644 (file)
@@ -240,7 +240,7 @@ static int vidioc_querycap(struct file *file, void  *priv,
        strscpy(cap->driver, "vivid", sizeof(cap->driver));
        strscpy(cap->card, "vivid", sizeof(cap->card));
        snprintf(cap->bus_info, sizeof(cap->bus_info),
-                       "platform:%s", dev->v4l2_dev.name);
+                "platform:%s-%03d", VIVID_MODULE_NAME, dev->inst);
 
        cap->capabilities = dev->vid_cap_caps | dev->vid_out_caps |
                dev->vbi_cap_caps | dev->vbi_out_caps |
index b5b104ee64c99f4ba31d07ecb96b62ed64d815cb..c57771119a34b0f6cd50e863239e24c9730c12fd 100644 (file)
@@ -145,7 +145,7 @@ void vivid_rds_gen_fill(struct vivid_rds_gen *rds, unsigned freq,
        rds->ta = alt;
        rds->ms = true;
        snprintf(rds->psname, sizeof(rds->psname), "%6d.%1d",
-                freq / 16, ((freq & 0xf) * 10) / 16);
+                (freq / 16) % 1000000, (((freq & 0xf) * 10) / 16) % 10);
        if (alt)
                strscpy(rds->radiotext,
                        " The Radio Data System can switch between different Radio Texts ",
index c5e21785fafe286501597ab385fa941cbc8015e7..fe4410a5e128489557dd1cc124026da43a38955f 100644 (file)
@@ -937,7 +937,6 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
        u32 *p_current_fw, *p_fw;
        u32 *p_fw_data;
        int frame = 0;
-       u16 _buffer_size = 4096;
        u8 *p_buffer;
 
        p_current_fw = vmalloc(1884180 * 4);
@@ -947,7 +946,7 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
                return -ENOMEM;
        }
 
-       p_buffer = vmalloc(4096);
+       p_buffer = vmalloc(EP5_BUF_SIZE);
        if (p_buffer == NULL) {
                dprintk(2, "FAIL!!!\n");
                vfree(p_current_fw);
@@ -1030,9 +1029,9 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
 
        /*download the firmware by ep5-out*/
 
-       for (frame = 0; frame < (int)(CX231xx_FIRM_IMAGE_SIZE*20/_buffer_size);
+       for (frame = 0; frame < (int)(CX231xx_FIRM_IMAGE_SIZE*20/EP5_BUF_SIZE);
             frame++) {
-               for (i = 0; i < _buffer_size; i++) {
+               for (i = 0; i < EP5_BUF_SIZE; i++) {
                        *(p_buffer + i) = (u8)(*(p_fw + (frame * 128 * 8 + (i / 4))) & 0x000000FF);
                        i++;
                        *(p_buffer + i) = (u8)((*(p_fw + (frame * 128 * 8 + (i / 4))) & 0x0000FF00) >> 8);
@@ -1041,7 +1040,7 @@ static int cx231xx_load_firmware(struct cx231xx *dev)
                        i++;
                        *(p_buffer + i) = (u8)((*(p_fw + (frame * 128 * 8 + (i / 4))) & 0xFF000000) >> 24);
                }
-               cx231xx_ep5_bulkout(dev, p_buffer, _buffer_size);
+               cx231xx_ep5_bulkout(dev, p_buffer, EP5_BUF_SIZE);
        }
 
        p_current_fw = p_fw;
index 727e6268567f757d553bcfefd17d8a99aa9ea4f4..7b7e2a26ef93b9c2ac86178bc78736129a27972b 100644 (file)
@@ -751,13 +751,12 @@ int cx231xx_ep5_bulkout(struct cx231xx *dev, u8 *firmware, u16 size)
        int ret = -ENOMEM;
        u32 *buffer;
 
-       buffer = kzalloc(4096, GFP_KERNEL);
+       buffer = kmemdup(firmware, EP5_BUF_SIZE, GFP_KERNEL);
        if (buffer == NULL)
                return -ENOMEM;
-       memcpy(&buffer[0], firmware, 4096);
 
        ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 5),
-                       buffer, 4096, &actlen, 2000);
+                       buffer, EP5_BUF_SIZE, &actlen, EP5_TIMEOUT_MS);
 
        if (ret)
                dev_err(dev->dev,
@@ -994,7 +993,7 @@ int cx231xx_init_isoc(struct cx231xx *dev, int max_packets,
        /* De-allocates all pending stuff */
        cx231xx_uninit_isoc(dev);
 
-       dma_q->p_left_data = kzalloc(4096, GFP_KERNEL);
+       dma_q->p_left_data = kzalloc(EP5_BUF_SIZE, GFP_KERNEL);
        if (dma_q->p_left_data == NULL)
                return -ENOMEM;
 
index 6929e4d97067ad493aad1d4f2e4a7927e2bced6b..74339a6a2f718436dc4c13889eef5017e630959b 100644 (file)
 #define CX23417_OSC_EN   8
 #define CX23417_RESET    9
 
+#define EP5_BUF_SIZE     4096
+#define EP5_TIMEOUT_MS   2000
+
 struct cx23417_fmt {
        u32   fourcc;          /* v4l2 format id */
        int   depth;
index 33a2aa8907e6530a03820ee1b115b8018a6003b5..4eb7dd4599b7e64bdf0ffdd643a3c44d06601da3 100644 (file)
@@ -322,8 +322,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
                        ret = -EOPNOTSUPP;
                } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
                           (msg[0].addr == state->af9033_i2c_addr[1])) {
-                       if (msg[0].len < 3 || msg[1].len < 1)
-                               return -EOPNOTSUPP;
+                       if (msg[0].len < 3 || msg[1].len < 1) {
+                               ret = -EOPNOTSUPP;
+                               goto unlock;
+                       }
                        /* demod access via firmware interface */
                        u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
                                        msg[0].buf[2];
@@ -383,8 +385,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
                        ret = -EOPNOTSUPP;
                } else if ((msg[0].addr == state->af9033_i2c_addr[0]) ||
                           (msg[0].addr == state->af9033_i2c_addr[1])) {
-                       if (msg[0].len < 3)
-                               return -EOPNOTSUPP;
+                       if (msg[0].len < 3) {
+                               ret = -EOPNOTSUPP;
+                               goto unlock;
+                       }
                        /* demod access via firmware interface */
                        u32 reg = msg[0].buf[0] << 16 | msg[0].buf[1] << 8 |
                                        msg[0].buf[2];
@@ -459,6 +463,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
                ret = -EOPNOTSUPP;
        }
 
+unlock:
        mutex_unlock(&d->i2c_mutex);
 
        if (ret < 0)
index b2b27a86dfe522ddd566377aff88274d6d11bed8..4cd21bb8805ed78234237141ba5a9f74a41cf159 100644 (file)
@@ -287,7 +287,7 @@ static int gp8psk_frontend_attach(struct dvb_usb_adapter *adap)
        int id = le16_to_cpu(d->udev->descriptor.idProduct);
        int is_rev1;
 
-       is_rev1 = (id == USB_PID_GENPIX_8PSK_REV_1_WARM) ? true : false;
+       is_rev1 = id == USB_PID_GENPIX_8PSK_REV_1_WARM;
 
        adap->fe_adap[0].fe = dvb_attach(gp8psk_fe_attach,
                                         &gp8psk_fe_ops, d, is_rev1);
index 46ed95483e2222fc60e87078ea3316271d85f538..5f5fa851ca640dc0ca01a38bf3fb3c975cad69b3 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <linux/input.h>
 #include <linux/sched/signal.h>
+#include <linux/bitops.h>
 
 #include "gspca.h"
 
@@ -1028,6 +1029,8 @@ static int set_flicker(struct gspca_dev *gspca_dev, int on, int apply)
                        sd->params.exposure.expMode = 2;
                        sd->exposure_status = EXPOSURE_NORMAL;
                }
+               if (sd->params.exposure.gain >= BITS_PER_TYPE(currentexp))
+                       return -EINVAL;
                currentexp = currentexp << sd->params.exposure.gain;
                sd->params.exposure.gain = 0;
                /* round down current exposure to nearest value */
index 8a39cac76c5850edbfbfedf859e8084cdaf39cd5..9d9e14c858e670d6588e87808c41c058a8d2e1e9 100644 (file)
@@ -279,10 +279,8 @@ static int smsusb1_load_firmware(struct usb_device *udev, int id, int board_id)
                }
        }
 
-       fw_buffer = kmalloc(fw->size, GFP_KERNEL);
+       fw_buffer = kmemdup(fw->data, fw->size, GFP_KERNEL);
        if (fw_buffer) {
-               memcpy(fw_buffer, fw->data, fw->size);
-
                rc = usb_bulk_msg(udev, usb_sndbulkpipe(udev, 2),
                                  fw_buffer, fw->size, &dummy, 1000);
 
index f77ebd688cde76d83d7425eddb89d1fbfed60339..331b8e535e5bbf33f22638b2ae8bc764ad5fc407 100644 (file)
@@ -82,19 +82,3 @@ config V4L2_CCI_I2C
        depends on I2C
        select REGMAP_I2C
        select V4L2_CCI
-
-# Used by drivers that need Videobuf modules
-config VIDEOBUF_GEN
-       tristate
-
-config VIDEOBUF_DMA_SG
-       tristate
-       select VIDEOBUF_GEN
-
-config VIDEOBUF_VMALLOC
-       tristate
-       select VIDEOBUF_GEN
-
-config VIDEOBUF_DMA_CONTIG
-       tristate
-       select VIDEOBUF_GEN
index be2551705755e40db993600e1991637c8c606d78..2177b9d63a8ffc1127c5a70118249a2ff63cd759 100644 (file)
@@ -33,10 +33,5 @@ obj-$(CONFIG_V4L2_JPEG_HELPER) += v4l2-jpeg.o
 obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o
 obj-$(CONFIG_V4L2_VP9) += v4l2-vp9.o
 
-obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o
-obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
-obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
-obj-$(CONFIG_VIDEOBUF_VMALLOC) += videobuf-vmalloc.o
-
 obj-$(CONFIG_VIDEO_TUNER) += tuner.o
 obj-$(CONFIG_VIDEO_DEV) += v4l2-dv-timings.o videodev.o
index c5ce9f11ad7bcece86f98be2a6c88f76c200d79e..3898ff7edddbf74d2650e5567d3f7b7d1c93552f 100644 (file)
@@ -238,6 +238,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        sev = kvzalloc(struct_size(sev, events, elems), GFP_KERNEL);
        if (!sev)
                return -ENOMEM;
+       sev->elems = elems;
        for (i = 0; i < elems; i++)
                sev->events[i].sev = sev;
        sev->type = sub->type;
@@ -245,7 +246,6 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
        sev->flags = sub->flags;
        sev->fh = fh;
        sev->ops = ops;
-       sev->elems = elems;
 
        mutex_lock(&fh->subscribe_lock);
 
index f4d9d62790940b2efe8a0b6750990220358818b0..9b1de54ce379bed517918d3bacaf79000a2f4129 100644 (file)
@@ -1510,6 +1510,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
                case V4L2_PIX_FMT_AV1_FRAME:    descr = "AV1 Frame"; break;
                case V4L2_PIX_FMT_MT2110T:      descr = "Mediatek 10bit Tile Mode"; break;
                case V4L2_PIX_FMT_MT2110R:      descr = "Mediatek 10bit Raster Mode"; break;
+               case V4L2_PIX_FMT_HEXTILE:      descr = "Hextile Compressed Format"; break;
                default:
                        if (fmt->description[0])
                                return;
index 31752c06d1f0c8bba6915f4e7bd9d5a4b75029c0..be86b906c985cc33166e2e803f6c1e47be6ed156 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/module.h>
 #include <linux/overflow.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/types.h>
 #include <linux/version.h>
 #include <linux/videodev2.h>
@@ -306,6 +307,42 @@ static int call_set_selection(struct v4l2_subdev *sd,
               sd->ops->pad->set_selection(sd, state, sel);
 }
 
+static int call_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+                              struct v4l2_mbus_frame_desc *fd)
+{
+       unsigned int i;
+       int ret;
+
+       memset(fd, 0, sizeof(*fd));
+
+       ret = sd->ops->pad->get_frame_desc(sd, pad, fd);
+       if (ret)
+               return ret;
+
+       dev_dbg(sd->dev, "Frame descriptor on pad %u, type %s\n", pad,
+               fd->type == V4L2_MBUS_FRAME_DESC_TYPE_PARALLEL ? "parallel" :
+               fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2 ? "CSI-2" :
+               "unknown");
+
+       for (i = 0; i < fd->num_entries; i++) {
+               struct v4l2_mbus_frame_desc_entry *entry = &fd->entry[i];
+               char buf[20] = "";
+
+               if (fd->type == V4L2_MBUS_FRAME_DESC_TYPE_CSI2)
+                       WARN_ON(snprintf(buf, sizeof(buf),
+                                        ", vc %u, dt 0x%02x",
+                                        entry->bus.csi2.vc,
+                                        entry->bus.csi2.dt) >= sizeof(buf));
+
+               dev_dbg(sd->dev,
+                       "\tstream %u, code 0x%04x, length %u, flags 0x%04x%s\n",
+                       entry->stream, entry->pixelcode, entry->length,
+                       entry->flags, buf);
+       }
+
+       return 0;
+}
+
 static inline int check_edid(struct v4l2_subdev *sd,
                             struct v4l2_subdev_edid *edid)
 {
@@ -359,6 +396,18 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
 {
        int ret;
 
+       /*
+        * The .s_stream() operation must never be called to start or stop an
+        * already started or stopped subdev. Catch offenders but don't return
+        * an error yet to avoid regressions.
+        *
+        * As .s_stream() is mutually exclusive with the .enable_streams() and
+        * .disable_streams() operation, we can use the enabled_streams field
+        * to store the subdev streaming state.
+        */
+       if (WARN_ON(!!sd->enabled_streams == !!enable))
+               return 0;
+
 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
        if (!IS_ERR_OR_NULL(sd->privacy_led)) {
                if (enable)
@@ -372,9 +421,12 @@ static int call_s_stream(struct v4l2_subdev *sd, int enable)
 
        if (!enable && ret < 0) {
                dev_warn(sd->dev, "disabling streaming failed (%d)\n", ret);
-               return 0;
+               ret = 0;
        }
 
+       if (!ret)
+               sd->enabled_streams = enable ? BIT(0) : 0;
+
        return ret;
 }
 
@@ -431,6 +483,7 @@ static const struct v4l2_subdev_pad_ops v4l2_subdev_call_pad_wrappers = {
        .set_edid               = call_set_edid,
        .dv_timings_cap         = call_dv_timings_cap,
        .enum_dv_timings        = call_enum_dv_timings,
+       .get_frame_desc         = call_get_frame_desc,
        .get_mbus_config        = call_get_mbus_config,
 };
 
diff --git a/drivers/media/v4l2-core/videobuf-core.c b/drivers/media/v4l2-core/videobuf-core.c
deleted file mode 100644 (file)
index 606a271..0000000
+++ /dev/null
@@ -1,1198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * generic helper functions for handling video4linux capture buffers
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-
-#include <media/videobuf-core.h>
-#include <media/v4l2-common.h>
-
-#define MAGIC_BUFFER 0x20070728
-#define MAGIC_CHECK(is, should)                                                \
-       do {                                                            \
-               if (unlikely((is) != (should))) {                       \
-                       printk(KERN_ERR                                 \
-                               "magic mismatch: %x (expected %x)\n",   \
-                                       is, should);                    \
-                       BUG();                                          \
-               }                                                       \
-       } while (0)
-
-static int debug;
-module_param(debug, int, 0644);
-
-MODULE_DESCRIPTION("helper module to manage video4linux buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
-MODULE_LICENSE("GPL");
-
-#define dprintk(level, fmt, arg...)                                    \
-       do {                                                            \
-               if (debug >= level)                                     \
-                       printk(KERN_DEBUG "vbuf: " fmt, ## arg);        \
-       } while (0)
-
-/* --------------------------------------------------------------------- */
-
-#define CALL(q, f, arg...)                                             \
-       ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
-#define CALLPTR(q, f, arg...)                                          \
-       ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
-
-struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
-{
-       struct videobuf_buffer *vb;
-
-       BUG_ON(q->msize < sizeof(*vb));
-
-       if (!q->int_ops || !q->int_ops->alloc_vb) {
-               printk(KERN_ERR "No specific ops defined!\n");
-               BUG();
-       }
-
-       vb = q->int_ops->alloc_vb(q->msize);
-       if (NULL != vb) {
-               init_waitqueue_head(&vb->done);
-               vb->magic = MAGIC_BUFFER;
-       }
-
-       return vb;
-}
-EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
-
-static int state_neither_active_nor_queued(struct videobuf_queue *q,
-                                          struct videobuf_buffer *vb)
-{
-       unsigned long flags;
-       bool rc;
-
-       spin_lock_irqsave(q->irqlock, flags);
-       rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
-       spin_unlock_irqrestore(q->irqlock, flags);
-       return rc;
-};
-
-int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
-               int non_blocking, int intr)
-{
-       bool is_ext_locked;
-       int ret = 0;
-
-       MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
-
-       if (non_blocking) {
-               if (state_neither_active_nor_queued(q, vb))
-                       return 0;
-               return -EAGAIN;
-       }
-
-       is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
-
-       /* Release vdev lock to prevent this wait from blocking outside access to
-          the device. */
-       if (is_ext_locked)
-               mutex_unlock(q->ext_lock);
-       if (intr)
-               ret = wait_event_interruptible(vb->done,
-                                       state_neither_active_nor_queued(q, vb));
-       else
-               wait_event(vb->done, state_neither_active_nor_queued(q, vb));
-       /* Relock */
-       if (is_ext_locked)
-               mutex_lock(q->ext_lock);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_waiton);
-
-int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
-                   struct v4l2_framebuffer *fbuf)
-{
-       MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
-       MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
-       return CALL(q, iolock, q, vb, fbuf);
-}
-EXPORT_SYMBOL_GPL(videobuf_iolock);
-
-void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
-                             struct videobuf_buffer *buf)
-{
-       if (q->int_ops->vaddr)
-               return q->int_ops->vaddr(buf);
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
-
-/* --------------------------------------------------------------------- */
-
-
-void videobuf_queue_core_init(struct videobuf_queue *q,
-                        const struct videobuf_queue_ops *ops,
-                        struct device *dev,
-                        spinlock_t *irqlock,
-                        enum v4l2_buf_type type,
-                        enum v4l2_field field,
-                        unsigned int msize,
-                        void *priv,
-                        struct videobuf_qtype_ops *int_ops,
-                        struct mutex *ext_lock)
-{
-       BUG_ON(!q);
-       memset(q, 0, sizeof(*q));
-       q->irqlock   = irqlock;
-       q->ext_lock  = ext_lock;
-       q->dev       = dev;
-       q->type      = type;
-       q->field     = field;
-       q->msize     = msize;
-       q->ops       = ops;
-       q->priv_data = priv;
-       q->int_ops   = int_ops;
-
-       /* All buffer operations are mandatory */
-       BUG_ON(!q->ops->buf_setup);
-       BUG_ON(!q->ops->buf_prepare);
-       BUG_ON(!q->ops->buf_queue);
-       BUG_ON(!q->ops->buf_release);
-
-       /* Lock is mandatory for queue_cancel to work */
-       BUG_ON(!irqlock);
-
-       /* Having implementations for abstract methods are mandatory */
-       BUG_ON(!q->int_ops);
-
-       mutex_init(&q->vb_lock);
-       init_waitqueue_head(&q->wait);
-       INIT_LIST_HEAD(&q->stream);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
-
-/* Locking: Only usage in bttv unsafe find way to remove */
-int videobuf_queue_is_busy(struct videobuf_queue *q)
-{
-       int i;
-
-       MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
-       if (q->streaming) {
-               dprintk(1, "busy: streaming active\n");
-               return 1;
-       }
-       if (q->reading) {
-               dprintk(1, "busy: pending read #1\n");
-               return 1;
-       }
-       if (q->read_buf) {
-               dprintk(1, "busy: pending read #2\n");
-               return 1;
-       }
-       for (i = 0; i < VIDEO_MAX_FRAME; i++) {
-               if (NULL == q->bufs[i])
-                       continue;
-               if (q->bufs[i]->map) {
-                       dprintk(1, "busy: buffer #%d mapped\n", i);
-                       return 1;
-               }
-               if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
-                       dprintk(1, "busy: buffer #%d queued\n", i);
-                       return 1;
-               }
-               if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
-                       dprintk(1, "busy: buffer #%d active\n", i);
-                       return 1;
-               }
-       }
-       return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
-
-/*
- * __videobuf_free() - free all the buffers and their control structures
- *
- * This function can only be called if streaming/reading is off, i.e. no buffers
- * are under control of the driver.
- */
-/* Locking: Caller holds q->vb_lock */
-static int __videobuf_free(struct videobuf_queue *q)
-{
-       int i;
-
-       dprintk(1, "%s\n", __func__);
-       if (!q)
-               return 0;
-
-       if (q->streaming || q->reading) {
-               dprintk(1, "Cannot free buffers when streaming or reading\n");
-               return -EBUSY;
-       }
-
-       MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
-       for (i = 0; i < VIDEO_MAX_FRAME; i++)
-               if (q->bufs[i] && q->bufs[i]->map) {
-                       dprintk(1, "Cannot free mmapped buffers\n");
-                       return -EBUSY;
-               }
-
-       for (i = 0; i < VIDEO_MAX_FRAME; i++) {
-               if (NULL == q->bufs[i])
-                       continue;
-               q->ops->buf_release(q, q->bufs[i]);
-               kfree(q->bufs[i]);
-               q->bufs[i] = NULL;
-       }
-
-       return 0;
-}
-
-/* Locking: Caller holds q->vb_lock */
-void videobuf_queue_cancel(struct videobuf_queue *q)
-{
-       unsigned long flags = 0;
-       int i;
-
-       q->streaming = 0;
-       q->reading  = 0;
-       wake_up_interruptible_sync(&q->wait);
-
-       /* remove queued buffers from list */
-       spin_lock_irqsave(q->irqlock, flags);
-       for (i = 0; i < VIDEO_MAX_FRAME; i++) {
-               if (NULL == q->bufs[i])
-                       continue;
-               if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
-                       list_del(&q->bufs[i]->queue);
-                       q->bufs[i]->state = VIDEOBUF_ERROR;
-                       wake_up_all(&q->bufs[i]->done);
-               }
-       }
-       spin_unlock_irqrestore(q->irqlock, flags);
-
-       /* free all buffers + clear queue */
-       for (i = 0; i < VIDEO_MAX_FRAME; i++) {
-               if (NULL == q->bufs[i])
-                       continue;
-               q->ops->buf_release(q, q->bufs[i]);
-       }
-       INIT_LIST_HEAD(&q->stream);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
-
-/* --------------------------------------------------------------------- */
-
-/* Locking: Caller holds q->vb_lock */
-enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
-{
-       enum v4l2_field field = q->field;
-
-       BUG_ON(V4L2_FIELD_ANY == field);
-
-       if (V4L2_FIELD_ALTERNATE == field) {
-               if (V4L2_FIELD_TOP == q->last) {
-                       field   = V4L2_FIELD_BOTTOM;
-                       q->last = V4L2_FIELD_BOTTOM;
-               } else {
-                       field   = V4L2_FIELD_TOP;
-                       q->last = V4L2_FIELD_TOP;
-               }
-       }
-       return field;
-}
-EXPORT_SYMBOL_GPL(videobuf_next_field);
-
-/* Locking: Caller holds q->vb_lock */
-static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
-                           struct videobuf_buffer *vb, enum v4l2_buf_type type)
-{
-       MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
-       MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
-       b->index    = vb->i;
-       b->type     = type;
-
-       b->memory   = vb->memory;
-       switch (b->memory) {
-       case V4L2_MEMORY_MMAP:
-               b->m.offset  = vb->boff;
-               b->length    = vb->bsize;
-               break;
-       case V4L2_MEMORY_USERPTR:
-               b->m.userptr = vb->baddr;
-               b->length    = vb->bsize;
-               break;
-       case V4L2_MEMORY_OVERLAY:
-               b->m.offset  = vb->boff;
-               break;
-       case V4L2_MEMORY_DMABUF:
-               /* DMABUF is not handled in videobuf framework */
-               break;
-       }
-
-       b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
-       if (vb->map)
-               b->flags |= V4L2_BUF_FLAG_MAPPED;
-
-       switch (vb->state) {
-       case VIDEOBUF_PREPARED:
-       case VIDEOBUF_QUEUED:
-       case VIDEOBUF_ACTIVE:
-               b->flags |= V4L2_BUF_FLAG_QUEUED;
-               break;
-       case VIDEOBUF_ERROR:
-               b->flags |= V4L2_BUF_FLAG_ERROR;
-               fallthrough;
-       case VIDEOBUF_DONE:
-               b->flags |= V4L2_BUF_FLAG_DONE;
-               break;
-       case VIDEOBUF_NEEDS_INIT:
-       case VIDEOBUF_IDLE:
-               /* nothing */
-               break;
-       }
-
-       b->field     = vb->field;
-       v4l2_buffer_set_timestamp(b, vb->ts);
-       b->bytesused = vb->size;
-       b->sequence  = vb->field_count >> 1;
-}
-
-int videobuf_mmap_free(struct videobuf_queue *q)
-{
-       int ret;
-       videobuf_queue_lock(q);
-       ret = __videobuf_free(q);
-       videobuf_queue_unlock(q);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_mmap_free);
-
-/* Locking: Caller holds q->vb_lock */
-int __videobuf_mmap_setup(struct videobuf_queue *q,
-                       unsigned int bcount, unsigned int bsize,
-                       enum v4l2_memory memory)
-{
-       unsigned int i;
-       int err;
-
-       MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
-       err = __videobuf_free(q);
-       if (0 != err)
-               return err;
-
-       /* Allocate and initialize buffers */
-       for (i = 0; i < bcount; i++) {
-               q->bufs[i] = videobuf_alloc_vb(q);
-
-               if (NULL == q->bufs[i])
-                       break;
-
-               q->bufs[i]->i      = i;
-               q->bufs[i]->memory = memory;
-               q->bufs[i]->bsize  = bsize;
-               switch (memory) {
-               case V4L2_MEMORY_MMAP:
-                       q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
-                       break;
-               case V4L2_MEMORY_USERPTR:
-               case V4L2_MEMORY_OVERLAY:
-               case V4L2_MEMORY_DMABUF:
-                       /* nothing */
-                       break;
-               }
-       }
-
-       if (!i)
-               return -ENOMEM;
-
-       dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
-
-       return i;
-}
-EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
-
-int videobuf_mmap_setup(struct videobuf_queue *q,
-                       unsigned int bcount, unsigned int bsize,
-                       enum v4l2_memory memory)
-{
-       int ret;
-       videobuf_queue_lock(q);
-       ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
-       videobuf_queue_unlock(q);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
-
-int videobuf_reqbufs(struct videobuf_queue *q,
-                struct v4l2_requestbuffers *req)
-{
-       unsigned int size, count;
-       int retval;
-
-       if (req->memory != V4L2_MEMORY_MMAP     &&
-           req->memory != V4L2_MEMORY_USERPTR  &&
-           req->memory != V4L2_MEMORY_OVERLAY) {
-               dprintk(1, "reqbufs: memory type invalid\n");
-               return -EINVAL;
-       }
-
-       videobuf_queue_lock(q);
-       if (req->type != q->type) {
-               dprintk(1, "reqbufs: queue type invalid\n");
-               retval = -EINVAL;
-               goto done;
-       }
-
-       if (q->streaming) {
-               dprintk(1, "reqbufs: streaming already exists\n");
-               retval = -EBUSY;
-               goto done;
-       }
-       if (!list_empty(&q->stream)) {
-               dprintk(1, "reqbufs: stream running\n");
-               retval = -EBUSY;
-               goto done;
-       }
-
-       if (req->count == 0) {
-               dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
-               retval = __videobuf_free(q);
-               goto done;
-       }
-
-       count = req->count;
-       if (count > VIDEO_MAX_FRAME)
-               count = VIDEO_MAX_FRAME;
-       size = 0;
-       q->ops->buf_setup(q, &count, &size);
-       dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
-               count, size,
-               (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
-
-       retval = __videobuf_mmap_setup(q, count, size, req->memory);
-       if (retval < 0) {
-               dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
-               goto done;
-       }
-
-       req->count = retval;
-       retval = 0;
-
- done:
-       videobuf_queue_unlock(q);
-       return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_reqbufs);
-
-int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-{
-       int ret = -EINVAL;
-
-       videobuf_queue_lock(q);
-       if (unlikely(b->type != q->type)) {
-               dprintk(1, "querybuf: Wrong type.\n");
-               goto done;
-       }
-       if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
-               dprintk(1, "querybuf: index out of range.\n");
-               goto done;
-       }
-       if (unlikely(NULL == q->bufs[b->index])) {
-               dprintk(1, "querybuf: buffer is null.\n");
-               goto done;
-       }
-
-       videobuf_status(q, b, q->bufs[b->index], q->type);
-
-       ret = 0;
-done:
-       videobuf_queue_unlock(q);
-       return ret;
-}
-EXPORT_SYMBOL_GPL(videobuf_querybuf);
-
-int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
-{
-       struct videobuf_buffer *buf;
-       enum v4l2_field field;
-       unsigned long flags = 0;
-       int retval;
-
-       MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
-       if (b->memory == V4L2_MEMORY_MMAP)
-               mmap_read_lock(current->mm);
-
-       videobuf_queue_lock(q);
-       retval = -EBUSY;
-       if (q->reading) {
-               dprintk(1, "qbuf: Reading running...\n");
-               goto done;
-       }
-       retval = -EINVAL;
-       if (b->type != q->type) {
-               dprintk(1, "qbuf: Wrong type.\n");
-               goto done;
-       }
-       if (b->index >= VIDEO_MAX_FRAME) {
-               dprintk(1, "qbuf: index out of range.\n");
-               goto done;
-       }
-       buf = q->bufs[b->index];
-       if (NULL == buf) {
-               dprintk(1, "qbuf: buffer is null.\n");
-               goto done;
-       }
-       MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
-       if (buf->memory != b->memory) {
-               dprintk(1, "qbuf: memory type is wrong.\n");
-               goto done;
-       }
-       if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
-               dprintk(1, "qbuf: buffer is already queued or active.\n");
-               goto done;
-       }
-
-       switch (b->memory) {
-       case V4L2_MEMORY_MMAP:
-               if (0 == buf->baddr) {
-                       dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
-                       goto done;
-               }
-               if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
-                   || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
-                   || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
-                   || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
-                       buf->size = b->bytesused;
-                       buf->field = b->field;
-                       buf->ts = v4l2_buffer_get_timestamp(b);
-               }
-               break;
-       case V4L2_MEMORY_USERPTR:
-               if (b->length < buf->bsize) {
-                       dprintk(1, "qbuf: buffer length is not enough\n");
-                       goto done;
-               }
-               if (VIDEOBUF_NEEDS_INIT != buf->state &&
-                   buf->baddr != b->m.userptr)
-                       q->ops->buf_release(q, buf);
-               buf->baddr = b->m.userptr;
-               break;
-       case V4L2_MEMORY_OVERLAY:
-               buf->boff = b->m.offset;
-               break;
-       default:
-               dprintk(1, "qbuf: wrong memory type\n");
-               goto done;
-       }
-
-       dprintk(1, "qbuf: requesting next field\n");
-       field = videobuf_next_field(q);
-       retval = q->ops->buf_prepare(q, buf, field);
-       if (0 != retval) {
-               dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
-               goto done;
-       }
-
-       list_add_tail(&buf->stream, &q->stream);
-       if (q->streaming) {
-               spin_lock_irqsave(q->irqlock, flags);
-               q->ops->buf_queue(q, buf);
-               spin_unlock_irqrestore(q->irqlock, flags);
-       }
-       dprintk(1, "qbuf: succeeded\n");
-       retval = 0;
-       wake_up_interruptible_sync(&q->wait);
-
-done:
-       videobuf_queue_unlock(q);
-
-       if (b->memory == V4L2_MEMORY_MMAP)
-               mmap_read_unlock(current->mm);
-
-       return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_qbuf);
-
-/* Locking: Caller holds q->vb_lock */
-static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
-{
-       int retval;
-
-checks:
-       if (!q->streaming) {
-               dprintk(1, "next_buffer: Not streaming\n");
-               retval = -EINVAL;
-               goto done;
-       }
-
-       if (list_empty(&q->stream)) {
-               if (noblock) {
-                       retval = -EAGAIN;
-                       dprintk(2, "next_buffer: no buffers to dequeue\n");
-                       goto done;
-               } else {
-                       dprintk(2, "next_buffer: waiting on buffer\n");
-
-                       /* Drop lock to avoid deadlock with qbuf */
-                       videobuf_queue_unlock(q);
-
-                       /* Checking list_empty and streaming is safe without
-                        * locks because we goto checks to validate while
-                        * holding locks before proceeding */
-                       retval = wait_event_interruptible(q->wait,
-                               !list_empty(&q->stream) || !q->streaming);
-                       videobuf_queue_lock(q);
-
-                       if (retval)
-                               goto done;
-
-                       goto checks;
-               }
-       }
-
-       retval = 0;
-
-done:
-       return retval;
-}
-
-/* Locking: Caller holds q->vb_lock */
-static int stream_next_buffer(struct videobuf_queue *q,
-                       struct videobuf_buffer **vb, int nonblocking)
-{
-       int retval;
-       struct videobuf_buffer *buf = NULL;
-
-       retval = stream_next_buffer_check_queue(q, nonblocking);
-       if (retval)
-               goto done;
-
-       buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
-       retval = videobuf_waiton(q, buf, nonblocking, 1);
-       if (retval < 0)
-               goto done;
-
-       *vb = buf;
-done:
-       return retval;
-}
-
-int videobuf_dqbuf(struct videobuf_queue *q,
-                  struct v4l2_buffer *b, int nonblocking)
-{
-       struct videobuf_buffer *buf = NULL;
-       int retval;
-
-       MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
-       memset(b, 0, sizeof(*b));
-       videobuf_queue_lock(q);
-
-       retval = stream_next_buffer(q, &buf, nonblocking);
-       if (retval < 0) {
-               dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
-               goto done;
-       }
-
-       switch (buf->state) {
-       case VIDEOBUF_ERROR:
-               dprintk(1, "dqbuf: state is error\n");
-               break;
-       case VIDEOBUF_DONE:
-               dprintk(1, "dqbuf: state is done\n");
-               break;
-       default:
-               dprintk(1, "dqbuf: state invalid\n");
-               retval = -EINVAL;
-               goto done;
-       }
-       CALL(q, sync, q, buf);
-       videobuf_status(q, b, buf, q->type);
-       list_del(&buf->stream);
-       buf->state = VIDEOBUF_IDLE;
-       b->flags &= ~V4L2_BUF_FLAG_DONE;
-done:
-       videobuf_queue_unlock(q);
-       return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_dqbuf);
-
-int videobuf_streamon(struct videobuf_queue *q)
-{
-       struct videobuf_buffer *buf;
-       unsigned long flags = 0;
-       int retval;
-
-       videobuf_queue_lock(q);
-       retval = -EBUSY;
-       if (q->reading)
-               goto done;
-       retval = 0;
-       if (q->streaming)
-               goto done;
-       q->streaming = 1;
-       spin_lock_irqsave(q->irqlock, flags);
-       list_for_each_entry(buf, &q->stream, stream)
-               if (buf->state == VIDEOBUF_PREPARED)
-                       q->ops->buf_queue(q, buf);
-       spin_unlock_irqrestore(q->irqlock, flags);
-
-       wake_up_interruptible_sync(&q->wait);
-done:
-       videobuf_queue_unlock(q);
-       return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_streamon);
-
-/* Locking: Caller holds q->vb_lock */
-static int __videobuf_streamoff(struct videobuf_queue *q)
-{
-       if (!q->streaming)
-               return -EINVAL;
-
-       videobuf_queue_cancel(q);
-
-       return 0;
-}
-
-int videobuf_streamoff(struct videobuf_queue *q)
-{
-       int retval;
-
-       videobuf_queue_lock(q);
-       retval = __videobuf_streamoff(q);
-       videobuf_queue_unlock(q);
-
-       return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_streamoff);
-
-/* Locking: Caller holds q->vb_lock */
-static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
-                                     char __user *data,
-                                     size_t count, loff_t *ppos)
-{
-       enum v4l2_field field;
-       unsigned long flags = 0;
-       int retval;
-
-       MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
-       /* setup stuff */
-       q->read_buf = videobuf_alloc_vb(q);
-       if (NULL == q->read_buf)
-               return -ENOMEM;
-
-       q->read_buf->memory = V4L2_MEMORY_USERPTR;
-       q->read_buf->baddr  = (unsigned long)data;
-       q->read_buf->bsize  = count;
-
-       field = videobuf_next_field(q);
-       retval = q->ops->buf_prepare(q, q->read_buf, field);
-       if (0 != retval)
-               goto done;
-
-       /* start capture & wait */
-       spin_lock_irqsave(q->irqlock, flags);
-       q->ops->buf_queue(q, q->read_buf);
-       spin_unlock_irqrestore(q->irqlock, flags);
-       retval = videobuf_waiton(q, q->read_buf, 0, 0);
-       if (0 == retval) {
-               CALL(q, sync, q, q->read_buf);
-               if (VIDEOBUF_ERROR == q->read_buf->state)
-                       retval = -EIO;
-               else
-                       retval = q->read_buf->size;
-       }
-
-done:
-       /* cleanup */
-       q->ops->buf_release(q, q->read_buf);
-       kfree(q->read_buf);
-       q->read_buf = NULL;
-       return retval;
-}
-
-static int __videobuf_copy_to_user(struct videobuf_queue *q,
-                                  struct videobuf_buffer *buf,
-                                  char __user *data, size_t count,
-                                  int nonblocking)
-{
-       void *vaddr = CALLPTR(q, vaddr, buf);
-
-       /* copy to userspace */
-       if (count > buf->size - q->read_off)
-               count = buf->size - q->read_off;
-
-       if (copy_to_user(data, vaddr + q->read_off, count))
-               return -EFAULT;
-
-       return count;
-}
-
-static int __videobuf_copy_stream(struct videobuf_queue *q,
-                                 struct videobuf_buffer *buf,
-                                 char __user *data, size_t count, size_t pos,
-                                 int vbihack, int nonblocking)
-{
-       unsigned int *fc = CALLPTR(q, vaddr, buf);
-
-       if (vbihack) {
-               /* dirty, undocumented hack -- pass the frame counter
-                       * within the last four bytes of each vbi data block.
-                       * We need that one to maintain backward compatibility
-                       * to all vbi decoding software out there ... */
-               fc += (buf->size >> 2) - 1;
-               *fc = buf->field_count >> 1;
-               dprintk(1, "vbihack: %d\n", *fc);
-       }
-
-       /* copy stuff using the common method */
-       count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
-
-       if ((count == -EFAULT) && (pos == 0))
-               return -EFAULT;
-
-       return count;
-}
-
-ssize_t videobuf_read_one(struct videobuf_queue *q,
-                         char __user *data, size_t count, loff_t *ppos,
-                         int nonblocking)
-{
-       enum v4l2_field field;
-       unsigned long flags = 0;
-       unsigned size = 0, nbufs = 1;
-       int retval;
-
-       MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
-       videobuf_queue_lock(q);
-
-       q->ops->buf_setup(q, &nbufs, &size);
-
-       if (NULL == q->read_buf  &&
-           count >= size        &&
-           !nonblocking) {
-               retval = videobuf_read_zerocopy(q, data, count, ppos);
-               if (retval >= 0  ||  retval == -EIO)
-                       /* ok, all done */
-                       goto done;
-               /* fallback to kernel bounce buffer on failures */
-       }
-
-       if (NULL == q->read_buf) {
-               /* need to capture a new frame */
-               retval = -ENOMEM;
-               q->read_buf = videobuf_alloc_vb(q);
-
-               dprintk(1, "video alloc=0x%p\n", q->read_buf);
-               if (NULL == q->read_buf)
-                       goto done;
-               q->read_buf->memory = V4L2_MEMORY_USERPTR;
-               q->read_buf->bsize = count; /* preferred size */
-               field = videobuf_next_field(q);
-               retval = q->ops->buf_prepare(q, q->read_buf, field);
-
-               if (0 != retval) {
-                       kfree(q->read_buf);
-                       q->read_buf = NULL;
-                       goto done;
-               }
-
-               spin_lock_irqsave(q->irqlock, flags);
-               q->ops->buf_queue(q, q->read_buf);
-               spin_unlock_irqrestore(q->irqlock, flags);
-
-               q->read_off = 0;
-       }
-
-       /* wait until capture is done */
-       retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
-       if (0 != retval)
-               goto done;
-
-       CALL(q, sync, q, q->read_buf);
-
-       if (VIDEOBUF_ERROR == q->read_buf->state) {
-               /* catch I/O errors */
-               q->ops->buf_release(q, q->read_buf);
-               kfree(q->read_buf);
-               q->read_buf = NULL;
-               retval = -EIO;
-               goto done;
-       }
-
-       /* Copy to userspace */
-       retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
-       if (retval < 0)
-               goto done;
-
-       q->read_off += retval;
-       if (q->read_off == q->read_buf->size) {
-               /* all data copied, cleanup */
-               q->ops->buf_release(q, q->read_buf);
-               kfree(q->read_buf);
-               q->read_buf = NULL;
-       }
-
-done:
-       videobuf_queue_unlock(q);
-       return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_read_one);
-
-/* Locking: Caller holds q->vb_lock */
-static int __videobuf_read_start(struct videobuf_queue *q)
-{
-       enum v4l2_field field;
-       unsigned long flags = 0;
-       unsigned int count = 0, size = 0;
-       int err, i;
-
-       q->ops->buf_setup(q, &count, &size);
-       if (count < 2)
-               count = 2;
-       if (count > VIDEO_MAX_FRAME)
-               count = VIDEO_MAX_FRAME;
-       size = PAGE_ALIGN(size);
-
-       err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
-       if (err < 0)
-               return err;
-
-       count = err;
-
-       for (i = 0; i < count; i++) {
-               field = videobuf_next_field(q);
-               err = q->ops->buf_prepare(q, q->bufs[i], field);
-               if (err)
-                       return err;
-               list_add_tail(&q->bufs[i]->stream, &q->stream);
-       }
-       spin_lock_irqsave(q->irqlock, flags);
-       for (i = 0; i < count; i++)
-               q->ops->buf_queue(q, q->bufs[i]);
-       spin_unlock_irqrestore(q->irqlock, flags);
-       q->reading = 1;
-       return 0;
-}
-
-static void __videobuf_read_stop(struct videobuf_queue *q)
-{
-       int i;
-
-       videobuf_queue_cancel(q);
-       __videobuf_free(q);
-       INIT_LIST_HEAD(&q->stream);
-       for (i = 0; i < VIDEO_MAX_FRAME; i++) {
-               if (NULL == q->bufs[i])
-                       continue;
-               kfree(q->bufs[i]);
-               q->bufs[i] = NULL;
-       }
-       q->read_buf = NULL;
-}
-
-int videobuf_read_start(struct videobuf_queue *q)
-{
-       int rc;
-
-       videobuf_queue_lock(q);
-       rc = __videobuf_read_start(q);
-       videobuf_queue_unlock(q);
-
-       return rc;
-}
-EXPORT_SYMBOL_GPL(videobuf_read_start);
-
-void videobuf_read_stop(struct videobuf_queue *q)
-{
-       videobuf_queue_lock(q);
-       __videobuf_read_stop(q);
-       videobuf_queue_unlock(q);
-}
-EXPORT_SYMBOL_GPL(videobuf_read_stop);
-
-void videobuf_stop(struct videobuf_queue *q)
-{
-       videobuf_queue_lock(q);
-
-       if (q->streaming)
-               __videobuf_streamoff(q);
-
-       if (q->reading)
-               __videobuf_read_stop(q);
-
-       videobuf_queue_unlock(q);
-}
-EXPORT_SYMBOL_GPL(videobuf_stop);
-
-ssize_t videobuf_read_stream(struct videobuf_queue *q,
-                            char __user *data, size_t count, loff_t *ppos,
-                            int vbihack, int nonblocking)
-{
-       int rc, retval;
-       unsigned long flags = 0;
-
-       MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
-       dprintk(2, "%s\n", __func__);
-       videobuf_queue_lock(q);
-       retval = -EBUSY;
-       if (q->streaming)
-               goto done;
-       if (!q->reading) {
-               retval = __videobuf_read_start(q);
-               if (retval < 0)
-                       goto done;
-       }
-
-       retval = 0;
-       while (count > 0) {
-               /* get / wait for data */
-               if (NULL == q->read_buf) {
-                       q->read_buf = list_entry(q->stream.next,
-                                                struct videobuf_buffer,
-                                                stream);
-                       list_del(&q->read_buf->stream);
-                       q->read_off = 0;
-               }
-               rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
-               if (rc < 0) {
-                       if (0 == retval)
-                               retval = rc;
-                       break;
-               }
-
-               if (q->read_buf->state == VIDEOBUF_DONE) {
-                       rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
-                                       retval, vbihack, nonblocking);
-                       if (rc < 0) {
-                               retval = rc;
-                               break;
-                       }
-                       retval      += rc;
-                       count       -= rc;
-                       q->read_off += rc;
-               } else {
-                       /* some error */
-                       q->read_off = q->read_buf->size;
-                       if (0 == retval)
-                               retval = -EIO;
-               }
-
-               /* requeue buffer when done with copying */
-               if (q->read_off == q->read_buf->size) {
-                       list_add_tail(&q->read_buf->stream,
-                                     &q->stream);
-                       spin_lock_irqsave(q->irqlock, flags);
-                       q->ops->buf_queue(q, q->read_buf);
-                       spin_unlock_irqrestore(q->irqlock, flags);
-                       q->read_buf = NULL;
-               }
-               if (retval < 0)
-                       break;
-       }
-
-done:
-       videobuf_queue_unlock(q);
-       return retval;
-}
-EXPORT_SYMBOL_GPL(videobuf_read_stream);
-
-__poll_t videobuf_poll_stream(struct file *file,
-                             struct videobuf_queue *q,
-                             poll_table *wait)
-{
-       __poll_t req_events = poll_requested_events(wait);
-       struct videobuf_buffer *buf = NULL;
-       __poll_t rc = 0;
-
-       videobuf_queue_lock(q);
-       if (q->streaming) {
-               if (!list_empty(&q->stream))
-                       buf = list_entry(q->stream.next,
-                                        struct videobuf_buffer, stream);
-       } else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
-               if (!q->reading)
-                       __videobuf_read_start(q);
-               if (!q->reading) {
-                       rc = EPOLLERR;
-               } else if (NULL == q->read_buf) {
-                       q->read_buf = list_entry(q->stream.next,
-                                                struct videobuf_buffer,
-                                                stream);
-                       list_del(&q->read_buf->stream);
-                       q->read_off = 0;
-               }
-               buf = q->read_buf;
-       }
-       if (buf)
-               poll_wait(file, &buf->done, wait);
-       else
-               rc = EPOLLERR;
-
-       if (0 == rc) {
-               if (buf->state == VIDEOBUF_DONE ||
-                   buf->state == VIDEOBUF_ERROR) {
-                       switch (q->type) {
-                       case V4L2_BUF_TYPE_VIDEO_OUTPUT:
-                       case V4L2_BUF_TYPE_VBI_OUTPUT:
-                       case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
-                       case V4L2_BUF_TYPE_SDR_OUTPUT:
-                               rc = EPOLLOUT | EPOLLWRNORM;
-                               break;
-                       default:
-                               rc = EPOLLIN | EPOLLRDNORM;
-                               break;
-                       }
-               }
-       }
-       videobuf_queue_unlock(q);
-       return rc;
-}
-EXPORT_SYMBOL_GPL(videobuf_poll_stream);
-
-int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
-{
-       int rc = -EINVAL;
-       int i;
-
-       MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
-
-       if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
-               dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
-               return -EINVAL;
-       }
-
-       videobuf_queue_lock(q);
-       for (i = 0; i < VIDEO_MAX_FRAME; i++) {
-               struct videobuf_buffer *buf = q->bufs[i];
-
-               if (buf && buf->memory == V4L2_MEMORY_MMAP &&
-                               buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
-                       rc = CALL(q, mmap_mapper, q, buf, vma);
-                       break;
-               }
-       }
-       videobuf_queue_unlock(q);
-
-       return rc;
-}
-EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
deleted file mode 100644 (file)
index 4c2ec7a..0000000
+++ /dev/null
@@ -1,402 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * helper functions for physically contiguous capture buffers
- *
- * The functions support hardware lacking scatter gather support
- * (i.e. the buffers must be linear in physical memory)
- *
- * Copyright (c) 2008 Magnus Damm
- *
- * Based on videobuf-vmalloc.c,
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/pagemap.h>
-#include <linux/dma-mapping.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <media/videobuf-dma-contig.h>
-
-struct videobuf_dma_contig_memory {
-       u32 magic;
-       void *vaddr;
-       dma_addr_t dma_handle;
-       unsigned long size;
-};
-
-#define MAGIC_DC_MEM 0x0733ac61
-#define MAGIC_CHECK(is, should)                                                    \
-       if (unlikely((is) != (should))) {                                   \
-               pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
-               BUG();                                                      \
-       }
-
-static int __videobuf_dc_alloc(struct device *dev,
-                              struct videobuf_dma_contig_memory *mem,
-                              unsigned long size)
-{
-       mem->size = size;
-       mem->vaddr = dma_alloc_coherent(dev, mem->size, &mem->dma_handle,
-                                       GFP_KERNEL);
-       if (!mem->vaddr) {
-               dev_err(dev, "memory alloc size %ld failed\n", mem->size);
-               return -ENOMEM;
-       }
-
-       dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
-
-       return 0;
-}
-
-static void __videobuf_dc_free(struct device *dev,
-                              struct videobuf_dma_contig_memory *mem)
-{
-       dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
-
-       mem->vaddr = NULL;
-}
-
-static void videobuf_vm_open(struct vm_area_struct *vma)
-{
-       struct videobuf_mapping *map = vma->vm_private_data;
-
-       dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
-               map, map->count, vma->vm_start, vma->vm_end);
-
-       map->count++;
-}
-
-static void videobuf_vm_close(struct vm_area_struct *vma)
-{
-       struct videobuf_mapping *map = vma->vm_private_data;
-       struct videobuf_queue *q = map->q;
-       int i;
-
-       dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
-               map, map->count, vma->vm_start, vma->vm_end);
-
-       map->count--;
-       if (0 == map->count) {
-               struct videobuf_dma_contig_memory *mem;
-
-               dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
-               videobuf_queue_lock(q);
-
-               /* We need first to cancel streams, before unmapping */
-               if (q->streaming)
-                       videobuf_queue_cancel(q);
-
-               for (i = 0; i < VIDEO_MAX_FRAME; i++) {
-                       if (NULL == q->bufs[i])
-                               continue;
-
-                       if (q->bufs[i]->map != map)
-                               continue;
-
-                       mem = q->bufs[i]->priv;
-                       if (mem) {
-                               /* This callback is called only if kernel has
-                                  allocated memory and this memory is mmapped.
-                                  In this case, memory should be freed,
-                                  in order to do memory unmap.
-                                */
-
-                               MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
-                               /* vfree is not atomic - can't be
-                                  called with IRQ's disabled
-                                */
-                               dev_dbg(q->dev, "buf[%d] freeing %p\n",
-                                       i, mem->vaddr);
-
-                               __videobuf_dc_free(q->dev, mem);
-                               mem->vaddr = NULL;
-                       }
-
-                       q->bufs[i]->map = NULL;
-                       q->bufs[i]->baddr = 0;
-               }
-
-               kfree(map);
-
-               videobuf_queue_unlock(q);
-       }
-}
-
-static const struct vm_operations_struct videobuf_vm_ops = {
-       .open   = videobuf_vm_open,
-       .close  = videobuf_vm_close,
-};
-
-/**
- * videobuf_dma_contig_user_put() - reset pointer to user space buffer
- * @mem: per-buffer private videobuf-dma-contig data
- *
- * This function resets the user space pointer
- */
-static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
-{
-       mem->dma_handle = 0;
-       mem->size = 0;
-}
-
-/**
- * videobuf_dma_contig_user_get() - setup user space memory pointer
- * @mem: per-buffer private videobuf-dma-contig data
- * @vb: video buffer to map
- *
- * This function validates and sets up a pointer to user space memory.
- * Only physically contiguous pfn-mapped memory is accepted.
- *
- * Returns 0 if successful.
- */
-static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
-                                       struct videobuf_buffer *vb)
-{
-       unsigned long untagged_baddr = untagged_addr(vb->baddr);
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long prev_pfn, this_pfn;
-       unsigned long pages_done, user_address;
-       unsigned int offset;
-       int ret;
-
-       offset = untagged_baddr & ~PAGE_MASK;
-       mem->size = PAGE_ALIGN(vb->size + offset);
-       ret = -EINVAL;
-
-       mmap_read_lock(mm);
-
-       vma = find_vma(mm, untagged_baddr);
-       if (!vma)
-               goto out_up;
-
-       if ((untagged_baddr + mem->size) > vma->vm_end)
-               goto out_up;
-
-       pages_done = 0;
-       prev_pfn = 0; /* kill warning */
-       user_address = untagged_baddr;
-
-       while (pages_done < (mem->size >> PAGE_SHIFT)) {
-               ret = follow_pfn(vma, user_address, &this_pfn);
-               if (ret)
-                       break;
-
-               if (pages_done == 0)
-                       mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
-               else if (this_pfn != (prev_pfn + 1))
-                       ret = -EFAULT;
-
-               if (ret)
-                       break;
-
-               prev_pfn = this_pfn;
-               user_address += PAGE_SIZE;
-               pages_done++;
-       }
-
-out_up:
-       mmap_read_unlock(current->mm);
-
-       return ret;
-}
-
-static struct videobuf_buffer *__videobuf_alloc(size_t size)
-{
-       struct videobuf_dma_contig_memory *mem;
-       struct videobuf_buffer *vb;
-
-       vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
-       if (vb) {
-               vb->priv = ((char *)vb) + size;
-               mem = vb->priv;
-               mem->magic = MAGIC_DC_MEM;
-       }
-
-       return vb;
-}
-
-static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
-{
-       struct videobuf_dma_contig_memory *mem = buf->priv;
-
-       BUG_ON(!mem);
-       MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
-       return mem->vaddr;
-}
-
-static int __videobuf_iolock(struct videobuf_queue *q,
-                            struct videobuf_buffer *vb,
-                            struct v4l2_framebuffer *fbuf)
-{
-       struct videobuf_dma_contig_memory *mem = vb->priv;
-
-       BUG_ON(!mem);
-       MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
-       switch (vb->memory) {
-       case V4L2_MEMORY_MMAP:
-               dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
-
-               /* All handling should be done by __videobuf_mmap_mapper() */
-               if (!mem->vaddr) {
-                       dev_err(q->dev, "memory is not allocated/mmapped.\n");
-                       return -EINVAL;
-               }
-               break;
-       case V4L2_MEMORY_USERPTR:
-               dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
-
-               /* handle pointer from user space */
-               if (vb->baddr)
-                       return videobuf_dma_contig_user_get(mem, vb);
-
-               /* allocate memory for the read() method */
-               if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size)))
-                       return -ENOMEM;
-               break;
-       case V4L2_MEMORY_OVERLAY:
-       default:
-               dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int __videobuf_mmap_mapper(struct videobuf_queue *q,
-                                 struct videobuf_buffer *buf,
-                                 struct vm_area_struct *vma)
-{
-       struct videobuf_dma_contig_memory *mem;
-       struct videobuf_mapping *map;
-       int retval;
-
-       dev_dbg(q->dev, "%s\n", __func__);
-
-       /* create mapping + update buffer list */
-       map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
-       if (!map)
-               return -ENOMEM;
-
-       buf->map = map;
-       map->q = q;
-
-       buf->baddr = vma->vm_start;
-
-       mem = buf->priv;
-       BUG_ON(!mem);
-       MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
-       if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize)))
-               goto error;
-
-       /* the "vm_pgoff" is just used in v4l2 to find the
-        * corresponding buffer data structure which is allocated
-        * earlier and it does not mean the offset from the physical
-        * buffer start address as usual. So set it to 0 to pass
-        * the sanity check in dma_mmap_coherent().
-        */
-       vma->vm_pgoff = 0;
-       retval = dma_mmap_coherent(q->dev, vma, mem->vaddr, mem->dma_handle,
-                                  mem->size);
-       if (retval) {
-               dev_err(q->dev, "mmap: remap failed with error %d. ",
-                       retval);
-               dma_free_coherent(q->dev, mem->size,
-                                 mem->vaddr, mem->dma_handle);
-               goto error;
-       }
-
-       vma->vm_ops = &videobuf_vm_ops;
-       vm_flags_set(vma, VM_DONTEXPAND);
-       vma->vm_private_data = map;
-
-       dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
-               map, q, vma->vm_start, vma->vm_end,
-               (long int)buf->bsize, vma->vm_pgoff, buf->i);
-
-       videobuf_vm_open(vma);
-
-       return 0;
-
-error:
-       kfree(map);
-       return -ENOMEM;
-}
-
-static struct videobuf_qtype_ops qops = {
-       .magic          = MAGIC_QTYPE_OPS,
-       .alloc_vb       = __videobuf_alloc,
-       .iolock         = __videobuf_iolock,
-       .mmap_mapper    = __videobuf_mmap_mapper,
-       .vaddr          = __videobuf_to_vaddr,
-};
-
-void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
-                                   const struct videobuf_queue_ops *ops,
-                                   struct device *dev,
-                                   spinlock_t *irqlock,
-                                   enum v4l2_buf_type type,
-                                   enum v4l2_field field,
-                                   unsigned int msize,
-                                   void *priv,
-                                   struct mutex *ext_lock)
-{
-       videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
-                                priv, &qops, ext_lock);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
-
-dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
-{
-       struct videobuf_dma_contig_memory *mem = buf->priv;
-
-       BUG_ON(!mem);
-       MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
-       return mem->dma_handle;
-}
-EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
-
-void videobuf_dma_contig_free(struct videobuf_queue *q,
-                             struct videobuf_buffer *buf)
-{
-       struct videobuf_dma_contig_memory *mem = buf->priv;
-
-       /* mmapped memory can't be freed here, otherwise mmapped region
-          would be released, while still needed. In this case, the memory
-          release should happen inside videobuf_vm_close().
-          So, it should free memory only if the memory were allocated for
-          read() operation.
-        */
-       if (buf->memory != V4L2_MEMORY_USERPTR)
-               return;
-
-       if (!mem)
-               return;
-
-       MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
-
-       /* handle user space pointer case */
-       if (buf->baddr) {
-               videobuf_dma_contig_user_put(mem);
-               return;
-       }
-
-       /* read() method */
-       if (mem->vaddr) {
-               __videobuf_dc_free(q->dev, mem);
-               mem->vaddr = NULL;
-       }
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
-
-MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
-MODULE_AUTHOR("Magnus Damm");
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
deleted file mode 100644 (file)
index 405b89e..0000000
+++ /dev/null
@@ -1,681 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * helper functions for SG DMA video4linux capture buffers
- *
- * The functions expect the hardware being able to scatter gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks).  They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/sched/mm.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pgtable.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/scatterlist.h>
-#include <asm/page.h>
-
-#include <media/videobuf-dma-sg.h>
-
-#define MAGIC_DMABUF 0x19721112
-#define MAGIC_SG_MEM 0x17890714
-
-#define MAGIC_CHECK(is, should)                                                \
-       if (unlikely((is) != (should))) {                               \
-               printk(KERN_ERR "magic mismatch: %x (expected %x)\n",   \
-                               is, should);                            \
-               BUG();                                                  \
-       }
-
-static int debug;
-module_param(debug, int, 0644);
-
-MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
-MODULE_LICENSE("GPL");
-
-#define dprintk(level, fmt, arg...)                                    \
-       if (debug >= level)                                             \
-               printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg)
-
-/* --------------------------------------------------------------------- */
-
-/*
- * Return a scatterlist for some page-aligned vmalloc()'ed memory
- * block (NULL on errors).  Memory for the scatterlist is allocated
- * using kmalloc.  The caller must free the memory.
- */
-static struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt,
-                                                 int nr_pages)
-{
-       struct scatterlist *sglist;
-       struct page *pg;
-       int i;
-
-       sglist = vzalloc(array_size(nr_pages, sizeof(*sglist)));
-       if (NULL == sglist)
-               return NULL;
-       sg_init_table(sglist, nr_pages);
-       for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
-               pg = vmalloc_to_page(virt);
-               if (NULL == pg)
-                       goto err;
-               BUG_ON(PageHighMem(pg));
-               sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
-       }
-       return sglist;
-
-err:
-       vfree(sglist);
-       return NULL;
-}
-
-/*
- * Return a scatterlist for a an array of userpages (NULL on errors).
- * Memory for the scatterlist is allocated using kmalloc.  The caller
- * must free the memory.
- */
-static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
-                                       int nr_pages, int offset, size_t size)
-{
-       struct scatterlist *sglist;
-       int i;
-
-       if (NULL == pages[0])
-               return NULL;
-       sglist = vmalloc(array_size(nr_pages, sizeof(*sglist)));
-       if (NULL == sglist)
-               return NULL;
-       sg_init_table(sglist, nr_pages);
-
-       if (PageHighMem(pages[0]))
-               /* DMA to highmem pages might not work */
-               goto highmem;
-       sg_set_page(&sglist[0], pages[0],
-                       min_t(size_t, PAGE_SIZE - offset, size), offset);
-       size -= min_t(size_t, PAGE_SIZE - offset, size);
-       for (i = 1; i < nr_pages; i++) {
-               if (NULL == pages[i])
-                       goto nopage;
-               if (PageHighMem(pages[i]))
-                       goto highmem;
-               sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
-               size -= min_t(size_t, PAGE_SIZE, size);
-       }
-       return sglist;
-
-nopage:
-       dprintk(2, "sgl: oops - no page\n");
-       vfree(sglist);
-       return NULL;
-
-highmem:
-       dprintk(2, "sgl: oops - highmem page\n");
-       vfree(sglist);
-       return NULL;
-}
-
-/* --------------------------------------------------------------------- */
-
-struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf)
-{
-       struct videobuf_dma_sg_memory *mem = buf->priv;
-       BUG_ON(!mem);
-
-       MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
-       return &mem->dma;
-}
-EXPORT_SYMBOL_GPL(videobuf_to_dma);
-
-static void videobuf_dma_init(struct videobuf_dmabuf *dma)
-{
-       memset(dma, 0, sizeof(*dma));
-       dma->magic = MAGIC_DMABUF;
-}
-
-static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma,
-                       int direction, unsigned long data, unsigned long size)
-{
-       unsigned int gup_flags = FOLL_LONGTERM;
-       unsigned long first, last;
-       int err;
-
-       dma->direction = direction;
-       switch (dma->direction) {
-       case DMA_FROM_DEVICE:
-               gup_flags |= FOLL_WRITE;
-               break;
-       case DMA_TO_DEVICE:
-               break;
-       default:
-               BUG();
-       }
-
-       first = (data          & PAGE_MASK) >> PAGE_SHIFT;
-       last  = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT;
-       dma->offset = data & ~PAGE_MASK;
-       dma->size = size;
-       dma->nr_pages = last-first+1;
-       dma->pages = kmalloc_array(dma->nr_pages, sizeof(struct page *),
-                                  GFP_KERNEL);
-       if (NULL == dma->pages)
-               return -ENOMEM;
-
-       dprintk(1, "init user [0x%lx+0x%lx => %lu pages]\n",
-               data, size, dma->nr_pages);
-
-       err = pin_user_pages(data & PAGE_MASK, dma->nr_pages, gup_flags,
-                            dma->pages);
-
-       if (err != dma->nr_pages) {
-               dma->nr_pages = (err >= 0) ? err : 0;
-               dprintk(1, "pin_user_pages: err=%d [%lu]\n", err,
-                       dma->nr_pages);
-               return err < 0 ? err : -EINVAL;
-       }
-       return 0;
-}
-
-static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction,
-                          unsigned long data, unsigned long size)
-{
-       int ret;
-
-       mmap_read_lock(current->mm);
-       ret = videobuf_dma_init_user_locked(dma, direction, data, size);
-       mmap_read_unlock(current->mm);
-
-       return ret;
-}
-
-static int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction,
-                                   unsigned long nr_pages)
-{
-       int i;
-
-       dprintk(1, "init kernel [%lu pages]\n", nr_pages);
-
-       dma->direction = direction;
-       dma->vaddr_pages = kcalloc(nr_pages, sizeof(*dma->vaddr_pages),
-                                  GFP_KERNEL);
-       if (!dma->vaddr_pages)
-               return -ENOMEM;
-
-       dma->dma_addr = kcalloc(nr_pages, sizeof(*dma->dma_addr), GFP_KERNEL);
-       if (!dma->dma_addr) {
-               kfree(dma->vaddr_pages);
-               return -ENOMEM;
-       }
-       for (i = 0; i < nr_pages; i++) {
-               void *addr;
-
-               addr = dma_alloc_coherent(dma->dev, PAGE_SIZE,
-                                         &(dma->dma_addr[i]), GFP_KERNEL);
-               if (addr == NULL)
-                       goto out_free_pages;
-
-               dma->vaddr_pages[i] = virt_to_page(addr);
-       }
-       dma->vaddr = vmap(dma->vaddr_pages, nr_pages, VM_MAP | VM_IOREMAP,
-                         PAGE_KERNEL);
-       if (NULL == dma->vaddr) {
-               dprintk(1, "vmalloc_32(%lu pages) failed\n", nr_pages);
-               goto out_free_pages;
-       }
-
-       dprintk(1, "vmalloc is at addr %p, size=%lu\n",
-               dma->vaddr, nr_pages << PAGE_SHIFT);
-
-       memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT);
-       dma->nr_pages = nr_pages;
-
-       return 0;
-out_free_pages:
-       while (i > 0) {
-               void *addr;
-
-               i--;
-               addr = page_address(dma->vaddr_pages[i]);
-               dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]);
-       }
-       kfree(dma->dma_addr);
-       dma->dma_addr = NULL;
-       kfree(dma->vaddr_pages);
-       dma->vaddr_pages = NULL;
-
-       return -ENOMEM;
-
-}
-
-static int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction,
-                             dma_addr_t addr, unsigned long nr_pages)
-{
-       dprintk(1, "init overlay [%lu pages @ bus 0x%lx]\n",
-               nr_pages, (unsigned long)addr);
-       dma->direction = direction;
-
-       if (0 == addr)
-               return -EINVAL;
-
-       dma->bus_addr = addr;
-       dma->nr_pages = nr_pages;
-
-       return 0;
-}
-
-static int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma)
-{
-       MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
-       BUG_ON(0 == dma->nr_pages);
-
-       if (dma->pages) {
-               dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
-                                                  dma->offset, dma->size);
-       }
-       if (dma->vaddr) {
-               dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr,
-                                                    dma->nr_pages);
-       }
-       if (dma->bus_addr) {
-               dma->sglist = vmalloc(sizeof(*dma->sglist));
-               if (NULL != dma->sglist) {
-                       dma->sglen = 1;
-                       sg_dma_address(&dma->sglist[0]) = dma->bus_addr
-                                                       & PAGE_MASK;
-                       dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK;
-                       sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE;
-               }
-       }
-       if (NULL == dma->sglist) {
-               dprintk(1, "scatterlist is NULL\n");
-               return -ENOMEM;
-       }
-       if (!dma->bus_addr) {
-               dma->sglen = dma_map_sg(dev, dma->sglist,
-                                       dma->nr_pages, dma->direction);
-               if (0 == dma->sglen) {
-                       printk(KERN_WARNING
-                              "%s: videobuf_map_sg failed\n", __func__);
-                       vfree(dma->sglist);
-                       dma->sglist = NULL;
-                       dma->sglen = 0;
-                       return -ENOMEM;
-               }
-       }
-
-       return 0;
-}
-
-int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma)
-{
-       MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
-
-       if (!dma->sglen)
-               return 0;
-
-       dma_unmap_sg(dev, dma->sglist, dma->nr_pages, dma->direction);
-
-       vfree(dma->sglist);
-       dma->sglist = NULL;
-       dma->sglen = 0;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_unmap);
-
-int videobuf_dma_free(struct videobuf_dmabuf *dma)
-{
-       int i;
-       MAGIC_CHECK(dma->magic, MAGIC_DMABUF);
-       BUG_ON(dma->sglen);
-
-       if (dma->pages) {
-               unpin_user_pages_dirty_lock(dma->pages, dma->nr_pages,
-                                           dma->direction == DMA_FROM_DEVICE);
-               kfree(dma->pages);
-               dma->pages = NULL;
-       }
-
-       if (dma->dma_addr) {
-               for (i = 0; i < dma->nr_pages; i++) {
-                       void *addr;
-
-                       addr = page_address(dma->vaddr_pages[i]);
-                       dma_free_coherent(dma->dev, PAGE_SIZE, addr,
-                                         dma->dma_addr[i]);
-               }
-               kfree(dma->dma_addr);
-               dma->dma_addr = NULL;
-               kfree(dma->vaddr_pages);
-               dma->vaddr_pages = NULL;
-               vunmap(dma->vaddr);
-               dma->vaddr = NULL;
-       }
-
-       if (dma->bus_addr)
-               dma->bus_addr = 0;
-       dma->direction = DMA_NONE;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(videobuf_dma_free);
-
-/* --------------------------------------------------------------------- */
-
-static void videobuf_vm_open(struct vm_area_struct *vma)
-{
-       struct videobuf_mapping *map = vma->vm_private_data;
-
-       dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
-               map->count, vma->vm_start, vma->vm_end);
-
-       map->count++;
-}
-
-static void videobuf_vm_close(struct vm_area_struct *vma)
-{
-       struct videobuf_mapping *map = vma->vm_private_data;
-       struct videobuf_queue *q = map->q;
-       struct videobuf_dma_sg_memory *mem;
-       int i;
-
-       dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
-               map->count, vma->vm_start, vma->vm_end);
-
-       map->count--;
-       if (0 == map->count) {
-               dprintk(1, "munmap %p q=%p\n", map, q);
-               videobuf_queue_lock(q);
-               for (i = 0; i < VIDEO_MAX_FRAME; i++) {
-                       if (NULL == q->bufs[i])
-                               continue;
-                       mem = q->bufs[i]->priv;
-                       if (!mem)
-                               continue;
-
-                       MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
-                       if (q->bufs[i]->map != map)
-                               continue;
-                       q->bufs[i]->map   = NULL;
-                       q->bufs[i]->baddr = 0;
-                       q->ops->buf_release(q, q->bufs[i]);
-               }
-               videobuf_queue_unlock(q);
-               kfree(map);
-       }
-}
-
-/*
- * Get a anonymous page for the mapping.  Make sure we can DMA to that
- * memory location with 32bit PCI devices (i.e. don't use highmem for
- * now ...).  Bounce buffers don't work very well for the data rates
- * video capture has.
- */
-static vm_fault_t videobuf_vm_fault(struct vm_fault *vmf)
-{
-       struct vm_area_struct *vma = vmf->vma;
-       struct page *page;
-
-       dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n",
-               vmf->address, vma->vm_start, vma->vm_end);
-
-       page = alloc_page(GFP_USER | __GFP_DMA32);
-       if (!page)
-               return VM_FAULT_OOM;
-       clear_user_highpage(page, vmf->address);
-       vmf->page = page;
-
-       return 0;
-}
-
-static const struct vm_operations_struct videobuf_vm_ops = {
-       .open   = videobuf_vm_open,
-       .close  = videobuf_vm_close,
-       .fault  = videobuf_vm_fault,
-};
-
-/* ---------------------------------------------------------------------
- * SG handlers for the generic methods
- */
-
-/* Allocated area consists on 3 parts:
-       struct video_buffer
-       struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
-       struct videobuf_dma_sg_memory
- */
-
-static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
-{
-       struct videobuf_dma_sg_memory *mem;
-       struct videobuf_buffer *vb;
-
-       vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
-       if (!vb)
-               return vb;
-
-       mem = vb->priv = ((char *)vb) + size;
-       mem->magic = MAGIC_SG_MEM;
-
-       videobuf_dma_init(&mem->dma);
-
-       dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
-               __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
-               mem, (long)sizeof(*mem));
-
-       return vb;
-}
-
-static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
-{
-       struct videobuf_dma_sg_memory *mem = buf->priv;
-       BUG_ON(!mem);
-
-       MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
-       return mem->dma.vaddr;
-}
-
-static int __videobuf_iolock(struct videobuf_queue *q,
-                            struct videobuf_buffer *vb,
-                            struct v4l2_framebuffer *fbuf)
-{
-       struct videobuf_dma_sg_memory *mem = vb->priv;
-       unsigned long pages;
-       dma_addr_t bus;
-       int err;
-
-       BUG_ON(!mem);
-
-       MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
-       if (!mem->dma.dev)
-               mem->dma.dev = q->dev;
-       else
-               WARN_ON(mem->dma.dev != q->dev);
-
-       switch (vb->memory) {
-       case V4L2_MEMORY_MMAP:
-       case V4L2_MEMORY_USERPTR:
-               if (0 == vb->baddr) {
-                       /* no userspace addr -- kernel bounce buffer */
-                       pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
-                       err = videobuf_dma_init_kernel(&mem->dma,
-                                                      DMA_FROM_DEVICE,
-                                                      pages);
-                       if (0 != err)
-                               return err;
-               } else if (vb->memory == V4L2_MEMORY_USERPTR) {
-                       /* dma directly to userspace */
-                       err = videobuf_dma_init_user(&mem->dma,
-                                                    DMA_FROM_DEVICE,
-                                                    vb->baddr, vb->bsize);
-                       if (0 != err)
-                               return err;
-               } else {
-                       /* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP
-                       buffers can only be called from videobuf_qbuf
-                       we take current->mm->mmap_lock there, to prevent
-                       locking inversion, so don't take it here */
-
-                       err = videobuf_dma_init_user_locked(&mem->dma,
-                                                     DMA_FROM_DEVICE,
-                                                     vb->baddr, vb->bsize);
-                       if (0 != err)
-                               return err;
-               }
-               break;
-       case V4L2_MEMORY_OVERLAY:
-               if (NULL == fbuf)
-                       return -EINVAL;
-               /* FIXME: need sanity checks for vb->boff */
-               /*
-                * Using a double cast to avoid compiler warnings when
-                * building for PAE. Compiler doesn't like direct casting
-                * of a 32 bit ptr to 64 bit integer.
-                */
-               bus   = (dma_addr_t)(unsigned long)fbuf->base + vb->boff;
-               pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT;
-               err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE,
-                                               bus, pages);
-               if (0 != err)
-                       return err;
-               break;
-       default:
-               BUG();
-       }
-       err = videobuf_dma_map(q->dev, &mem->dma);
-       if (0 != err)
-               return err;
-
-       return 0;
-}
-
-static int __videobuf_sync(struct videobuf_queue *q,
-                          struct videobuf_buffer *buf)
-{
-       struct videobuf_dma_sg_memory *mem = buf->priv;
-       BUG_ON(!mem || !mem->dma.sglen);
-
-       MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-       MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF);
-
-       dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
-                           mem->dma.nr_pages, mem->dma.direction);
-
-       return 0;
-}
-
-static int __videobuf_mmap_mapper(struct videobuf_queue *q,
-                                 struct videobuf_buffer *buf,
-                                 struct vm_area_struct *vma)
-{
-       struct videobuf_dma_sg_memory *mem = buf->priv;
-       struct videobuf_mapping *map;
-       unsigned int first, last, size = 0, i;
-       int retval;
-
-       retval = -EINVAL;
-
-       BUG_ON(!mem);
-       MAGIC_CHECK(mem->magic, MAGIC_SG_MEM);
-
-       /* look for first buffer to map */
-       for (first = 0; first < VIDEO_MAX_FRAME; first++) {
-               if (buf == q->bufs[first]) {
-                       size = PAGE_ALIGN(q->bufs[first]->bsize);
-                       break;
-               }
-       }
-
-       /* paranoia, should never happen since buf is always valid. */
-       if (!size) {
-               dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n",
-                               (vma->vm_pgoff << PAGE_SHIFT));
-               goto done;
-       }
-
-       last = first;
-
-       /* create mapping + update buffer list */
-       retval = -ENOMEM;
-       map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
-       if (NULL == map)
-               goto done;
-
-       size = 0;
-       for (i = first; i <= last; i++) {
-               if (NULL == q->bufs[i])
-                       continue;
-               q->bufs[i]->map   = map;
-               q->bufs[i]->baddr = vma->vm_start + size;
-               size += PAGE_ALIGN(q->bufs[i]->bsize);
-       }
-
-       map->count    = 1;
-       map->q        = q;
-       vma->vm_ops   = &videobuf_vm_ops;
-       /* using shared anonymous pages */
-       vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_IO);
-       vma->vm_private_data = map;
-       dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n",
-               map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
-       retval = 0;
-
-done:
-       return retval;
-}
-
-static struct videobuf_qtype_ops sg_ops = {
-       .magic        = MAGIC_QTYPE_OPS,
-
-       .alloc_vb     = __videobuf_alloc_vb,
-       .iolock       = __videobuf_iolock,
-       .sync         = __videobuf_sync,
-       .mmap_mapper  = __videobuf_mmap_mapper,
-       .vaddr        = __videobuf_to_vaddr,
-};
-
-void *videobuf_sg_alloc(size_t size)
-{
-       struct videobuf_queue q;
-
-       /* Required to make generic handler to call __videobuf_alloc */
-       q.int_ops = &sg_ops;
-
-       q.msize = size;
-
-       return videobuf_alloc_vb(&q);
-}
-EXPORT_SYMBOL_GPL(videobuf_sg_alloc);
-
-void videobuf_queue_sg_init(struct videobuf_queue *q,
-                        const struct videobuf_queue_ops *ops,
-                        struct device *dev,
-                        spinlock_t *irqlock,
-                        enum v4l2_buf_type type,
-                        enum v4l2_field field,
-                        unsigned int msize,
-                        void *priv,
-                        struct mutex *ext_lock)
-{
-       videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
-                                priv, &sg_ops, ext_lock);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
-
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
deleted file mode 100644 (file)
index 85c7090..0000000
+++ /dev/null
@@ -1,326 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * helper functions for vmalloc video4linux capture buffers
- *
- * The functions expect the hardware being able to scatter gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks).  They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab <mchehab@kernel.org>
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/pgtable.h>
-
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <asm/page.h>
-
-#include <media/videobuf-vmalloc.h>
-
-#define MAGIC_DMABUF   0x17760309
-#define MAGIC_VMAL_MEM 0x18221223
-
-#define MAGIC_CHECK(is, should)                                                \
-       if (unlikely((is) != (should))) {                               \
-               printk(KERN_ERR "magic mismatch: %x (expected %x)\n",   \
-                               is, should);                            \
-               BUG();                                                  \
-       }
-
-static int debug;
-module_param(debug, int, 0644);
-
-MODULE_DESCRIPTION("helper module to manage video4linux vmalloc buffers");
-MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
-MODULE_LICENSE("GPL");
-
-#define dprintk(level, fmt, arg...)                                    \
-       if (debug >= level)                                             \
-               printk(KERN_DEBUG "vbuf-vmalloc: " fmt , ## arg)
-
-
-/***************************************************************************/
-
-static void videobuf_vm_open(struct vm_area_struct *vma)
-{
-       struct videobuf_mapping *map = vma->vm_private_data;
-
-       dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
-               map->count, vma->vm_start, vma->vm_end);
-
-       map->count++;
-}
-
-static void videobuf_vm_close(struct vm_area_struct *vma)
-{
-       struct videobuf_mapping *map = vma->vm_private_data;
-       struct videobuf_queue *q = map->q;
-       int i;
-
-       dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
-               map->count, vma->vm_start, vma->vm_end);
-
-       map->count--;
-       if (0 == map->count) {
-               struct videobuf_vmalloc_memory *mem;
-
-               dprintk(1, "munmap %p q=%p\n", map, q);
-               videobuf_queue_lock(q);
-
-               /* We need first to cancel streams, before unmapping */
-               if (q->streaming)
-                       videobuf_queue_cancel(q);
-
-               for (i = 0; i < VIDEO_MAX_FRAME; i++) {
-                       if (NULL == q->bufs[i])
-                               continue;
-
-                       if (q->bufs[i]->map != map)
-                               continue;
-
-                       mem = q->bufs[i]->priv;
-                       if (mem) {
-                               /* This callback is called only if kernel has
-                                  allocated memory and this memory is mmapped.
-                                  In this case, memory should be freed,
-                                  in order to do memory unmap.
-                                */
-
-                               MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
-                               /* vfree is not atomic - can't be
-                                  called with IRQ's disabled
-                                */
-                               dprintk(1, "%s: buf[%d] freeing (%p)\n",
-                                       __func__, i, mem->vaddr);
-
-                               vfree(mem->vaddr);
-                               mem->vaddr = NULL;
-                       }
-
-                       q->bufs[i]->map   = NULL;
-                       q->bufs[i]->baddr = 0;
-               }
-
-               kfree(map);
-
-               videobuf_queue_unlock(q);
-       }
-
-       return;
-}
-
-static const struct vm_operations_struct videobuf_vm_ops = {
-       .open     = videobuf_vm_open,
-       .close    = videobuf_vm_close,
-};
-
-/* ---------------------------------------------------------------------
- * vmalloc handlers for the generic methods
- */
-
-/* Allocated area consists on 3 parts:
-       struct video_buffer
-       struct <driver>_buffer (cx88_buffer, saa7134_buf, ...)
-       struct videobuf_dma_sg_memory
- */
-
-static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
-{
-       struct videobuf_vmalloc_memory *mem;
-       struct videobuf_buffer *vb;
-
-       vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
-       if (!vb)
-               return vb;
-
-       mem = vb->priv = ((char *)vb) + size;
-       mem->magic = MAGIC_VMAL_MEM;
-
-       dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n",
-               __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb),
-               mem, (long)sizeof(*mem));
-
-       return vb;
-}
-
-static int __videobuf_iolock(struct videobuf_queue *q,
-                            struct videobuf_buffer *vb,
-                            struct v4l2_framebuffer *fbuf)
-{
-       struct videobuf_vmalloc_memory *mem = vb->priv;
-       int pages;
-
-       BUG_ON(!mem);
-
-       MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
-       switch (vb->memory) {
-       case V4L2_MEMORY_MMAP:
-               dprintk(1, "%s memory method MMAP\n", __func__);
-
-               /* All handling should be done by __videobuf_mmap_mapper() */
-               if (!mem->vaddr) {
-                       printk(KERN_ERR "memory is not allocated/mmapped.\n");
-                       return -EINVAL;
-               }
-               break;
-       case V4L2_MEMORY_USERPTR:
-               pages = PAGE_ALIGN(vb->size);
-
-               dprintk(1, "%s memory method USERPTR\n", __func__);
-
-               if (vb->baddr) {
-                       printk(KERN_ERR "USERPTR is currently not supported\n");
-                       return -EINVAL;
-               }
-
-               /* The only USERPTR currently supported is the one needed for
-                * read() method.
-                */
-
-               mem->vaddr = vmalloc_user(pages);
-               if (!mem->vaddr) {
-                       printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
-                       return -ENOMEM;
-               }
-               dprintk(1, "vmalloc is at addr %p (%d pages)\n",
-                       mem->vaddr, pages);
-               break;
-       case V4L2_MEMORY_OVERLAY:
-       default:
-               dprintk(1, "%s memory method OVERLAY/unknown\n", __func__);
-
-               /* Currently, doesn't support V4L2_MEMORY_OVERLAY */
-               printk(KERN_ERR "Memory method currently unsupported.\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int __videobuf_mmap_mapper(struct videobuf_queue *q,
-                                 struct videobuf_buffer *buf,
-                                 struct vm_area_struct *vma)
-{
-       struct videobuf_vmalloc_memory *mem;
-       struct videobuf_mapping *map;
-       int retval, pages;
-
-       dprintk(1, "%s\n", __func__);
-
-       /* create mapping + update buffer list */
-       map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
-       if (NULL == map)
-               return -ENOMEM;
-
-       buf->map = map;
-       map->q     = q;
-
-       buf->baddr = vma->vm_start;
-
-       mem = buf->priv;
-       BUG_ON(!mem);
-       MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
-       pages = PAGE_ALIGN(vma->vm_end - vma->vm_start);
-       mem->vaddr = vmalloc_user(pages);
-       if (!mem->vaddr) {
-               printk(KERN_ERR "vmalloc (%d pages) failed\n", pages);
-               goto error;
-       }
-       dprintk(1, "vmalloc is at addr %p (%d pages)\n", mem->vaddr, pages);
-
-       /* Try to remap memory */
-       retval = remap_vmalloc_range(vma, mem->vaddr, 0);
-       if (retval < 0) {
-               printk(KERN_ERR "mmap: remap failed with error %d. ", retval);
-               vfree(mem->vaddr);
-               goto error;
-       }
-
-       vma->vm_ops          = &videobuf_vm_ops;
-       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
-       vma->vm_private_data = map;
-
-       dprintk(1, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
-               map, q, vma->vm_start, vma->vm_end,
-               (long int)buf->bsize,
-               vma->vm_pgoff, buf->i);
-
-       videobuf_vm_open(vma);
-
-       return 0;
-
-error:
-       mem = NULL;
-       kfree(map);
-       return -ENOMEM;
-}
-
-static struct videobuf_qtype_ops qops = {
-       .magic        = MAGIC_QTYPE_OPS,
-
-       .alloc_vb     = __videobuf_alloc_vb,
-       .iolock       = __videobuf_iolock,
-       .mmap_mapper  = __videobuf_mmap_mapper,
-       .vaddr        = videobuf_to_vmalloc,
-};
-
-void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
-                        const struct videobuf_queue_ops *ops,
-                        struct device *dev,
-                        spinlock_t *irqlock,
-                        enum v4l2_buf_type type,
-                        enum v4l2_field field,
-                        unsigned int msize,
-                        void *priv,
-                        struct mutex *ext_lock)
-{
-       videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
-                                priv, &qops, ext_lock);
-}
-EXPORT_SYMBOL_GPL(videobuf_queue_vmalloc_init);
-
-void *videobuf_to_vmalloc(struct videobuf_buffer *buf)
-{
-       struct videobuf_vmalloc_memory *mem = buf->priv;
-       BUG_ON(!mem);
-       MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
-       return mem->vaddr;
-}
-EXPORT_SYMBOL_GPL(videobuf_to_vmalloc);
-
-void videobuf_vmalloc_free(struct videobuf_buffer *buf)
-{
-       struct videobuf_vmalloc_memory *mem = buf->priv;
-
-       /* mmapped memory can't be freed here, otherwise mmapped region
-          would be released, while still needed. In this case, the memory
-          release should happen inside videobuf_vm_close().
-          So, it should free memory only if the memory were allocated for
-          read() operation.
-        */
-       if ((buf->memory != V4L2_MEMORY_USERPTR) || buf->baddr)
-               return;
-
-       if (!mem)
-               return;
-
-       MAGIC_CHECK(mem->magic, MAGIC_VMAL_MEM);
-
-       vfree(mem->vaddr);
-       mem->vaddr = NULL;
-
-       return;
-}
-EXPORT_SYMBOL_GPL(videobuf_vmalloc_free);
-
index 67d6e70b4eab11e14800629a5479a3f864f2ff90..a083921a8968ba8471b440fe1259e00ac4f3c812 100644 (file)
@@ -979,35 +979,6 @@ static int tegra_mc_probe(struct platform_device *pdev)
                }
        }
 
-       if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && !mc->soc->smmu) {
-               mc->gart = tegra_gart_probe(&pdev->dev, mc);
-               if (IS_ERR(mc->gart)) {
-                       dev_err(&pdev->dev, "failed to probe GART: %ld\n",
-                               PTR_ERR(mc->gart));
-                       mc->gart = NULL;
-               }
-       }
-
-       return 0;
-}
-
-static int __maybe_unused tegra_mc_suspend(struct device *dev)
-{
-       struct tegra_mc *mc = dev_get_drvdata(dev);
-
-       if (mc->soc->ops && mc->soc->ops->suspend)
-               return mc->soc->ops->suspend(mc);
-
-       return 0;
-}
-
-static int __maybe_unused tegra_mc_resume(struct device *dev)
-{
-       struct tegra_mc *mc = dev_get_drvdata(dev);
-
-       if (mc->soc->ops && mc->soc->ops->resume)
-               return mc->soc->ops->resume(mc);
-
        return 0;
 }
 
@@ -1020,15 +991,10 @@ static void tegra_mc_sync_state(struct device *dev)
                icc_sync_state(dev);
 }
 
-static const struct dev_pm_ops tegra_mc_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(tegra_mc_suspend, tegra_mc_resume)
-};
-
 static struct platform_driver tegra_mc_driver = {
        .driver = {
                .name = "tegra-mc",
                .of_match_table = tegra_mc_of_match,
-               .pm = &tegra_mc_pm_ops,
                .suppress_bind_attrs = true,
                .sync_state = tegra_mc_sync_state,
        },
index 544bfd216a220bac539a5044066e4d41ad074271..aa4b97d5e73236f2c5185430d06a6fcf69f32c09 100644 (file)
@@ -688,32 +688,6 @@ static int tegra20_mc_probe(struct tegra_mc *mc)
        return 0;
 }
 
-static int tegra20_mc_suspend(struct tegra_mc *mc)
-{
-       int err;
-
-       if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
-               err = tegra_gart_suspend(mc->gart);
-               if (err < 0)
-                       return err;
-       }
-
-       return 0;
-}
-
-static int tegra20_mc_resume(struct tegra_mc *mc)
-{
-       int err;
-
-       if (IS_ENABLED(CONFIG_TEGRA_IOMMU_GART) && mc->gart) {
-               err = tegra_gart_resume(mc->gart);
-               if (err < 0)
-                       return err;
-       }
-
-       return 0;
-}
-
 static irqreturn_t tegra20_mc_handle_irq(int irq, void *data)
 {
        struct tegra_mc *mc = data;
@@ -789,8 +763,6 @@ static irqreturn_t tegra20_mc_handle_irq(int irq, void *data)
 
 static const struct tegra_mc_ops tegra20_mc_ops = {
        .probe = tegra20_mc_probe,
-       .suspend = tegra20_mc_suspend,
-       .resume = tegra20_mc_resume,
        .handle_irq = tegra20_mc_handle_irq,
 };
 
index dbbf7db4ff2f49b885bd113b0d232cbf6715e3de..f61a80597a22d01fd7741f0491f088545b97cb29 100644 (file)
@@ -92,7 +92,7 @@ struct at24_data {
         * them for us.
         */
        u8 bank_addr_shift;
-       struct regmap *client_regmaps[];
+       struct regmap *client_regmaps[] __counted_by(num_addresses);
 };
 
 /*
@@ -191,9 +191,13 @@ AT24_CHIP_DATA(at24_data_24c16, 16384 / 8, 0);
 AT24_CHIP_DATA(at24_data_24cs16, 16,
        AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
 AT24_CHIP_DATA(at24_data_24c32, 32768 / 8, AT24_FLAG_ADDR16);
+/* M24C32-D Additional Write lockable page (M24C32-D order codes) */
+AT24_CHIP_DATA(at24_data_24c32d_wlp, 32, AT24_FLAG_ADDR16);
 AT24_CHIP_DATA(at24_data_24cs32, 16,
        AT24_FLAG_ADDR16 | AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
 AT24_CHIP_DATA(at24_data_24c64, 65536 / 8, AT24_FLAG_ADDR16);
+/* M24C64-D Additional Write lockable page (M24C64-D order codes) */
+AT24_CHIP_DATA(at24_data_24c64d_wlp, 32, AT24_FLAG_ADDR16);
 AT24_CHIP_DATA(at24_data_24cs64, 16,
        AT24_FLAG_ADDR16 | AT24_FLAG_SERIAL | AT24_FLAG_READONLY);
 AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16);
@@ -222,8 +226,10 @@ static const struct i2c_device_id at24_ids[] = {
        { "24c16",      (kernel_ulong_t)&at24_data_24c16 },
        { "24cs16",     (kernel_ulong_t)&at24_data_24cs16 },
        { "24c32",      (kernel_ulong_t)&at24_data_24c32 },
+       { "24c32d-wl",  (kernel_ulong_t)&at24_data_24c32d_wlp },
        { "24cs32",     (kernel_ulong_t)&at24_data_24cs32 },
        { "24c64",      (kernel_ulong_t)&at24_data_24c64 },
+       { "24c64-wl",   (kernel_ulong_t)&at24_data_24c64d_wlp },
        { "24cs64",     (kernel_ulong_t)&at24_data_24cs64 },
        { "24c128",     (kernel_ulong_t)&at24_data_24c128 },
        { "24c256",     (kernel_ulong_t)&at24_data_24c256 },
@@ -252,8 +258,10 @@ static const struct of_device_id at24_of_match[] = {
        { .compatible = "atmel,24c16",          .data = &at24_data_24c16 },
        { .compatible = "atmel,24cs16",         .data = &at24_data_24cs16 },
        { .compatible = "atmel,24c32",          .data = &at24_data_24c32 },
+       { .compatible = "atmel,24c32d-wl",      .data = &at24_data_24c32d_wlp },
        { .compatible = "atmel,24cs32",         .data = &at24_data_24cs32 },
        { .compatible = "atmel,24c64",          .data = &at24_data_24c64 },
+       { .compatible = "atmel,24c64d-wl",      .data = &at24_data_24c64d_wlp },
        { .compatible = "atmel,24cs64",         .data = &at24_data_24cs64 },
        { .compatible = "atmel,24c128",         .data = &at24_data_24c128 },
        { .compatible = "atmel,24c256",         .data = &at24_data_24c256 },
@@ -509,32 +517,6 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count)
        return 0;
 }
 
-static const struct at24_chip_data *at24_get_chip_data(struct device *dev)
-{
-       struct device_node *of_node = dev->of_node;
-       const struct at24_chip_data *cdata;
-       const struct i2c_device_id *id;
-
-       id = i2c_match_id(at24_ids, to_i2c_client(dev));
-
-       /*
-        * The I2C core allows OF nodes compatibles to match against the
-        * I2C device ID table as a fallback, so check not only if an OF
-        * node is present but also if it matches an OF device ID entry.
-        */
-       if (of_node && of_match_device(at24_of_match, dev))
-               cdata = of_device_get_match_data(dev);
-       else if (id)
-               cdata = (void *)id->driver_data;
-       else
-               cdata = acpi_device_get_match_data(dev);
-
-       if (!cdata)
-               return ERR_PTR(-ENODEV);
-
-       return cdata;
-}
-
 static int at24_make_dummy_client(struct at24_data *at24, unsigned int index,
                                  struct i2c_client *base_client,
                                  struct regmap_config *regmap_config)
@@ -601,9 +583,9 @@ static int at24_probe(struct i2c_client *client)
        i2c_fn_block = i2c_check_functionality(client->adapter,
                                               I2C_FUNC_SMBUS_WRITE_I2C_BLOCK);
 
-       cdata = at24_get_chip_data(dev);
-       if (IS_ERR(cdata))
-               return PTR_ERR(cdata);
+       cdata = i2c_get_match_data(client);
+       if (!cdata)
+               return -ENODEV;
 
        err = device_property_read_u32(dev, "pagesize", &page_size);
        if (err)
index fc28714ae3a610e44c361e8c608a70148fe02f91..6a33889d0902af7430d0d51451148787e8cbacf0 100644 (file)
@@ -68,12 +68,20 @@ static void lkdtm_CFI_FORWARD_PROTO(void)
 #define no_pac_addr(addr)      \
        ((__force __typeof__(addr))((uintptr_t)(addr) | PAGE_OFFSET))
 
+#ifdef CONFIG_RISCV
+/* https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#frame-pointer-convention */
+#define FRAME_RA_OFFSET                (-1)
+#else
+#define FRAME_RA_OFFSET                1
+#endif
+
 /* The ultimate ROP gadget. */
 static noinline __no_ret_protection
 void set_return_addr_unchecked(unsigned long *expected, unsigned long *addr)
 {
        /* Use of volatile is to make sure final write isn't seen as a dead store. */
-       unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+       unsigned long * volatile *ret_addr =
+               (unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET;
 
        /* Make sure we've found the right place on the stack before writing it. */
        if (no_pac_addr(*ret_addr) == expected)
@@ -88,7 +96,8 @@ static noinline
 void set_return_addr(unsigned long *expected, unsigned long *addr)
 {
        /* Use of volatile is to make sure final write isn't seen as a dead store. */
-       unsigned long * volatile *ret_addr = (unsigned long **)__builtin_frame_address(0) + 1;
+       unsigned long * volatile *ret_addr =
+               (unsigned long **)__builtin_frame_address(0) + FRAME_RA_OFFSET;
 
        /* Make sure we've found the right place on the stack before writing it. */
        if (no_pac_addr(*ret_addr) == expected)
index 3a8f27c3e310a5cd7dea54cac77774166b561122..152dfe593c43a55c2bbd3468a62b8c2504661962 100644 (file)
@@ -2381,8 +2381,10 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
                        }
                        ret = mmc_blk_cqe_issue_flush(mq, req);
                        break;
-               case REQ_OP_READ:
                case REQ_OP_WRITE:
+                       card->written_flag = true;
+                       fallthrough;
+               case REQ_OP_READ:
                        if (host->cqe_enabled)
                                ret = mmc_blk_cqe_issue_rw_rq(mq, req);
                        else
index 4edf9057fa79d320ef24dd47f066f1eb32c47600..b7754a1b8d9788c7f672da0ce8d443cf3cd40968 100644 (file)
@@ -280,4 +280,8 @@ static inline int mmc_card_broken_sd_cache(const struct mmc_card *c)
        return c->quirks & MMC_QUIRK_BROKEN_SD_CACHE;
 }
 
+static inline int mmc_card_broken_cache_flush(const struct mmc_card *c)
+{
+       return c->quirks & MMC_QUIRK_BROKEN_CACHE_FLUSH;
+}
 #endif
index 8180983bd4029de769a6bf331e2b0a907266cbb4..705942edacc6a8b77e8208b910360758ebccd2c0 100644 (file)
@@ -104,7 +104,7 @@ static int mmc_decode_cid(struct mmc_card *card)
        case 3: /* MMC v3.1 - v3.3 */
        case 4: /* MMC v4 */
                card->cid.manfid        = UNSTUFF_BITS(resp, 120, 8);
-               card->cid.oemid         = UNSTUFF_BITS(resp, 104, 8);
+               card->cid.oemid         = UNSTUFF_BITS(resp, 104, 16);
                card->cid.prod_name[0]  = UNSTUFF_BITS(resp, 96, 8);
                card->cid.prod_name[1]  = UNSTUFF_BITS(resp, 88, 8);
                card->cid.prod_name[2]  = UNSTUFF_BITS(resp, 80, 8);
@@ -2086,13 +2086,17 @@ static int _mmc_flush_cache(struct mmc_host *host)
 {
        int err = 0;
 
+       if (mmc_card_broken_cache_flush(host->card) && !host->card->written_flag)
+               return 0;
+
        if (_mmc_cache_enabled(host)) {
                err = mmc_switch(host->card, EXT_CSD_CMD_SET_NORMAL,
                                 EXT_CSD_FLUSH_CACHE, 1,
                                 CACHE_FLUSH_TIMEOUT_MS);
                if (err)
-                       pr_err("%s: cache flush error %d\n",
-                              mmc_hostname(host), err);
+                       pr_err("%s: cache flush error %d\n", mmc_hostname(host), err);
+               else
+                       host->card->written_flag = false;
        }
 
        return err;
index 32b64b564fb1fd774b1546d0f21c918647e71159..cca71867bc4ad6075d24ed617aa83fbe9752f6dd 100644 (file)
@@ -110,11 +110,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
                  MMC_QUIRK_TRIM_BROKEN),
 
        /*
-        * Micron MTFC4GACAJCN-1M advertises TRIM but it does not seems to
-        * support being used to offload WRITE_ZEROES.
+        * Micron MTFC4GACAJCN-1M supports TRIM but does not appear to support
+        * WRITE_ZEROES offloading. It also supports caching, but the cache can
+        * only be flushed after a write has occurred.
         */
        MMC_FIXUP("Q2J54A", CID_MANFID_MICRON, 0x014e, add_quirk_mmc,
-                 MMC_QUIRK_TRIM_BROKEN),
+                 MMC_QUIRK_TRIM_BROKEN | MMC_QUIRK_BROKEN_CACHE_FLUSH),
 
        /*
         * Kingston EMMC04G-M627 advertises TRIM but it does not seems to
index d83261e857a5d674a129a80dd7d7098e623d4b15..d8a991b349a823487554394a489edb7a2dd85f66 100644 (file)
@@ -28,6 +28,9 @@
 #define PCI_GLI_9750_PM_CTRL   0xFC
 #define   PCI_GLI_9750_PM_STATE          GENMASK(1, 0)
 
+#define PCI_GLI_9750_CORRERR_MASK                              0x214
+#define   PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT         BIT(12)
+
 #define SDHCI_GLI_9750_CFG2          0x848
 #define   SDHCI_GLI_9750_CFG2_L1DLY    GENMASK(28, 24)
 #define   GLI_9750_CFG2_L1DLY_VALUE    0x1F
 #define PCI_GLI_9755_PM_CTRL     0xFC
 #define   PCI_GLI_9755_PM_STATE    GENMASK(1, 0)
 
+#define PCI_GLI_9755_CORRERR_MASK                              0x214
+#define   PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT         BIT(12)
+
 #define SDHCI_GLI_9767_GM_BURST_SIZE                   0x510
 #define   SDHCI_GLI_9767_GM_BURST_SIZE_AXI_ALWAYS_SET    BIT(8)
 
@@ -561,6 +567,11 @@ static void gl9750_hw_setting(struct sdhci_host *host)
        value &= ~PCI_GLI_9750_PM_STATE;
        pci_write_config_dword(pdev, PCI_GLI_9750_PM_CTRL, value);
 
+       /* mask the replay timer timeout of AER */
+       pci_read_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, &value);
+       value |= PCI_GLI_9750_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
+       pci_write_config_dword(pdev, PCI_GLI_9750_CORRERR_MASK, value);
+
        gl9750_wt_off(host);
 }
 
@@ -770,6 +781,11 @@ static void gl9755_hw_setting(struct sdhci_pci_slot *slot)
        value &= ~PCI_GLI_9755_PM_STATE;
        pci_write_config_dword(pdev, PCI_GLI_9755_PM_CTRL, value);
 
+       /* mask the replay timer timeout of AER */
+       pci_read_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, &value);
+       value |= PCI_GLI_9755_CORRERR_MASK_REPLAY_TIMER_TIMEOUT;
+       pci_write_config_dword(pdev, PCI_GLI_9755_CORRERR_MASK, value);
+
        gl9755_wt_off(pdev);
 }
 
index c125485ba80e9c0bf2d8107ad0e3ad2bb41c780c..967bd2dfcda1b257f518fcde33566a9aefe20a79 100644 (file)
@@ -598,7 +598,7 @@ static int sdhci_am654_get_otap_delay(struct sdhci_host *host,
                return 0;
        }
 
-       for (i = MMC_TIMING_MMC_HS; i <= MMC_TIMING_MMC_HS400; i++) {
+       for (i = MMC_TIMING_LEGACY; i <= MMC_TIMING_MMC_HS400; i++) {
 
                ret = device_property_read_u32(dev, td[i].otap_binding,
                                               &sdhci_am654->otap_del_sel[i]);
index de3f443f5fdcc15879d07a0138193c69666d8e6c..fd67c0682b38ae0c4a4ed2dac2fe1f66e5af5f60 100644 (file)
@@ -2309,6 +2309,7 @@ static int vub300_probe(struct usb_interface *interface,
                vub300->read_only =
                        (0x0010 & vub300->system_port_status.port_flags) ? 1 : 0;
        } else {
+               retval = -EINVAL;
                goto error5;
        }
        usb_set_intfdata(interface, vub300);
index 11b06fefaa0e297ef82e88401bedd48a1c95e657..c10693ba265baef4be26f38e705755d333794c1c 100644 (file)
@@ -422,9 +422,25 @@ read_pri_intelext(struct map_info *map, __u16 adr)
                extra_size = 0;
 
                /* Protection Register info */
-               if (extp->NumProtectionFields)
+               if (extp->NumProtectionFields) {
+                       struct cfi_intelext_otpinfo *otp =
+                               (struct cfi_intelext_otpinfo *)&extp->extra[0];
+
                        extra_size += (extp->NumProtectionFields - 1) *
-                                     sizeof(struct cfi_intelext_otpinfo);
+                               sizeof(struct cfi_intelext_otpinfo);
+
+                       if (extp_size >= sizeof(*extp) + extra_size) {
+                               int i;
+
+                               /* Do some byteswapping if necessary */
+                               for (i = 0; i < extp->NumProtectionFields - 1; i++) {
+                                       otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
+                                       otp->FactGroups = le16_to_cpu(otp->FactGroups);
+                                       otp->UserGroups = le16_to_cpu(otp->UserGroups);
+                                       otp++;
+                               }
+                       }
+               }
        }
 
        if (extp->MinorVersion >= '1') {
index e8dd6496927ef659d2fc1dc65006e62705910f12..f9d3e32ef8e968a0da31534a27354d030184d539 100644 (file)
@@ -70,12 +70,16 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
        mtd->_read = mapram_read;
        mtd->_write = mapram_write;
        mtd->_panic_write = mapram_write;
-       mtd->_point = mapram_point;
        mtd->_sync = mapram_nop;
-       mtd->_unpoint = mapram_unpoint;
        mtd->flags = MTD_CAP_RAM;
        mtd->writesize = 1;
 
+       /* Disable direct access when NO_XIP is set */
+       if (map->phys != NO_XIP) {
+               mtd->_point = mapram_point;
+               mtd->_unpoint = mapram_unpoint;
+       }
+
        mtd->erasesize = PAGE_SIZE;
        while(mtd->size & (mtd->erasesize - 1))
                mtd->erasesize >>= 1;
index 3af50db8b21b4f274e65a7cffa0acc6441fc7ade..74f559bf8dfb0a9d52dd73a21e0f18d58f1e4f3f 100644 (file)
@@ -357,19 +357,17 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
+static void bcm47xxsflash_bcma_remove(struct platform_device *pdev)
 {
        struct bcm47xxsflash *b47s = platform_get_drvdata(pdev);
 
        mtd_device_unregister(&b47s->mtd);
        iounmap(b47s->window);
-
-       return 0;
 }
 
 static struct platform_driver bcma_sflash_driver = {
        .probe  = bcm47xxsflash_bcma_probe,
-       .remove = bcm47xxsflash_bcma_remove,
+       .remove_new = bcm47xxsflash_bcma_remove,
        .driver = {
                .name = "bcma_sflash",
        },
index 22e73dd6118b937ca1b052bc90e446f620cffadf..a2b643af70194c21fd8b0e9d8059101d0adc6ddf 100644 (file)
@@ -2046,7 +2046,7 @@ err_probe:
  *
  * Returns 0
  */
-static int docg3_release(struct platform_device *pdev)
+static void docg3_release(struct platform_device *pdev)
 {
        struct docg3_cascade *cascade = platform_get_drvdata(pdev);
        struct docg3 *docg3 = cascade->floors[0]->priv;
@@ -2058,7 +2058,6 @@ static int docg3_release(struct platform_device *pdev)
                        doc_release_device(cascade->floors[floor]);
 
        bch_free(docg3->cascade->bch);
-       return 0;
 }
 
 #ifdef CONFIG_OF
@@ -2076,7 +2075,7 @@ static struct platform_driver g3_driver = {
        },
        .suspend        = docg3_suspend,
        .resume         = docg3_resume,
-       .remove         = docg3_release,
+       .remove_new     = docg3_release,
 };
 
 module_platform_driver_probe(g3_driver, docg3_probe);
index 208bd4d871f42cc3e6adf0beaf4769d50b1736ac..1bf192f229d7126f125c346852aeafa65a0a5feb 100644 (file)
@@ -388,20 +388,18 @@ static int phram_probe(struct platform_device *pdev)
                               PAGE_SIZE);
 }
 
-static int phram_remove(struct platform_device *pdev)
+static void phram_remove(struct platform_device *pdev)
 {
        struct phram_mtd_list *phram = platform_get_drvdata(pdev);
 
        mtd_device_unregister(&phram->mtd);
        phram_unmap(phram);
        kfree(phram);
-
-       return 0;
 }
 
 static struct platform_driver phram_driver = {
        .probe          = phram_probe,
-       .remove         = phram_remove,
+       .remove_new     = phram_remove,
        .driver         = {
                .name           = "phram",
                .of_match_table = of_match_ptr(phram_of_match),
index 36e060386e59dfee5225bb4d4df151d47778df5b..66044f4f5bade81e4cd8dcf698fa2500a13fbbf0 100644 (file)
@@ -265,14 +265,12 @@ static int powernv_flash_probe(struct platform_device *pdev)
  *
  * Returns 0
  */
-static int powernv_flash_release(struct platform_device *pdev)
+static void powernv_flash_release(struct platform_device *pdev)
 {
        struct powernv_flash *data = dev_get_drvdata(&(pdev->dev));
 
        /* All resources should be freed automatically */
        WARN_ON(mtd_device_unregister(&data->mtd));
-
-       return 0;
 }
 
 static const struct of_device_id powernv_flash_match[] = {
@@ -285,7 +283,7 @@ static struct platform_driver powernv_flash_driver = {
                .name           = "powernv_flash",
                .of_match_table = powernv_flash_match,
        },
-       .remove         = powernv_flash_release,
+       .remove_new     = powernv_flash_release,
        .probe          = powernv_flash_probe,
 };
 
index 0a35e5236ae593753e79e8c078f52c2c7c92dbd9..1574296d47e2f46a884b79f75a5a6615f791ae9c 100644 (file)
@@ -1031,7 +1031,7 @@ err:
  *
  * free all allocations and delete the partitions.
  */
-static int spear_smi_remove(struct platform_device *pdev)
+static void spear_smi_remove(struct platform_device *pdev)
 {
        struct spear_smi *dev;
        struct spear_snor_flash *flash;
@@ -1048,8 +1048,6 @@ static int spear_smi_remove(struct platform_device *pdev)
                /* clean up mtd stuff */
                WARN_ON(mtd_device_unregister(&flash->mtd));
        }
-
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -1095,7 +1093,7 @@ static struct platform_driver spear_smi_driver = {
                .pm = &spear_smi_pm_ops,
        },
        .probe = spear_smi_probe,
-       .remove = spear_smi_remove,
+       .remove_new = spear_smi_remove,
 };
 module_platform_driver(spear_smi_driver);
 
index 95530cbbb1e0d2d2444938ed7fb47dd8fcd03812..3268de5fc7802c73d45b499af341d41fda646a78 100644 (file)
@@ -2097,13 +2097,11 @@ static int stfsm_probe(struct platform_device *pdev)
        return mtd_device_register(&fsm->mtd, NULL, 0);
 }
 
-static int stfsm_remove(struct platform_device *pdev)
+static void stfsm_remove(struct platform_device *pdev)
 {
        struct stfsm *fsm = platform_get_drvdata(pdev);
 
        WARN_ON(mtd_device_unregister(&fsm->mtd));
-
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -2134,7 +2132,7 @@ MODULE_DEVICE_TABLE(of, stfsm_match);
 
 static struct platform_driver stfsm_driver = {
        .probe          = stfsm_probe,
-       .remove         = stfsm_remove,
+       .remove_new     = stfsm_remove,
        .driver         = {
                .name   = "st-spi-fsm",
                .of_match_table = stfsm_match,
index a6161ce340d4eb5b26a9d236721d7a6c0ac93048..dbe3eb361cca28a8b9480f5f314714fbd0769ed6 100644 (file)
@@ -229,7 +229,7 @@ disable_mux:
        return ret;
 }
 
-static int am654_hbmc_remove(struct platform_device *pdev)
+static void am654_hbmc_remove(struct platform_device *pdev)
 {
        struct am654_hbmc_priv *priv = platform_get_drvdata(pdev);
        struct am654_hbmc_device_priv *dev_priv = priv->hbdev.priv;
@@ -241,8 +241,6 @@ static int am654_hbmc_remove(struct platform_device *pdev)
 
        if (dev_priv->rx_chan)
                dma_release_channel(dev_priv->rx_chan);
-
-       return 0;
 }
 
 static const struct of_device_id am654_hbmc_dt_ids[] = {
@@ -256,7 +254,7 @@ MODULE_DEVICE_TABLE(of, am654_hbmc_dt_ids);
 
 static struct platform_driver am654_hbmc_platform_driver = {
        .probe = am654_hbmc_probe,
-       .remove = am654_hbmc_remove,
+       .remove_new = am654_hbmc_remove,
        .driver = {
                .name = "hbmc-am654",
                .of_match_table = am654_hbmc_dt_ids,
index ef32fca5f785e3766f3e08c8ddb84f9d7dc43856..b22aa57119f238765266f5c89c12c17981b0e03f 100644 (file)
@@ -154,20 +154,18 @@ out_disable_rpm:
        return error;
 }
 
-static int rpcif_hb_remove(struct platform_device *pdev)
+static void rpcif_hb_remove(struct platform_device *pdev)
 {
        struct rpcif_hyperbus *hyperbus = platform_get_drvdata(pdev);
 
        hyperbus_unregister_device(&hyperbus->hbdev);
 
        pm_runtime_disable(hyperbus->rpc.dev);
-
-       return 0;
 }
 
 static struct platform_driver rpcif_platform_driver = {
        .probe  = rpcif_hb_probe,
-       .remove = rpcif_hb_remove,
+       .remove_new = rpcif_hb_remove,
        .driver = {
                .name   = "rpc-if-hyperflash",
        },
index f4e5174b2449c1111a756956fdeefb315facb6f7..9169e1155dbbd7abb01f092866e5eb456039314b 100644 (file)
@@ -476,11 +476,9 @@ static int lpddr2_nvm_probe(struct platform_device *pdev)
 /*
  * lpddr2_nvm driver remove method
  */
-static int lpddr2_nvm_remove(struct platform_device *pdev)
+static void lpddr2_nvm_remove(struct platform_device *pdev)
 {
        WARN_ON(mtd_device_unregister(dev_get_drvdata(&pdev->dev)));
-
-       return 0;
 }
 
 /* Initialize platform_driver data structure for lpddr2_nvm */
@@ -489,7 +487,7 @@ static struct platform_driver lpddr2_nvm_drv = {
                .name   = "lpddr2_nvm",
        },
        .probe          = lpddr2_nvm_probe,
-       .remove         = lpddr2_nvm_remove,
+       .remove_new     = lpddr2_nvm_remove,
 };
 
 module_platform_driver(lpddr2_nvm_drv);
index 3c3939bc2dadc627dba29fbf1085bcdd76068e78..14e36ae71958f9b3436e1b868a9843981304c947 100644 (file)
@@ -61,7 +61,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
                mtd->_point = lpddr_point;
                mtd->_unpoint = lpddr_unpoint;
        }
-       mtd->size = 1 << lpddr->qinfo->DevSizeShift;
+       mtd->size = 1ULL << lpddr->qinfo->DevSizeShift;
        mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
        mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
 
index a1da1c8973c02a7f0a808a62872f00e5b5c6a47e..124b13c5d74758a423fbdbe5e09d402db88cd657 100644 (file)
@@ -166,8 +166,7 @@ err_destroy:
        return err;
 }
 
-static int
-ltq_mtd_remove(struct platform_device *pdev)
+static void ltq_mtd_remove(struct platform_device *pdev)
 {
        struct ltq_mtd *ltq_mtd = platform_get_drvdata(pdev);
 
@@ -175,7 +174,6 @@ ltq_mtd_remove(struct platform_device *pdev)
                mtd_device_unregister(ltq_mtd->mtd);
                map_destroy(ltq_mtd->mtd);
        }
-       return 0;
 }
 
 static const struct of_device_id ltq_mtd_match[] = {
@@ -186,7 +184,7 @@ MODULE_DEVICE_TABLE(of, ltq_mtd_match);
 
 static struct platform_driver ltq_mtd_driver = {
        .probe = ltq_mtd_probe,
-       .remove = ltq_mtd_remove,
+       .remove_new = ltq_mtd_remove,
        .driver = {
                .name = "ltq-nor",
                .of_match_table = ltq_mtd_match,
index fc872133928247de91cb810122c8fa805e569c4b..746a27d15d44006c810edc186ee1cdaffc4a2907 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/map.h>
 #include <linux/mtd/partitions.h>
@@ -37,7 +38,7 @@
 #include <linux/mtd/concat.h>
 #include <linux/mtd/cfi_endian.h>
 #include <linux/io.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/pm_runtime.h>
 #include <linux/gpio/consumer.h>
 
@@ -62,7 +63,7 @@ struct physmap_flash_info {
        unsigned int            win_order;
 };
 
-static int physmap_flash_remove(struct platform_device *dev)
+static void physmap_flash_remove(struct platform_device *dev)
 {
        struct physmap_flash_info *info;
        struct physmap_flash_data *physmap_data;
@@ -88,7 +89,6 @@ static int physmap_flash_remove(struct platform_device *dev)
 
        pm_runtime_put(&dev->dev);
        pm_runtime_disable(&dev->dev);
-       return 0;
 }
 
 static void physmap_set_vpp(struct map_info *map, int state)
@@ -296,14 +296,9 @@ static const char * const *of_get_part_probes(struct platform_device *dev)
 static const char *of_select_probe_type(struct platform_device *dev)
 {
        struct device_node *dp = dev->dev.of_node;
-       const struct of_device_id *match;
        const char *probe_type;
 
-       match = of_match_device(of_flash_match, &dev->dev);
-       if (!match)
-               return NULL;
-
-       probe_type = match->data;
+       probe_type = device_get_match_data(&dev->dev);
        if (probe_type)
                return probe_type;
 
@@ -626,7 +621,7 @@ static void physmap_flash_shutdown(struct platform_device *dev)
 
 static struct platform_driver physmap_flash_driver = {
        .probe          = physmap_flash_probe,
-       .remove         = physmap_flash_remove,
+       .remove_new     = physmap_flash_remove,
        .shutdown       = physmap_flash_shutdown,
        .driver         = {
                .name   = "physmap-flash",
index 4c921dce739664bc8d94f37a58d483b06ef852dc..8b736f029f817f4138d4299d2ae66b342e6d83e6 100644 (file)
@@ -65,14 +65,14 @@ static inline void platram_setrw(struct platram_info *info, int to)
  * called to remove the device from the driver's control
 */
 
-static int platram_remove(struct platform_device *pdev)
+static void platram_remove(struct platform_device *pdev)
 {
        struct platram_info *info = to_platram_info(pdev);
 
        dev_dbg(&pdev->dev, "removing device\n");
 
        if (info == NULL)
-               return 0;
+               return;
 
        if (info->mtd) {
                mtd_device_unregister(info->mtd);
@@ -84,8 +84,6 @@ static int platram_remove(struct platform_device *pdev)
        platram_setrw(info, PLATRAM_RO);
 
        kfree(info);
-
-       return 0;
 }
 
 /* platram_probe
@@ -207,7 +205,7 @@ MODULE_ALIAS("platform:mtd-ram");
 
 static struct platform_driver platram_driver = {
        .probe          = platram_probe,
-       .remove         = platram_remove,
+       .remove_new     = platram_remove,
        .driver         = {
                .name   = "mtd-ram",
        },
index 62a5bf41a6d72b4eb67c1ea3811abbf2db53af48..f2a2d4706f1fb9befdc8c8ff552d6745309194c0 100644 (file)
@@ -98,7 +98,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int pxa2xx_flash_remove(struct platform_device *dev)
+static void pxa2xx_flash_remove(struct platform_device *dev)
 {
        struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
 
@@ -109,7 +109,6 @@ static int pxa2xx_flash_remove(struct platform_device *dev)
        if (info->map.cached)
                iounmap(info->map.cached);
        kfree(info);
-       return 0;
 }
 
 #ifdef CONFIG_PM
@@ -129,7 +128,7 @@ static struct platform_driver pxa2xx_flash_driver = {
                .name           = "pxa2xx-flash",
        },
        .probe          = pxa2xx_flash_probe,
-       .remove         = pxa2xx_flash_remove,
+       .remove_new     = pxa2xx_flash_remove,
        .shutdown       = pxa2xx_flash_shutdown,
 };
 
index d3d4e987c163e23cd5e72acd1a1a2368ff10b0c9..d4ce2376d33f4a1a438c71490a10ee23e82bae5b 100644 (file)
@@ -285,19 +285,17 @@ static int sa1100_mtd_probe(struct platform_device *pdev)
        return err;
 }
 
-static int sa1100_mtd_remove(struct platform_device *pdev)
+static void sa1100_mtd_remove(struct platform_device *pdev)
 {
        struct sa_info *info = platform_get_drvdata(pdev);
        struct flash_platform_data *plat = dev_get_platdata(&pdev->dev);
 
        sa1100_destroy(info, plat);
-
-       return 0;
 }
 
 static struct platform_driver sa1100_mtd_driver = {
        .probe          = sa1100_mtd_probe,
-       .remove         = sa1100_mtd_remove,
+       .remove_new     = sa1100_mtd_remove,
        .driver         = {
                .name   = "sa1100-mtd",
        },
index 2bfdf1b7e18a10fff90e8ac3ae9847a2a77624dd..f58cfb15d6e85e55a339faef62cae0c5eaa3719d 100644 (file)
@@ -118,7 +118,7 @@ static int uflash_probe(struct platform_device *op)
        return uflash_devinit(op, dp);
 }
 
-static int uflash_remove(struct platform_device *op)
+static void uflash_remove(struct platform_device *op)
 {
        struct uflash_dev *up = dev_get_drvdata(&op->dev);
 
@@ -132,8 +132,6 @@ static int uflash_remove(struct platform_device *op)
        }
 
        kfree(up);
-
-       return 0;
 }
 
 static const struct of_device_id uflash_match[] = {
@@ -151,7 +149,7 @@ static struct platform_driver uflash_driver = {
                .of_match_table = uflash_match,
        },
        .probe          = uflash_probe,
-       .remove         = uflash_remove,
+       .remove_new     = uflash_remove,
 };
 
 module_platform_driver(uflash_driver);
index 74dd1b74008d8cce8eedda8422e5f5c0e4783be8..bb0759ca12f1c9df8052f770b375ab9b80c423e4 100644 (file)
@@ -1506,6 +1506,8 @@ int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
        ret = mtd_read_oob(mtd, from, &ops);
        *retlen = ops.retlen;
 
+       WARN_ON_ONCE(*retlen != len && mtd_is_bitflip_or_eccerr(ret));
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(mtd_read);
index 23483db8f30c90e17f832c9cb7cab7bdb2610ce6..6811a714349d7f1014f7d8717c2db72e0b9b258c 100644 (file)
@@ -426,7 +426,11 @@ int add_mtd_partitions(struct mtd_info *parent,
                mtd_add_partition_attrs(child);
 
                /* Look for subpartitions */
-               parse_mtd_partitions(child, parts[i].types, NULL);
+               ret = parse_mtd_partitions(child, parts[i].types, NULL);
+               if (ret < 0) {
+                       pr_err("Failed to parse subpartitions: %d\n", ret);
+                       goto err_del_partitions;
+               }
 
                cur_offset = child->part.offset + child->part.size;
        }
index a492051c46f5968cd4b0564a2eb125250ee36fc4..2ff1d2b13e3c3a4f8f5ab4b6b18ab801b1844662 100644 (file)
@@ -481,7 +481,7 @@ static int anfc_read_page_hw_ecc(struct nand_chip *chip, u8 *buf,
                }
 
                bf = nand_check_erased_ecc_chunk(raw_buf, chip->ecc.size,
-                                                NULL, 0, NULL, 0,
+                                                anand->hw_ecc, chip->ecc.bytes, NULL, 0,
                                                 chip->ecc.strength);
                if (bf > 0) {
                        mtd->ecc_stats.corrected += bf;
index 3f494f7c7ecbdb8e464d873909dfe70d370ea57e..4cb478bbee4a480ebb32113c9fe4eb8250101796 100644 (file)
@@ -165,7 +165,7 @@ struct atmel_nand {
        struct atmel_pmecc_user *pmecc;
        struct gpio_desc *cdgpio;
        int numcs;
-       struct atmel_nand_cs cs[];
+       struct atmel_nand_cs cs[] __counted_by(numcs);
 };
 
 static inline struct atmel_nand *to_atmel_nand(struct nand_chip *chip)
index 034ec564c2edb0a592ad4f4ac5331af69ae29ca7..04f84d87c657dd2a451221556ed0f30f75052983 100644 (file)
 #include <linux/module.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/rawnand.h>
-#include <linux/of_device.h>
 #include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/slab.h>
 
 /*
@@ -526,7 +528,7 @@ struct cdns_nand_chip {
        /* ECC strength index. */
        u8 corr_str_idx;
 
-       u8 cs[];
+       u8 cs[] __counted_by(nsels);
 };
 
 struct ecc_info {
@@ -2995,15 +2997,11 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
        struct cadence_nand_dt *dt;
        struct cdns_nand_ctrl *cdns_ctrl;
        int ret;
-       const struct of_device_id *of_id;
        const struct cadence_nand_dt_devdata *devdata;
        u32 val;
 
-       of_id = of_match_device(cadence_nand_dt_ids, &ofdev->dev);
-       if (of_id) {
-               ofdev->id_entry = of_id->data;
-               devdata = of_id->data;
-       } else {
+       devdata = device_get_match_data(&ofdev->dev);
+       if (!devdata) {
                pr_err("Failed to find the right device id.\n");
                return -ENOMEM;
        }
index ac46eb7956ce81b328ac7bcdd7f85659ee13bf84..5f2fab022fc5cc193c577bde7a02327339b66e9c 100644 (file)
@@ -328,7 +328,7 @@ struct denali_chip {
        struct nand_chip chip;
        struct list_head node;
        unsigned int nsels;
-       struct denali_chip_sel sels[];
+       struct denali_chip_sel sels[] __counted_by(nsels);
 };
 
 /**
index c816dc1372454ca87bb60ba6a0449cbb7fc16373..0e7dd9ca4b2bfe155e6d58ac77ac0a61739dac68 100644 (file)
@@ -46,7 +46,7 @@ struct ingenic_nfc {
        struct nand_controller controller;
        unsigned int num_banks;
        struct list_head chips;
-       struct ingenic_nand_cs cs[];
+       struct ingenic_nand_cs cs[] __counted_by(num_banks);
 };
 
 struct ingenic_nand {
index cb5d88f42297b2a0b45188f8baec981bde4c7c3c..f0ad2308f6d50319cfd9e836503d9f27063c3aed 100644 (file)
@@ -619,6 +619,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
        ebu_host->cs_num = cs;
 
        resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs);
+       if (!resname) {
+               ret = -ENOMEM;
+               goto err_of_node_put;
+       }
+
        ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev,
                                                                          resname);
        if (IS_ERR(ebu_host->cs[cs].chipaddr)) {
@@ -649,6 +654,11 @@ static int ebu_nand_probe(struct platform_device *pdev)
        }
 
        resname = devm_kasprintf(dev, GFP_KERNEL, "addr_sel%d", cs);
+       if (!resname) {
+               ret = -ENOMEM;
+               goto err_cleanup_dma;
+       }
+
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
        if (!res) {
                ret = -EINVAL;
index e9932da18bdd56939d03f0c09418d60f9dbcedf6..b7162ced9efa61742374438025bb395b39f08446 100644 (file)
@@ -106,7 +106,6 @@ int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
                               int oob_required, int page);
 int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
                                int oob_required, int page);
-int nand_exit_status_op(struct nand_chip *chip);
 int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
                            unsigned int len);
 void nand_decode_ext_id(struct nand_chip *chip);
index b841a81cb12822d2fba717faf50e25f0f23d9350..a466987448502e0b576b612f42139f878d4014ff 100644 (file)
@@ -348,7 +348,7 @@ struct marvell_nand_chip {
        int addr_cyc;
        int selected_die;
        unsigned int nsels;
-       struct marvell_nand_chip_sel sels[];
+       struct marvell_nand_chip_sel sels[] __counted_by(nsels);
 };
 
 static inline struct marvell_nand_chip *to_marvell_nand(struct nand_chip *chip)
index 25e3c1cb605e7f95318181616b5470b7cace104d..71ec4052e52a689d056aff07802dde7b738f74ef 100644 (file)
@@ -128,7 +128,7 @@ struct meson_nfc_nand_chip {
        u8 *data_buf;
        __le64 *info_buf;
        u32 nsels;
-       u8 sels[];
+       u8 sels[] __counted_by(nsels);
 };
 
 struct meson_nand_ecc {
@@ -1134,6 +1134,9 @@ static int meson_nfc_clk_init(struct meson_nfc *nfc)
        init.name = devm_kasprintf(nfc->dev,
                                   GFP_KERNEL, "%s#div",
                                   dev_name(nfc->dev));
+       if (!init.name)
+               return -ENOMEM;
+
        init.ops = &clk_divider_ops;
        nfc_divider_parent_data[0].fw_name = "device";
        init.parent_data = nfc_divider_parent_data;
index 29c8bddde67ff6df528fe496566eb6927543c53a..60198e33d2d553f39aa09636ace80899392d30b3 100644 (file)
@@ -130,7 +130,7 @@ struct mtk_nfc_nand_chip {
        u32 spare_per_sector;
 
        int nsels;
-       u8 sels[];
+       u8 sels[] __counted_by(nsels);
        /* nothing after this field */
 };
 
index 1fcac403cee60f70348014f41d0cea32f6fcf5b3..9e24bedffd89a0ef0247675af14239aa35d3c7b3 100644 (file)
@@ -42,7 +42,6 @@
 #include <linux/io.h>
 #include <linux/mtd/partitions.h>
 #include <linux/of.h>
-#include <linux/of_gpio.h>
 #include <linux/gpio/consumer.h>
 
 #include "internals.h"
index c45bef6158e7ac773b0bf0bbe8a3835746a14198..cf76afc6c0edc2d7699b6b5e0b72058140a71520 100644 (file)
@@ -1881,8 +1881,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
 
        case NAND_OMAP_PREFETCH_IRQ:
                info->gpmc_irq_fifo = platform_get_irq(info->pdev, 0);
-               if (info->gpmc_irq_fifo <= 0)
-                       return -ENODEV;
+               if (info->gpmc_irq_fifo < 0)
+                       return info->gpmc_irq_fifo;
                err = devm_request_irq(dev, info->gpmc_irq_fifo,
                                       omap_nand_irq, IRQF_SHARED,
                                       "gpmc-nand-fifo", info);
@@ -1894,8 +1894,8 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
                }
 
                info->gpmc_irq_count = platform_get_irq(info->pdev, 1);
-               if (info->gpmc_irq_count <= 0)
-                       return -ENODEV;
+               if (info->gpmc_irq_count < 0)
+                       return info->gpmc_irq_count;
                err = devm_request_irq(dev, info->gpmc_irq_count,
                                       omap_nand_irq, IRQF_SHARED,
                                       "gpmc-nand-count", info);
index 589021ea9eb2acf10bb8ccca557fc46aafe1803f..c9a01feff8dfe53f98e3ca23be4da1672c21da18 100644 (file)
@@ -210,7 +210,7 @@ struct rnand_chip {
        u32 tim_gen_seq1;
        u32 tim_gen_seq2;
        u32 tim_gen_seq3;
-       struct rnand_chip_sel sels[];
+       struct rnand_chip_sel sels[] __counted_by(nsels);
 };
 
 struct rnandc {
index 5bc90ffa721f0d7c685da2d769e3afd3f62a4d38..596cf9a782749a7576265f01ffd4d464677c12e8 100644 (file)
@@ -158,8 +158,7 @@ struct rk_nfc_nand_chip {
        u32 timing;
 
        u8 nsels;
-       u8 sels[];
-       /* Nothing after this field. */
+       u8 sels[] __counted_by(nsels);
 };
 
 struct rk_nfc {
@@ -1119,7 +1118,7 @@ static int rk_nfc_nand_chip_init(struct device *dev, struct rk_nfc *nfc,
                return -EINVAL;
        }
 
-       rknand = devm_kzalloc(dev, sizeof(*rknand) + nsels * sizeof(u8),
+       rknand = devm_kzalloc(dev, struct_size(rknand, sels, nsels),
                              GFP_KERNEL);
        if (!rknand)
                return -ENOMEM;
index 3e5df75cbc98237548fdfc411c32569a985cf07b..2a8164efb273d7d67b2e869fde122f096f2237ff 100644 (file)
@@ -1215,6 +1215,7 @@ static void flctl_remove(struct platform_device *pdev)
 }
 
 static struct platform_driver flctl_driver = {
+       .probe          = flctl_probe,
        .remove_new     = flctl_remove,
        .driver = {
                .name   = "sh_flctl",
@@ -1222,7 +1223,7 @@ static struct platform_driver flctl_driver = {
        },
 };
 
-module_platform_driver_probe(flctl_driver, flctl_probe);
+module_platform_driver(flctl_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Yoshihiro Shimoda");
index 9abf38049d35f3a25e22a5cd82f440e88a2c9cdd..4ec17c8bce5a1b34d4fc3433b8f29e575911722b 100644 (file)
@@ -197,7 +197,7 @@ struct sunxi_nand_chip {
        u32 timing_cfg;
        u32 timing_ctl;
        int nsels;
-       struct sunxi_nand_chip_sel sels[];
+       struct sunxi_nand_chip_sel sels[] __counted_by(nsels);
 };
 
 static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
index eb0b9d16e8dae98e856cc1737da85c82f850f1d4..a553e3ac8ff41d063fbb1f7bcb27345c11b7fb35 100644 (file)
@@ -1197,6 +1197,10 @@ static int tegra_nand_probe(struct platform_device *pdev)
        init_completion(&ctrl->dma_complete);
 
        ctrl->irq = platform_get_irq(pdev, 0);
+       if (ctrl->irq < 0) {
+               err = ctrl->irq;
+               goto err_put_pm;
+       }
        err = devm_request_irq(&pdev->dev, ctrl->irq, tegra_nand_irq, 0,
                               dev_name(&pdev->dev), ctrl);
        if (err) {
index 3f783b8f76c968c249e866d14b0deff5788e721d..f31d23219f914895b3dfca541b75f1b1821807a9 100644 (file)
@@ -29,8 +29,9 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/rawnand.h>
 #include <linux/mtd/partitions.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/slab.h>
 #include <linux/swab.h>
 
@@ -810,7 +811,6 @@ static int vf610_nfc_probe(struct platform_device *pdev)
        struct mtd_info *mtd;
        struct nand_chip *chip;
        struct device_node *child;
-       const struct of_device_id *of_id;
        int err;
        int irq;
 
@@ -840,12 +840,10 @@ static int vf610_nfc_probe(struct platform_device *pdev)
                return PTR_ERR(nfc->clk);
        }
 
-       of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
-       if (!of_id)
+       nfc->variant = (enum vf610_nfc_variant)device_get_match_data(&pdev->dev);
+       if (!nfc->variant)
                return -ENODEV;
 
-       nfc->variant = (uintptr_t)of_id->data;
-
        for_each_available_child_of_node(nfc->dev->of_node, child) {
                if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) {
 
index 51d802a165edf0fe5f51c4fbc57e0c8bb350cd50..008549011fb91c6330fb00c5990c1b4d167bae02 100644 (file)
@@ -6,7 +6,6 @@
  */
 
 #include <linux/mtd/rawnand.h>
-#include <linux/of_gpio.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 
index cd8b66bf77405cae2cb0fa311802cc05e1ee44da..19cc77288ebbcabfd71c8ed094095f4bc3f29e0c 100644 (file)
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-spinand-objs := core.o alliancememory.o ato.o esmt.o gigadevice.o macronix.o
+spinand-objs := core.o alliancememory.o ato.o esmt.o foresee.o gigadevice.o macronix.o
 spinand-objs += micron.o paragon.o toshiba.o winbond.o xtx.o
 obj-$(CONFIG_MTD_SPI_NAND) += spinand.o
index 393ff37f0d23c15567765457a248ac9c0cbbdb77..849ccfedbc72dfb27cd6277d3cacc90a6289c66d 100644 (file)
@@ -940,6 +940,7 @@ static const struct spinand_manufacturer *spinand_manufacturers[] = {
        &alliancememory_spinand_manufacturer,
        &ato_spinand_manufacturer,
        &esmt_c8_spinand_manufacturer,
+       &foresee_spinand_manufacturer,
        &gigadevice_spinand_manufacturer,
        &macronix_spinand_manufacturer,
        &micron_spinand_manufacturer,
diff --git a/drivers/mtd/nand/spi/foresee.c b/drivers/mtd/nand/spi/foresee.c
new file mode 100644 (file)
index 0000000..e0d2d92
--- /dev/null
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2023, SberDevices. All Rights Reserved.
+ *
+ * Author: Martin Kurbanov <mmkurbanov@salutedevices.com>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mtd/spinand.h>
+
+#define SPINAND_MFR_FORESEE            0xCD
+
+static SPINAND_OP_VARIANTS(read_cache_variants,
+               SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_X2_OP(0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
+               SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));
+
+static SPINAND_OP_VARIANTS(write_cache_variants,
+               SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
+               SPINAND_PROG_LOAD(true, 0, NULL, 0));
+
+static SPINAND_OP_VARIANTS(update_cache_variants,
+               SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
+               SPINAND_PROG_LOAD(false, 0, NULL, 0));
+
+static int f35sqa002g_ooblayout_ecc(struct mtd_info *mtd, int section,
+                                   struct mtd_oob_region *region)
+{
+       return -ERANGE;
+}
+
+static int f35sqa002g_ooblayout_free(struct mtd_info *mtd, int section,
+                                    struct mtd_oob_region *region)
+{
+       if (section)
+               return -ERANGE;
+
+       /* Reserve 2 bytes for the BBM. */
+       region->offset = 2;
+       region->length = 62;
+
+       return 0;
+}
+
+static const struct mtd_ooblayout_ops f35sqa002g_ooblayout = {
+       .ecc = f35sqa002g_ooblayout_ecc,
+       .free = f35sqa002g_ooblayout_free,
+};
+
+static int f35sqa002g_ecc_get_status(struct spinand_device *spinand, u8 status)
+{
+       struct nand_device *nand = spinand_to_nand(spinand);
+
+       switch (status & STATUS_ECC_MASK) {
+       case STATUS_ECC_NO_BITFLIPS:
+               return 0;
+
+       case STATUS_ECC_HAS_BITFLIPS:
+               return nanddev_get_ecc_conf(nand)->strength;
+
+       default:
+               break;
+       }
+
+       /* More than 1-bit error was detected in one or more sectors and
+        * cannot be corrected.
+        */
+       return -EBADMSG;
+}
+
+static const struct spinand_info foresee_spinand_table[] = {
+       SPINAND_INFO("F35SQA002G",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0x72, 0x72),
+                    NAND_MEMORG(1, 2048, 64, 64, 2048, 40, 1, 1, 1),
+                    NAND_ECCREQ(1, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    SPINAND_HAS_QE_BIT,
+                    SPINAND_ECCINFO(&f35sqa002g_ooblayout,
+                                    f35sqa002g_ecc_get_status)),
+};
+
+static const struct spinand_manufacturer_ops foresee_spinand_manuf_ops = {
+};
+
+const struct spinand_manufacturer foresee_spinand_manufacturer = {
+       .id = SPINAND_MFR_FORESEE,
+       .name = "FORESEE",
+       .chips = foresee_spinand_table,
+       .nchips = ARRAY_SIZE(foresee_spinand_table),
+       .ops = &foresee_spinand_manuf_ops,
+};
index f507e37593012d18f71b285a58027fbff4e0d339..1a473021cca5110ac045d98963b58ddf5f940f90 100644 (file)
@@ -169,6 +169,51 @@ static const struct spinand_info winbond_spinand_table[] = {
                                              &update_cache_variants),
                     0,
                     SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+       SPINAND_INFO("W25N01JW",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbc, 0x21),
+                    NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+                    NAND_ECCREQ(4, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&w25m02gv_ooblayout, w25n02kv_ecc_get_status)),
+       SPINAND_INFO("W25N02JWZEIF",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xbf, 0x22),
+                    NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 2, 1),
+                    NAND_ECCREQ(4, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+       SPINAND_INFO("W25N512GW",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x20),
+                    NAND_MEMORG(1, 2048, 64, 64, 512, 10, 1, 1, 1),
+                    NAND_ECCREQ(4, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+       SPINAND_INFO("W25N02KWZEIR",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x22),
+                    NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&w25n02kv_ooblayout, w25n02kv_ecc_get_status)),
+       SPINAND_INFO("W25N01GWZEIG",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_DUMMY, 0xba, 0x21),
+                    NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1),
+                    NAND_ECCREQ(4, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&w25m02gv_ooblayout, w25n02kv_ecc_get_status)),
 };
 
 static int winbond_spinand_init(struct spinand_device *spinand)
index 3911520f718c8d843e8a1dbe6fdc53f5c9e43dc0..66a4255bdf06617f76f502ddaa873c1334c24ea4 100644 (file)
@@ -4,6 +4,7 @@
  * Felix Matouschek <felix@matouschek.org>
  */
 
+#include <linux/bitfield.h>
 #include <linux/device.h>
 #include <linux/kernel.h>
 #include <linux/mtd/spinand.h>
 #define XT26G0XA_STATUS_ECC_8_CORRECTED        (3 << 4)
 #define XT26G0XA_STATUS_ECC_UNCOR_ERROR        (2 << 4)
 
+#define XT26XXXD_STATUS_ECC3_ECC2_MASK     GENMASK(7, 6)
+#define XT26XXXD_STATUS_ECC_NO_DETECTED     (0)
+#define XT26XXXD_STATUS_ECC_1_7_CORRECTED   (1)
+#define XT26XXXD_STATUS_ECC_8_CORRECTED     (3)
+#define XT26XXXD_STATUS_ECC_UNCOR_ERROR     (2)
+
 static SPINAND_OP_VARIANTS(read_cache_variants,
                SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(0, 1, NULL, 0),
                SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
@@ -84,6 +91,53 @@ static int xt26g0xa_ecc_get_status(struct spinand_device *spinand,
        return status >> 2;
 }
 
+static int xt26xxxd_ooblayout_ecc(struct mtd_info *mtd, int section,
+                                 struct mtd_oob_region *region)
+{
+       if (section)
+               return -ERANGE;
+
+       region->offset = mtd->oobsize / 2;
+       region->length = mtd->oobsize / 2;
+
+       return 0;
+}
+
+static int xt26xxxd_ooblayout_free(struct mtd_info *mtd, int section,
+                                  struct mtd_oob_region *region)
+{
+       if (section)
+               return -ERANGE;
+
+       region->offset = 2;
+       region->length = mtd->oobsize / 2 - 2;
+
+       return 0;
+}
+
+static const struct mtd_ooblayout_ops xt26xxxd_ooblayout = {
+       .ecc = xt26xxxd_ooblayout_ecc,
+       .free = xt26xxxd_ooblayout_free,
+};
+
+static int xt26xxxd_ecc_get_status(struct spinand_device *spinand,
+                                  u8 status)
+{
+       switch (FIELD_GET(STATUS_ECC_MASK, status)) {
+       case XT26XXXD_STATUS_ECC_NO_DETECTED:
+               return 0;
+       case XT26XXXD_STATUS_ECC_UNCOR_ERROR:
+               return -EBADMSG;
+       case XT26XXXD_STATUS_ECC_1_7_CORRECTED:
+               return 4 + FIELD_GET(XT26XXXD_STATUS_ECC3_ECC2_MASK, status);
+       case XT26XXXD_STATUS_ECC_8_CORRECTED:
+               return 8;
+       default:
+               break;
+       }
+
+       return -EINVAL;
+}
 static const struct spinand_info xtx_spinand_table[] = {
        SPINAND_INFO("XT26G01A",
                     SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0xE1),
@@ -115,6 +169,86 @@ static const struct spinand_info xtx_spinand_table[] = {
                     SPINAND_HAS_QE_BIT,
                     SPINAND_ECCINFO(&xt26g0xa_ooblayout,
                                     xt26g0xa_ecc_get_status)),
+       SPINAND_INFO("XT26G01D",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x31),
+                    NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+                                    xt26xxxd_ecc_get_status)),
+       SPINAND_INFO("XT26G11D",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x34),
+                    NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+                                    xt26xxxd_ecc_get_status)),
+       SPINAND_INFO("XT26Q01D",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x51),
+                    NAND_MEMORG(1, 2048, 128, 64, 1024, 20, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+                                    xt26xxxd_ecc_get_status)),
+       SPINAND_INFO("XT26G02D",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x32),
+                    NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+                                    xt26xxxd_ecc_get_status)),
+       SPINAND_INFO("XT26G12D",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x35),
+                    NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+                                    xt26xxxd_ecc_get_status)),
+       SPINAND_INFO("XT26Q02D",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x52),
+                    NAND_MEMORG(1, 2048, 128, 64, 2048, 40, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+                                    xt26xxxd_ecc_get_status)),
+       SPINAND_INFO("XT26G04D",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x33),
+                    NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+                                    xt26xxxd_ecc_get_status)),
+       SPINAND_INFO("XT26Q04D",
+                    SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x53),
+                    NAND_MEMORG(1, 4096, 256, 64, 2048, 40, 1, 1, 1),
+                    NAND_ECCREQ(8, 512),
+                    SPINAND_INFO_OP_VARIANTS(&read_cache_variants,
+                                             &write_cache_variants,
+                                             &update_cache_variants),
+                    0,
+                    SPINAND_ECCINFO(&xt26xxxd_ooblayout,
+                                    xt26xxxd_ecc_get_status)),
 };
 
 static const struct spinand_manufacturer_ops xtx_spinand_manuf_ops = {
index 60738edcd5d561d54bb33466c6862d016ed96361..da03ab6efe04c6ed16affabbdec2dd7fa7a3eb17 100644 (file)
@@ -1,9 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-config MTD_AR7_PARTS
-       tristate "TI AR7 partitioning parser"
-       help
-         TI AR7 partitioning parser support
-
 config MTD_BCM47XX_PARTS
        tristate "BCM47XX partitioning parser"
        depends on BCM47XX || ARCH_BCM_5301X
index 0e70b621a1d84e8d13d10240c548290bdc1ed7a8..9b00c62b837ae7372286f39013e78450b4194850 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_MTD_AR7_PARTS)            += ar7part.o
 obj-$(CONFIG_MTD_BCM47XX_PARTS)                += bcm47xxpart.o
 obj-$(CONFIG_MTD_BCM63XX_PARTS)                += bcm63xxpart.o
 obj-$(CONFIG_MTD_BRCM_U_BOOT)          += brcm_u-boot.o
diff --git a/drivers/mtd/parsers/ar7part.c b/drivers/mtd/parsers/ar7part.c
deleted file mode 100644 (file)
index 8cd6837..0000000
+++ /dev/null
@@ -1,129 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright © 2007 Eugene Konev <ejka@openwrt.org>
- *
- * TI AR7 flash partition table.
- * Based on ar7 map by Felix Fietkau <nbd@openwrt.org>
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/memblock.h>
-#include <linux/module.h>
-
-#include <uapi/linux/magic.h>
-
-#define AR7_PARTS      4
-#define ROOT_OFFSET    0xe0000
-
-#define LOADER_MAGIC1  le32_to_cpu(0xfeedfa42)
-#define LOADER_MAGIC2  le32_to_cpu(0xfeed1281)
-
-struct ar7_bin_rec {
-       unsigned int checksum;
-       unsigned int length;
-       unsigned int address;
-};
-
-static int create_mtd_partitions(struct mtd_info *master,
-                                const struct mtd_partition **pparts,
-                                struct mtd_part_parser_data *data)
-{
-       struct ar7_bin_rec header;
-       unsigned int offset;
-       size_t len;
-       unsigned int pre_size = master->erasesize, post_size = 0;
-       unsigned int root_offset = ROOT_OFFSET;
-
-       int retries = 10;
-       struct mtd_partition *ar7_parts;
-
-       ar7_parts = kcalloc(AR7_PARTS, sizeof(*ar7_parts), GFP_KERNEL);
-       if (!ar7_parts)
-               return -ENOMEM;
-       ar7_parts[0].name = "loader";
-       ar7_parts[0].offset = 0;
-       ar7_parts[0].size = master->erasesize;
-       ar7_parts[0].mask_flags = MTD_WRITEABLE;
-
-       ar7_parts[1].name = "config";
-       ar7_parts[1].offset = 0;
-       ar7_parts[1].size = master->erasesize;
-       ar7_parts[1].mask_flags = 0;
-
-       do { /* Try 10 blocks starting from master->erasesize */
-               offset = pre_size;
-               mtd_read(master, offset, sizeof(header), &len,
-                        (uint8_t *)&header);
-               if (!strncmp((char *)&header, "TIENV0.8", 8))
-                       ar7_parts[1].offset = pre_size;
-               if (header.checksum == LOADER_MAGIC1)
-                       break;
-               if (header.checksum == LOADER_MAGIC2)
-                       break;
-               pre_size += master->erasesize;
-       } while (retries--);
-
-       pre_size = offset;
-
-       if (!ar7_parts[1].offset) {
-               ar7_parts[1].offset = master->size - master->erasesize;
-               post_size = master->erasesize;
-       }
-
-       switch (header.checksum) {
-       case LOADER_MAGIC1:
-               while (header.length) {
-                       offset += sizeof(header) + header.length;
-                       mtd_read(master, offset, sizeof(header), &len,
-                                (uint8_t *)&header);
-               }
-               root_offset = offset + sizeof(header) + 4;
-               break;
-       case LOADER_MAGIC2:
-               while (header.length) {
-                       offset += sizeof(header) + header.length;
-                       mtd_read(master, offset, sizeof(header), &len,
-                                (uint8_t *)&header);
-               }
-               root_offset = offset + sizeof(header) + 4 + 0xff;
-               root_offset &= ~(uint32_t)0xff;
-               break;
-       default:
-               printk(KERN_WARNING "Unknown magic: %08x\n", header.checksum);
-               break;
-       }
-
-       mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
-       if (header.checksum != SQUASHFS_MAGIC) {
-               root_offset += master->erasesize - 1;
-               root_offset &= ~(master->erasesize - 1);
-       }
-
-       ar7_parts[2].name = "linux";
-       ar7_parts[2].offset = pre_size;
-       ar7_parts[2].size = master->size - pre_size - post_size;
-       ar7_parts[2].mask_flags = 0;
-
-       ar7_parts[3].name = "rootfs";
-       ar7_parts[3].offset = root_offset;
-       ar7_parts[3].size = master->size - root_offset - post_size;
-       ar7_parts[3].mask_flags = 0;
-
-       *pparts = ar7_parts;
-       return AR7_PARTS;
-}
-
-static struct mtd_part_parser ar7_parser = {
-       .parse_fn = create_mtd_partitions,
-       .name = "ar7part",
-};
-module_mtd_part_parser(ar7_parser);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
-               "Eugene Konev <ejka@openwrt.org>");
-MODULE_DESCRIPTION("MTD partitioning for TI AR7");
index e347b435a038ec5301ab6fdc47b0764ac1f409d1..5e68468b72fcad08bfff5c37660034af82ad497c 100644 (file)
@@ -2,11 +2,9 @@
 
 spi-nor-objs                   := core.o sfdp.o swp.o otp.o sysfs.o
 spi-nor-objs                   += atmel.o
-spi-nor-objs                   += catalyst.o
 spi-nor-objs                   += eon.o
 spi-nor-objs                   += esmt.o
 spi-nor-objs                   += everspin.o
-spi-nor-objs                   += fujitsu.o
 spi-nor-objs                   += gigadevice.o
 spi-nor-objs                   += intel.o
 spi-nor-objs                   += issi.o
index 58968c1e7d2f8ee668641bae12945bb885e4bbcb..e13b8d2dd50af060f62a5cd45db7128c92ed9721 100644 (file)
@@ -163,49 +163,84 @@ static const struct spi_nor_fixups atmel_nor_global_protection_fixups = {
 };
 
 static const struct flash_info atmel_nor_parts[] = {
-       /* Atmel -- some are (confusingly) marketed as "DataFlash" */
-       { "at25fs010",  INFO(0x1f6601, 0, 32 * 1024,   4)
-               FLAGS(SPI_NOR_HAS_LOCK)
-               NO_SFDP_FLAGS(SECT_4K)
-               .fixups = &at25fs_nor_fixups },
-       { "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8)
-               FLAGS(SPI_NOR_HAS_LOCK)
-               NO_SFDP_FLAGS(SECT_4K)
-               .fixups = &at25fs_nor_fixups },
-       { "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               .fixups = &atmel_nor_global_protection_fixups },
-       { "at25df321",  INFO(0x1f4700, 0, 64 * 1024,  64)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               .fixups = &atmel_nor_global_protection_fixups },
-       { "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               .fixups = &atmel_nor_global_protection_fixups },
-       { "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               .fixups = &atmel_nor_global_protection_fixups },
-       { "at25sl321",  INFO(0x1f4216, 0, 64 * 1024, 64)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               .fixups = &atmel_nor_global_protection_fixups },
-       { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               .fixups = &atmel_nor_global_protection_fixups },
-       { "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               .fixups = &atmel_nor_global_protection_fixups },
-       { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16)
-               NO_SFDP_FLAGS(SECT_4K) },
+       {
+               .id = SNOR_ID(0x1f, 0x04, 0x00),
+               .name = "at26f004",
+               .size = SZ_512K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x1f, 0x25, 0x00),
+               .name = "at45db081d",
+               .size = SZ_1M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x1f, 0x42, 0x16),
+               .name = "at25sl321",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x1f, 0x44, 0x01),
+               .name = "at25df041a",
+               .size = SZ_512K,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .fixups = &atmel_nor_global_protection_fixups,
+       }, {
+               .id = SNOR_ID(0x1f, 0x45, 0x01),
+               .name = "at26df081a",
+               .size = SZ_1M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .fixups = &atmel_nor_global_protection_fixups
+       }, {
+               .id = SNOR_ID(0x1f, 0x46, 0x01),
+               .name = "at26df161a",
+               .size = SZ_2M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .fixups = &atmel_nor_global_protection_fixups
+       }, {
+               .id = SNOR_ID(0x1f, 0x47, 0x00),
+               .name = "at25df321",
+               .size = SZ_4M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .fixups = &atmel_nor_global_protection_fixups
+       }, {
+               .id = SNOR_ID(0x1f, 0x47, 0x01),
+               .name = "at25df321a",
+               .size = SZ_4M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .fixups = &atmel_nor_global_protection_fixups
+       }, {
+               .id = SNOR_ID(0x1f, 0x47, 0x08),
+               .name = "at25ff321a",
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .fixups = &atmel_nor_global_protection_fixups
+       }, {
+               .id = SNOR_ID(0x1f, 0x48, 0x00),
+               .name = "at25df641",
+               .size = SZ_8M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .fixups = &atmel_nor_global_protection_fixups
+       }, {
+               .id = SNOR_ID(0x1f, 0x66, 0x01),
+               .name = "at25fs010",
+               .sector_size = SZ_32K,
+               .size = SZ_128K,
+               .flags = SPI_NOR_HAS_LOCK,
+               .no_sfdp_flags = SECT_4K,
+               .fixups = &at25fs_nor_fixups
+       }, {
+               .id = SNOR_ID(0x1f, 0x66, 0x04),
+               .name = "at25fs040",
+               .size = SZ_512K,
+               .flags = SPI_NOR_HAS_LOCK,
+               .no_sfdp_flags = SECT_4K,
+               .fixups = &at25fs_nor_fixups
+       },
 };
 
 const struct spi_nor_manufacturer spi_nor_atmel = {
diff --git a/drivers/mtd/spi-nor/catalyst.c b/drivers/mtd/spi-nor/catalyst.c
deleted file mode 100644 (file)
index 6d31081..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/mtd/spi-nor.h>
-
-#include "core.h"
-
-static const struct flash_info catalyst_nor_parts[] = {
-       /* Catalyst / On Semiconductor -- non-JEDEC */
-       { "cat25c11", CAT25_INFO(16, 8, 16, 1) },
-       { "cat25c03", CAT25_INFO(32, 8, 16, 2) },
-       { "cat25c09", CAT25_INFO(128, 8, 32, 2) },
-       { "cat25c17", CAT25_INFO(256, 8, 32, 2) },
-       { "cat25128", CAT25_INFO(2048, 8, 64, 2) },
-};
-
-const struct spi_nor_manufacturer spi_nor_catalyst = {
-       .name = "catalyst",
-       .parts = catalyst_nor_parts,
-       .nparts = ARRAY_SIZE(catalyst_nor_parts),
-};
index 5070d72835ec0a8a0f3f59f0f522d17386779e1a..89a7f0bbc4b3227b3b318c5baa0bd7708ced311f 100644 (file)
@@ -468,13 +468,12 @@ static int hisi_spi_nor_probe(struct platform_device *pdev)
        return ret;
 }
 
-static int hisi_spi_nor_remove(struct platform_device *pdev)
+static void hisi_spi_nor_remove(struct platform_device *pdev)
 {
        struct hifmc_host *host = platform_get_drvdata(pdev);
 
        hisi_spi_nor_unregister_all(host);
        mutex_destroy(&host->lock);
-       return 0;
 }
 
 static const struct of_device_id hisi_spi_nor_dt_ids[] = {
@@ -489,7 +488,7 @@ static struct platform_driver hisi_spi_nor_driver = {
                .of_match_table = hisi_spi_nor_dt_ids,
        },
        .probe  = hisi_spi_nor_probe,
-       .remove = hisi_spi_nor_remove,
+       .remove_new = hisi_spi_nor_remove,
 };
 module_platform_driver(hisi_spi_nor_driver);
 
index 5d8f47ab146fd9b9388c2cda1454564cba52f920..5aee62f51031f19cf1c1f4056d97f11cde5d1ed4 100644 (file)
@@ -431,13 +431,11 @@ static int nxp_spifi_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int nxp_spifi_remove(struct platform_device *pdev)
+static void nxp_spifi_remove(struct platform_device *pdev)
 {
        struct nxp_spifi *spifi = platform_get_drvdata(pdev);
 
        mtd_device_unregister(&spifi->nor.mtd);
-
-       return 0;
 }
 
 static const struct of_device_id nxp_spifi_match[] = {
@@ -448,7 +446,7 @@ MODULE_DEVICE_TABLE(of, nxp_spifi_match);
 
 static struct platform_driver nxp_spifi_driver = {
        .probe  = nxp_spifi_probe,
-       .remove = nxp_spifi_remove,
+       .remove_new = nxp_spifi_remove,
        .driver = {
                .name = "nxp-spifi",
                .of_match_table = nxp_spifi_match,
index 1b0c6770c14e46419d9362bc7111128873e71780..1c443fe568cfb45d07b8cda123e5feb0f21d98e5 100644 (file)
@@ -1999,11 +1999,9 @@ int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor)
 
 static const struct spi_nor_manufacturer *manufacturers[] = {
        &spi_nor_atmel,
-       &spi_nor_catalyst,
        &spi_nor_eon,
        &spi_nor_esmt,
        &spi_nor_everspin,
-       &spi_nor_fujitsu,
        &spi_nor_gigadevice,
        &spi_nor_intel,
        &spi_nor_issi,
@@ -2019,13 +2017,6 @@ static const struct spi_nor_manufacturer *manufacturers[] = {
 
 static const struct flash_info spi_nor_generic_flash = {
        .name = "spi-nor-generic",
-       .n_banks = 1,
-       /*
-        * JESD216 rev A doesn't specify the page size, therefore we need a
-        * sane default.
-        */
-       .page_size = 256,
-       .parse_sfdp = true,
 };
 
 static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
@@ -2037,8 +2028,8 @@ static const struct flash_info *spi_nor_match_id(struct spi_nor *nor,
        for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
                for (j = 0; j < manufacturers[i]->nparts; j++) {
                        part = &manufacturers[i]->parts[j];
-                       if (part->id_len &&
-                           !memcmp(part->id, id, part->id_len)) {
+                       if (part->id &&
+                           !memcmp(part->id->bytes, id, part->id->len)) {
                                nor->manufacturer = manufacturers[i];
                                return part;
                        }
@@ -2520,13 +2511,6 @@ static int spi_nor_select_pp(struct spi_nor *nor,
 /**
  * spi_nor_select_uniform_erase() - select optimum uniform erase type
  * @map:               the erase map of the SPI NOR
- * @wanted_size:       the erase type size to search for. Contains the value of
- *                     info->sector_size, the "small sector" size in case
- *                     CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is defined or 0 if
- *                     there is no information about the sector size. The
- *                     latter is the case if the flash parameters are parsed
- *                     solely by SFDP, then the largest supported erase type
- *                     is selected.
  *
  * Once the optimum uniform sector erase command is found, disable all the
  * other.
@@ -2534,13 +2518,16 @@ static int spi_nor_select_pp(struct spi_nor *nor,
  * Return: pointer to erase type on success, NULL otherwise.
  */
 static const struct spi_nor_erase_type *
-spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
-                            const u32 wanted_size)
+spi_nor_select_uniform_erase(struct spi_nor_erase_map *map)
 {
        const struct spi_nor_erase_type *tested_erase, *erase = NULL;
        int i;
        u8 uniform_erase_type = map->uniform_erase_type;
 
+       /*
+        * Search for the biggest erase size, except for when compiled
+        * to use 4k erases.
+        */
        for (i = SNOR_ERASE_TYPE_MAX - 1; i >= 0; i--) {
                if (!(uniform_erase_type & BIT(i)))
                        continue;
@@ -2552,10 +2539,11 @@ spi_nor_select_uniform_erase(struct spi_nor_erase_map *map,
                        continue;
 
                /*
-                * If the current erase size is the one, stop here:
+                * If the current erase size is the 4k one, stop here,
                 * we have found the right uniform Sector Erase command.
                 */
-               if (tested_erase->size == wanted_size) {
+               if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS) &&
+                   tested_erase->size == SZ_4K) {
                        erase = tested_erase;
                        break;
                }
@@ -2583,7 +2571,6 @@ static int spi_nor_select_erase(struct spi_nor *nor)
        struct spi_nor_erase_map *map = &nor->params->erase_map;
        const struct spi_nor_erase_type *erase = NULL;
        struct mtd_info *mtd = &nor->mtd;
-       u32 wanted_size = nor->info->sector_size;
        int i;
 
        /*
@@ -2594,13 +2581,8 @@ static int spi_nor_select_erase(struct spi_nor *nor)
         * manage the SPI flash memory as uniform with a single erase sector
         * size, when possible.
         */
-#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
-       /* prefer "small sector" erase if possible */
-       wanted_size = 4096u;
-#endif
-
        if (spi_nor_has_uniform_erase(nor)) {
-               erase = spi_nor_select_uniform_erase(map, wanted_size);
+               erase = spi_nor_select_uniform_erase(map);
                if (!erase)
                        return -EINVAL;
                nor->erase_opcode = erase->opcode;
@@ -2773,7 +2755,8 @@ static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
 {
        struct spi_nor_flash_parameter *params = nor->params;
        struct spi_nor_erase_map *map = &params->erase_map;
-       const u8 no_sfdp_flags = nor->info->no_sfdp_flags;
+       const struct flash_info *info = nor->info;
+       const u8 no_sfdp_flags = info->no_sfdp_flags;
        u8 i, erase_mask;
 
        if (no_sfdp_flags & SPI_NOR_DUAL_READ) {
@@ -2827,7 +2810,8 @@ static void spi_nor_no_sfdp_init_params(struct spi_nor *nor)
                i++;
        }
        erase_mask |= BIT(i);
-       spi_nor_set_erase_type(&map->erase_type[i], nor->info->sector_size,
+       spi_nor_set_erase_type(&map->erase_type[i],
+                              info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE,
                               SPINOR_OP_SE);
        spi_nor_init_uniform_erase_map(map, erase_mask, params->size);
 }
@@ -2869,7 +2853,7 @@ static void spi_nor_init_flags(struct spi_nor *nor)
        if (flags & NO_CHIP_ERASE)
                nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
 
-       if (flags & SPI_NOR_RWW && nor->info->n_banks > 1 &&
+       if (flags & SPI_NOR_RWW && nor->params->n_banks > 1 &&
            !nor->controller_ops)
                nor->flags |= SNOR_F_RWW;
 }
@@ -2933,8 +2917,8 @@ static int spi_nor_late_init_params(struct spi_nor *nor)
        if (nor->flags & SNOR_F_HAS_LOCK && !nor->params->locking_ops)
                spi_nor_init_default_locking_ops(nor);
 
-       if (nor->info->n_banks > 1)
-               params->bank_size = div64_u64(params->size, nor->info->n_banks);
+       if (params->n_banks > 1)
+               params->bank_size = div64_u64(params->size, params->n_banks);
 
        return 0;
 }
@@ -2994,16 +2978,17 @@ static void spi_nor_init_default_params(struct spi_nor *nor)
        struct device_node *np = spi_nor_get_flash_node(nor);
 
        params->quad_enable = spi_nor_sr2_bit1_quad_enable;
-       params->otp.org = &info->otp_org;
+       params->otp.org = info->otp;
 
        /* Default to 16-bit Write Status (01h) Command */
        nor->flags |= SNOR_F_HAS_16BIT_SR;
 
        /* Set SPI NOR sizes. */
        params->writesize = 1;
-       params->size = (u64)info->sector_size * info->n_sectors;
+       params->size = info->size;
        params->bank_size = params->size;
-       params->page_size = info->page_size;
+       params->page_size = info->page_size ?: SPI_NOR_DEFAULT_PAGE_SIZE;
+       params->n_banks = info->n_banks ?: SPI_NOR_DEFAULT_N_BANKS;
 
        if (!(info->flags & SPI_NOR_NO_FR)) {
                /* Default to Fast Read for DT and non-DT platform devices. */
@@ -3083,7 +3068,7 @@ static int spi_nor_init_params(struct spi_nor *nor)
 
        spi_nor_init_default_params(nor);
 
-       if (nor->info->parse_sfdp) {
+       if (spi_nor_needs_sfdp(nor)) {
                ret = spi_nor_parse_sfdp(nor);
                if (ret) {
                        dev_err(nor->dev, "BFPT parsing failed. Please consider using SPI_NOR_SKIP_SFDP when declaring the flash\n");
@@ -3385,7 +3370,7 @@ static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
         * If caller has specified name of flash model that can normally be
         * detected using JEDEC, let's verify it.
         */
-       if (name && info->id_len) {
+       if (name && info->id) {
                const struct flash_info *jinfo;
 
                jinfo = spi_nor_detect(nor);
index 9217379b9cfefd2c8b8e4637c85a262b87238f0f..93cd2fc3606d9dc918c1fd22e8d5ca1800c4c3f4 100644 (file)
 #include "sfdp.h"
 
 #define SPI_NOR_MAX_ID_LEN     6
+/*
+ * 256 bytes is a sane default for most older flashes. Newer flashes will
+ * have the page size defined within their SFDP tables.
+ */
+#define SPI_NOR_DEFAULT_PAGE_SIZE 256
+#define SPI_NOR_DEFAULT_N_BANKS 1
+#define SPI_NOR_DEFAULT_SECTOR_SIZE SZ_64K
 
 /* Standard SPI NOR flash operations. */
 #define SPI_NOR_READID_OP(naddr, ndummy, buf, len)                     \
@@ -353,6 +360,7 @@ struct spi_nor_otp {
  *                     in octal DTR mode.
  * @rdsr_addr_nbytes:  dummy address bytes needed for Read Status Register
  *                     command in octal DTR mode.
+ * @n_banks:           number of banks.
  * @n_dice:            number of dice in the flash memory.
  * @vreg_offset:       volatile register offset for each die.
  * @hwcaps:            describes the read and page program hardware
@@ -389,6 +397,7 @@ struct spi_nor_flash_parameter {
        u8                              addr_mode_nbytes;
        u8                              rdsr_dummy;
        u8                              rdsr_addr_nbytes;
+       u8                              n_banks;
        u8                              n_dice;
        u32                             *vreg_offset;
 
@@ -437,22 +446,32 @@ struct spi_nor_fixups {
        int (*late_init)(struct spi_nor *nor);
 };
 
+/**
+ * struct spi_nor_id - SPI NOR flash ID.
+ *
+ * @bytes: the bytes returned by the flash when issuing command 9F. Typically,
+ *         the first byte is the manufacturer ID code (see JEP106) and the next
+ *         two bytes are a flash part specific ID.
+ * @len:   the number of bytes of ID.
+ */
+struct spi_nor_id {
+       const u8 *bytes;
+       u8 len;
+};
+
 /**
  * struct flash_info - SPI NOR flash_info entry.
+ * @id:   pointer to struct spi_nor_id or NULL, which means "no ID" (mostly
+ *        older chips).
  * @name: the name of the flash.
- * @id:             the flash's ID bytes. The first three bytes are the
- *                  JEDIC ID. JEDEC ID zero means "no ID" (mostly older chips).
- * @id_len:         the number of bytes of ID.
- * @sector_size:    the size listed here is what works with SPINOR_OP_SE, which
- *                  isn't necessarily called a "sector" by the vendor.
- * @n_sectors:      the number of sectors.
- * @n_banks:        the number of banks.
- * @page_size:      the flash's page size.
+ * @size:           the size of the flash in bytes.
+ * @sector_size:    (optional) the size listed here is what works with
+ *                  SPINOR_OP_SE, which isn't necessarily called a "sector" by
+ *                  the vendor. Defaults to 64k.
+ * @n_banks:        (optional) the number of banks. Defaults to 1.
+ * @page_size:      (optional) the flash's page size. Defaults to 256.
  * @addr_nbytes:    number of address bytes to send.
  *
- * @parse_sfdp:     true when flash supports SFDP tables. The false value has no
- *                  meaning. If one wants to skip the SFDP tables, one should
- *                  instead use the SPI_NOR_SKIP_SFDP sfdp_flag.
  * @flags:          flags that indicate support that is not defined by the
  *                  JESD216 standard in its SFDP tables. Flag meanings:
  *   SPI_NOR_HAS_LOCK:        flash supports lock/unlock via SR
@@ -503,15 +522,13 @@ struct spi_nor_fixups {
  */
 struct flash_info {
        char *name;
-       u8 id[SPI_NOR_MAX_ID_LEN];
-       u8 id_len;
+       const struct spi_nor_id *id;
+       size_t size;
        unsigned sector_size;
-       u16 n_sectors;
        u16 page_size;
        u8 n_banks;
        u8 addr_nbytes;
 
-       bool parse_sfdp;
        u16 flags;
 #define SPI_NOR_HAS_LOCK               BIT(0)
 #define SPI_NOR_HAS_TB                 BIT(1)
@@ -540,70 +557,23 @@ struct flash_info {
 
        u8 mfr_flags;
 
-       const struct spi_nor_otp_organization otp_org;
+       const struct spi_nor_otp_organization *otp;
        const struct spi_nor_fixups *fixups;
 };
 
-#define SPI_NOR_ID_2ITEMS(_id) ((_id) >> 8) & 0xff, (_id) & 0xff
-#define SPI_NOR_ID_3ITEMS(_id) ((_id) >> 16) & 0xff, SPI_NOR_ID_2ITEMS(_id)
-
-#define SPI_NOR_ID(_jedec_id, _ext_id)                                 \
-       .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_2ITEMS(_ext_id) }, \
-       .id_len = !(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))
-
-#define SPI_NOR_ID6(_jedec_id, _ext_id)                                        \
-       .id = { SPI_NOR_ID_3ITEMS(_jedec_id), SPI_NOR_ID_3ITEMS(_ext_id) }, \
-       .id_len = 6
-
-#define SPI_NOR_GEOMETRY(_sector_size, _n_sectors, _n_banks)           \
-       .sector_size = (_sector_size),                                  \
-       .n_sectors = (_n_sectors),                                      \
-       .page_size = 256,                                               \
-       .n_banks = (_n_banks)
-
-/* Used when the "_ext_id" is two bytes at most */
-#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors)             \
-       SPI_NOR_ID((_jedec_id), (_ext_id)),                             \
-       SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), 1),
-
-#define INFOB(_jedec_id, _ext_id, _sector_size, _n_sectors, _n_banks)  \
-       SPI_NOR_ID((_jedec_id), (_ext_id)),                             \
-       SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), (_n_banks)),
-
-#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors)            \
-       SPI_NOR_ID6((_jedec_id), (_ext_id)),                            \
-       SPI_NOR_GEOMETRY((_sector_size), (_n_sectors), 1),
+#define SNOR_ID(...)                                                   \
+       (&(const struct spi_nor_id){                                    \
+               .bytes = (const u8[]){ __VA_ARGS__ },                   \
+               .len = sizeof((u8[]){ __VA_ARGS__ }),                   \
+       })
 
-#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_nbytes) \
-               .sector_size = (_sector_size),                          \
-               .n_sectors = (_n_sectors),                              \
-               .page_size = (_page_size),                              \
-               .n_banks = 1,                                           \
-               .addr_nbytes = (_addr_nbytes),                          \
-               .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,              \
-
-#define OTP_INFO(_len, _n_regions, _base, _offset)                     \
-               .otp_org = {                                            \
-                       .len = (_len),                                  \
-                       .base = (_base),                                \
-                       .offset = (_offset),                            \
-                       .n_regions = (_n_regions),                      \
-               },
-
-#define PARSE_SFDP                                                     \
-       .parse_sfdp = true,                                             \
-
-#define FLAGS(_flags)                                                  \
-               .flags = (_flags),                                      \
-
-#define NO_SFDP_FLAGS(_no_sfdp_flags)                                  \
-               .no_sfdp_flags = (_no_sfdp_flags),                      \
-
-#define FIXUP_FLAGS(_fixup_flags)                                      \
-               .fixup_flags = (_fixup_flags),                          \
-
-#define MFR_FLAGS(_mfr_flags)                                          \
-               .mfr_flags = (_mfr_flags),                              \
+#define SNOR_OTP(_len, _n_regions, _base, _offset)                     \
+       (&(const struct spi_nor_otp_organization){                      \
+               .len = (_len),                                          \
+               .base = (_base),                                        \
+               .offset = (_offset),                                    \
+               .n_regions = (_n_regions),                              \
+       })
 
 /**
  * struct spi_nor_manufacturer - SPI NOR manufacturer object
@@ -631,11 +601,9 @@ struct sfdp {
 
 /* Manufacturer drivers. */
 extern const struct spi_nor_manufacturer spi_nor_atmel;
-extern const struct spi_nor_manufacturer spi_nor_catalyst;
 extern const struct spi_nor_manufacturer spi_nor_eon;
 extern const struct spi_nor_manufacturer spi_nor_esmt;
 extern const struct spi_nor_manufacturer spi_nor_everspin;
-extern const struct spi_nor_manufacturer spi_nor_fujitsu;
 extern const struct spi_nor_manufacturer spi_nor_gigadevice;
 extern const struct spi_nor_manufacturer spi_nor_intel;
 extern const struct spi_nor_manufacturer spi_nor_issi;
@@ -734,6 +702,22 @@ static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
        return container_of(mtd, struct spi_nor, mtd);
 }
 
+/**
+ * spi_nor_needs_sfdp() - returns true if SFDP parsing is used for this flash.
+ *
+ * Return: true if SFDP parsing is needed
+ */
+static inline bool spi_nor_needs_sfdp(const struct spi_nor *nor)
+{
+       /*
+        * The flash size is one property parsed by the SFDP. We use it as an
+        * indicator whether we need SFDP parsing for a particular flash. I.e.
+        * non-legacy flash entries in flash_info will have a size of zero iff
+        * SFDP should be used.
+        */
+       return !nor->info->size;
+}
+
 #ifdef CONFIG_DEBUG_FS
 void spi_nor_debugfs_register(struct spi_nor *nor);
 void spi_nor_debugfs_shutdown(void);
index 50a11053711f7bcd467f740f46bdeb1ad3d816b1..c1ddf662f782777f120df30c2e03804d9e5c3fab 100644 (file)
@@ -9,26 +9,60 @@
 #include "core.h"
 
 static const struct flash_info eon_nor_parts[] = {
-       /* EON -- en25xxx */
-       { "en25f32",    INFO(0x1c3116, 0, 64 * 1024,   64)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "en25p32",    INFO(0x1c2016, 0, 64 * 1024,   64) },
-       { "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64) },
-       { "en25p64",    INFO(0x1c2017, 0, 64 * 1024,  128) },
-       { "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "en25q80a",   INFO(0x1c3014, 0, 64 * 1024,   16)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
-       { "en25qh16",   INFO(0x1c7015, 0, 64 * 1024,   32)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
-       { "en25qh32",   INFO(0x1c7016, 0, 64 * 1024,   64) },
-       { "en25qh64",   INFO(0x1c7017, 0, 64 * 1024,  128)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
-       { "en25qh128",  INFO(0x1c7018, 0, 64 * 1024,  256) },
-       { "en25qh256",  INFO(0x1c7019, 0, 64 * 1024,  512)
-               PARSE_SFDP },
-       { "en25s64",    INFO(0x1c3817, 0, 64 * 1024,  128)
-               NO_SFDP_FLAGS(SECT_4K) },
+       {
+               .id = SNOR_ID(0x1c, 0x20, 0x16),
+               .name = "en25p32",
+               .size = SZ_4M,
+       }, {
+               .id = SNOR_ID(0x1c, 0x20, 0x17),
+               .name = "en25p64",
+               .size = SZ_8M,
+       }, {
+               .id = SNOR_ID(0x1c, 0x30, 0x14),
+               .name = "en25q80a",
+               .size = SZ_1M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+       }, {
+               .id = SNOR_ID(0x1c, 0x30, 0x16),
+               .name = "en25q32b",
+               .size = SZ_4M,
+       }, {
+               .id = SNOR_ID(0x1c, 0x30, 0x17),
+               .name = "en25q64",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x1c, 0x31, 0x16),
+               .name = "en25f32",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .name = "en25s64",
+               .id = SNOR_ID(0x1c, 0x38, 0x17),
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x1c, 0x70, 0x15),
+               .name = "en25qh16",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+       }, {
+               .id = SNOR_ID(0x1c, 0x70, 0x16),
+               .name = "en25qh32",
+               .size = SZ_4M,
+       }, {
+               .id = SNOR_ID(0x1c, 0x70, 0x17),
+               .name = "en25qh64",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+       }, {
+               .id = SNOR_ID(0x1c, 0x70, 0x18),
+               .name = "en25qh128",
+               .size = SZ_16M,
+       }, {
+               .id = SNOR_ID(0x1c, 0x70, 0x19),
+               .name = "en25qh256",
+       },
 };
 
 const struct spi_nor_manufacturer spi_nor_eon = {
index fcc3b0e7cda9e6bf0ced412ead56ba4a4e809f49..089fcd1aa794eaf520a481318bdaa93fd96f9ea2 100644 (file)
@@ -9,16 +9,25 @@
 #include "core.h"
 
 static const struct flash_info esmt_nor_parts[] = {
-       /* ESMT */
-       { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "f25l32qa-2s", INFO(0x8c4116, 0, 64 * 1024, 64)
-               FLAGS(SPI_NOR_HAS_LOCK)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128)
-               FLAGS(SPI_NOR_HAS_LOCK)
-               NO_SFDP_FLAGS(SECT_4K) },
+       {
+               .id = SNOR_ID(0x8c, 0x20, 0x16),
+               .name = "f25l32pa",
+               .size = SZ_4M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x8c, 0x41, 0x16),
+               .name = "f25l32qa-2s",
+               .size = SZ_4M,
+               .flags = SPI_NOR_HAS_LOCK,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x8c, 0x41, 0x17),
+               .name = "f25l64qa",
+               .size = SZ_8M,
+               .flags = SPI_NOR_HAS_LOCK,
+               .no_sfdp_flags = SECT_4K,
+       }
 };
 
 const struct spi_nor_manufacturer spi_nor_esmt = {
index 84a07c2e0536f5835095474f61f27de8d0ec10c8..5f321e24ae7d44ed6946dba0e738918f202270ab 100644 (file)
@@ -9,11 +9,29 @@
 #include "core.h"
 
 static const struct flash_info everspin_nor_parts[] = {
-       /* Everspin */
-       { "mr25h128", CAT25_INFO(16 * 1024, 1, 256, 2) },
-       { "mr25h256", CAT25_INFO(32 * 1024, 1, 256, 2) },
-       { "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3) },
-       { "mr25h40",  CAT25_INFO(512 * 1024, 1, 256, 3) },
+       {
+               .name = "mr25h128",
+               .size = SZ_16K,
+               .sector_size = SZ_16K,
+               .addr_nbytes = 2,
+               .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+       }, {
+               .name = "mr25h256",
+               .size = SZ_32K,
+               .sector_size = SZ_32K,
+               .addr_nbytes = 2,
+               .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+       }, {
+               .name = "mr25h10",
+               .size = SZ_128K,
+               .sector_size = SZ_128K,
+               .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+       }, {
+               .name = "mr25h40",
+               .size = SZ_512K,
+               .sector_size = SZ_512K,
+               .flags = SPI_NOR_NO_ERASE | SPI_NOR_NO_FR,
+       }
 };
 
 const struct spi_nor_manufacturer spi_nor_everspin = {
diff --git a/drivers/mtd/spi-nor/fujitsu.c b/drivers/mtd/spi-nor/fujitsu.c
deleted file mode 100644 (file)
index 69cffc5..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2005, Intec Automation Inc.
- * Copyright (C) 2014, Freescale Semiconductor, Inc.
- */
-
-#include <linux/mtd/spi-nor.h>
-
-#include "core.h"
-
-static const struct flash_info fujitsu_nor_parts[] = {
-       /* Fujitsu */
-       { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1)
-               FLAGS(SPI_NOR_NO_ERASE) },
-};
-
-const struct spi_nor_manufacturer spi_nor_fujitsu = {
-       .name = "fujitsu",
-       .parts = fujitsu_nor_parts,
-       .nparts = ARRAY_SIZE(fujitsu_nor_parts),
-};
index d57ddaf1525b3605174be1fc614f100bd94d3adf..ef1edd0add70e6ca501620798a779d621d6bb00d 100644 (file)
@@ -34,39 +34,55 @@ static const struct spi_nor_fixups gd25q256_fixups = {
 };
 
 static const struct flash_info gigadevice_nor_parts[] = {
-       { "gd25q16", INFO(0xc84015, 0, 64 * 1024,  32)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "gd25q32", INFO(0xc84016, 0, 64 * 1024,  64)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "gd25lq64c", INFO(0xc86017, 0, 64 * 1024, 128)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "gd25lq128d", INFO(0xc86018, 0, 64 * 1024, 256)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512)
-               PARSE_SFDP
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
-               .fixups = &gd25q256_fixups },
+       {
+               .id = SNOR_ID(0xc8, 0x40, 0x15),
+               .name = "gd25q16",
+               .size = SZ_2M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc8, 0x40, 0x16),
+               .name = "gd25q32",
+               .size = SZ_4M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc8, 0x40, 0x17),
+               .name = "gd25q64",
+               .size = SZ_8M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc8, 0x40, 0x18),
+               .name = "gd25q128",
+               .size = SZ_16M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc8, 0x40, 0x19),
+               .name = "gd25q256",
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_TB_SR_BIT6,
+               .fixups = &gd25q256_fixups,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+       }, {
+               .id = SNOR_ID(0xc8, 0x60, 0x16),
+               .name = "gd25lq32",
+               .size = SZ_4M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc8, 0x60, 0x17),
+               .name = "gd25lq64c",
+               .size = SZ_8M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc8, 0x60, 0x18),
+               .name = "gd25lq128d",
+               .size = SZ_16M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       },
 };
 
 const struct spi_nor_manufacturer spi_nor_gigadevice = {
index 9179f2d09cbaea69da6db242cc5e414f191127d0..f647359fee7a8fe3ba98bb1f65180ea6719521a8 100644 (file)
@@ -9,13 +9,22 @@
 #include "core.h"
 
 static const struct flash_info intel_nor_parts[] = {
-       /* Intel/Numonyx -- xxxs33b */
-       { "160s33b",  INFO(0x898911, 0, 64 * 1024,  32)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
-       { "320s33b",  INFO(0x898912, 0, 64 * 1024,  64)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
-       { "640s33b",  INFO(0x898913, 0, 64 * 1024, 128)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE) },
+       {
+               .id = SNOR_ID(0x89, 0x89, 0x11),
+               .name = "160s33b",
+               .size = SZ_2M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+       }, {
+               .id = SNOR_ID(0x89, 0x89, 0x12),
+               .name = "320s33b",
+               .size = SZ_4M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+       }, {
+               .id = SNOR_ID(0x89, 0x89, 0x13),
+               .name = "640s33b",
+               .size = SZ_8M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+       }
 };
 
 const struct spi_nor_manufacturer spi_nor_intel = {
index accdf7aa2bfdea90dab2c1cb3a5b92cc564ddb90..18d9a00aa22eb2819986eaa8c31f007f02932e89 100644 (file)
@@ -47,48 +47,86 @@ static const struct spi_nor_fixups pm25lv_nor_fixups = {
 };
 
 static const struct flash_info issi_nor_parts[] = {
-       /* ISSI */
-       { "is25cd512",  INFO(0x7f9d20, 0, 32 * 1024,   2)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "is25lq040b", INFO(0x9d4013, 0, 64 * 1024,   8)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "is25lp016d", INFO(0x9d6015, 0, 64 * 1024,  32)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "is25lp080d", INFO(0x9d6014, 0, 64 * 1024,  16)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "is25lp032",  INFO(0x9d6016, 0, 64 * 1024,  64)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
-       { "is25lp064",  INFO(0x9d6017, 0, 64 * 1024, 128)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
-       { "is25lp128",  INFO(0x9d6018, 0, 64 * 1024, 256)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
-       { "is25lp256",  INFO(0x9d6019, 0, 64 * 1024, 512)
-               PARSE_SFDP
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
-               .fixups = &is25lp256_fixups },
-       { "is25wp032",  INFO(0x9d7016, 0, 64 * 1024,  64)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "is25wp064",  INFO(0x9d7017, 0, 64 * 1024, 128)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "is25wp128",  INFO(0x9d7018, 0, 64 * 1024, 256)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "is25wp256", INFO(0x9d7019, 0, 0, 0)
-               PARSE_SFDP
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
-               FLAGS(SPI_NOR_QUAD_PP)
-               .fixups = &is25lp256_fixups },
-
-       /* PMC */
-       { "pm25lv512",   INFO(0,        0, 32 * 1024,    2)
-               NO_SFDP_FLAGS(SECT_4K)
+       {
+               .name = "pm25lv512",
+               .sector_size = SZ_32K,
+               .size = SZ_64K,
+               .no_sfdp_flags = SECT_4K,
                .fixups = &pm25lv_nor_fixups
-       },
-       { "pm25lv010",   INFO(0,        0, 32 * 1024,    4)
-               NO_SFDP_FLAGS(SECT_4K)
+       }, {
+               .name = "pm25lv010",
+               .sector_size = SZ_32K,
+               .size = SZ_128K,
+               .no_sfdp_flags = SECT_4K,
                .fixups = &pm25lv_nor_fixups
-       },
-       { "pm25lq032",   INFO(0x7f9d46, 0, 64 * 1024,   64)
-               NO_SFDP_FLAGS(SECT_4K) },
+       }, {
+               .id = SNOR_ID(0x7f, 0x9d, 0x20),
+               .name = "is25cd512",
+               .sector_size = SZ_32K,
+               .size = SZ_64K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x7f, 0x9d, 0x46),
+               .name = "pm25lq032",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x9d, 0x40, 0x13),
+               .name = "is25lq040b",
+               .size = SZ_512K,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x9d, 0x60, 0x14),
+               .name = "is25lp080d",
+               .size = SZ_1M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x9d, 0x60, 0x15),
+               .name = "is25lp016d",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x9d, 0x60, 0x16),
+               .name = "is25lp032",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+       }, {
+               .id = SNOR_ID(0x9d, 0x60, 0x17),
+               .name = "is25lp064",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+       }, {
+               .id = SNOR_ID(0x9d, 0x60, 0x18),
+               .name = "is25lp128",
+               .size = SZ_16M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+       }, {
+               .id = SNOR_ID(0x9d, 0x60, 0x19),
+               .name = "is25lp256",
+               .fixups = &is25lp256_fixups,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+       }, {
+               .id = SNOR_ID(0x9d, 0x70, 0x16),
+               .name = "is25wp032",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x9d, 0x70, 0x17),
+               .size = SZ_8M,
+               .name = "is25wp064",
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x9d, 0x70, 0x18),
+               .name = "is25wp128",
+               .size = SZ_16M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x9d, 0x70, 0x19),
+               .name = "is25wp256",
+               .flags = SPI_NOR_QUAD_PP,
+               .fixups = &is25lp256_fixups,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+       }
 };
 
 static void issi_nor_default_init(struct spi_nor *nor)
index eb149e517c1fe7ac458db1f2f08a58d556fd1f8f..ea6be95e75a52602bd91b24d7abebb1efe657174 100644 (file)
@@ -33,76 +33,156 @@ static const struct spi_nor_fixups mx25l25635_fixups = {
 };
 
 static const struct flash_info macronix_nor_parts[] = {
-       /* Macronix */
-       { "mx25l512e",   INFO(0xc22010, 0, 64 * 1024,   1)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16) },
-       { "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25l3255e",  INFO(0xc29e16, 0, 64 * 1024,  64)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25u2033e",  INFO(0xc22532, 0, 64 * 1024,   4)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25u3235f",  INFO(0xc22536, 0, 64 * 1024,  64)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "mx25u4035",   INFO(0xc22533, 0, 64 * 1024,   8)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25u8035",   INFO(0xc22534, 0, 64 * 1024,  16)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256) },
-       { "mx25r1635f",  INFO(0xc22815, 0, 64 * 1024,  32)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "mx25r3235f",  INFO(0xc22816, 0, 64 * 1024,  64)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "mx25u12835f", INFO(0xc22538, 0, 64 * 1024, 256)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               .fixups = &mx25l25635_fixups },
-       { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512)
-               NO_SFDP_FLAGS(SECT_4K)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
-       { "mx25u51245g", INFO(0xc2253a, 0, 64 * 1024, 1024)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
-       { "mx25uw51245g", INFOB(0xc2813a, 0, 0, 0, 4)
-               PARSE_SFDP
-               FLAGS(SPI_NOR_RWW) },
-       { "mx25v8035f",  INFO(0xc22314, 0, 64 * 1024,  16)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512) },
-       { "mx66l51235f", INFO(0xc2201a, 0, 64 * 1024, 1024)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
-       { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
-       { "mx66l1g45g",  INFO(0xc2201b, 0, 64 * 1024, 2048)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048)
-               NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
-       { "mx66u2g45g",  INFO(0xc2253c, 0, 64 * 1024, 4096)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
+       {
+               .id = SNOR_ID(0xc2, 0x20, 0x10),
+               .name = "mx25l512e",
+               .size = SZ_64K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xc2, 0x20, 0x12),
+               .name = "mx25l2005a",
+               .size = SZ_256K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xc2, 0x20, 0x13),
+               .name = "mx25l4005a",
+               .size = SZ_512K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xc2, 0x20, 0x14),
+               .name = "mx25l8005",
+               .size = SZ_1M,
+       }, {
+               .id = SNOR_ID(0xc2, 0x20, 0x15),
+               .name = "mx25l1606e",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xc2, 0x20, 0x16),
+               .name = "mx25l3205d",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xc2, 0x20, 0x17),
+               .name = "mx25l6405d",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xc2, 0x20, 0x18),
+               .name = "mx25l12805d",
+               .size = SZ_16M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xc2, 0x20, 0x19),
+               .name = "mx25l25635e",
+               .size = SZ_32M,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixups = &mx25l25635_fixups
+       }, {
+               .id = SNOR_ID(0xc2, 0x20, 0x1a),
+               .name = "mx66l51235f",
+               .size = SZ_64M,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+       }, {
+               .id = SNOR_ID(0xc2, 0x20, 0x1b),
+               .name = "mx66l1g45g",
+               .size = SZ_128M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc2, 0x23, 0x14),
+               .name = "mx25v8035f",
+               .size = SZ_1M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc2, 0x25, 0x32),
+               .name = "mx25u2033e",
+               .size = SZ_256K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xc2, 0x25, 0x33),
+               .name = "mx25u4035",
+               .size = SZ_512K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xc2, 0x25, 0x34),
+               .name = "mx25u8035",
+               .size = SZ_1M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xc2, 0x25, 0x36),
+               .name = "mx25u3235f",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc2, 0x25, 0x37),
+               .name = "mx25u6435f",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xc2, 0x25, 0x38),
+               .name = "mx25u12835f",
+               .size = SZ_16M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc2, 0x25, 0x39),
+               .name = "mx25u25635f",
+               .size = SZ_32M,
+               .no_sfdp_flags = SECT_4K,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+       }, {
+               .id = SNOR_ID(0xc2, 0x25, 0x3a),
+               .name = "mx25u51245g",
+               .size = SZ_64M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+       }, {
+               .id = SNOR_ID(0xc2, 0x25, 0x3a),
+               .name = "mx66u51235f",
+               .size = SZ_64M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+       }, {
+               .id = SNOR_ID(0xc2, 0x25, 0x3c),
+               .name = "mx66u2g45g",
+               .size = SZ_256M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+       }, {
+               .id = SNOR_ID(0xc2, 0x26, 0x18),
+               .name = "mx25l12855e",
+               .size = SZ_16M,
+       }, {
+               .id = SNOR_ID(0xc2, 0x26, 0x19),
+               .name = "mx25l25655e",
+               .size = SZ_32M,
+       }, {
+               .id = SNOR_ID(0xc2, 0x26, 0x1b),
+               .name = "mx66l1g55g",
+               .size = SZ_128M,
+               .no_sfdp_flags = SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc2, 0x28, 0x15),
+               .name = "mx25r1635f",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc2, 0x28, 0x16),
+               .name = "mx25r3235f",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xc2, 0x81, 0x3a),
+               .name = "mx25uw51245g",
+               .n_banks = 4,
+               .flags = SPI_NOR_RWW,
+       }, {
+               .id = SNOR_ID(0xc2, 0x9e, 0x16),
+               .name = "mx25l3255e",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K,
+       }
 };
 
 static void macronix_nor_default_init(struct spi_nor *nor)
index 6ad080c52ab5d6734dd80d5a58409da5357e4164..8920547c12bfd6b7eaad4f8cec9b9294deec8b20 100644 (file)
@@ -78,7 +78,7 @@ static int micron_st_nor_octal_dtr_en(struct spi_nor *nor)
                return ret;
        }
 
-       if (memcmp(buf, nor->info->id, nor->info->id_len))
+       if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
                return -EINVAL;
 
        return 0;
@@ -114,7 +114,7 @@ static int micron_st_nor_octal_dtr_dis(struct spi_nor *nor)
                return ret;
        }
 
-       if (memcmp(buf, nor->info->id, nor->info->id_len))
+       if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
                return -EINVAL;
 
        return 0;
@@ -159,148 +159,291 @@ static const struct spi_nor_fixups mt35xu512aba_fixups = {
 };
 
 static const struct flash_info micron_nor_parts[] = {
-       { "mt35xu512aba", INFO(0x2c5b1a, 0, 128 * 1024, 512)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ |
-                          SPI_NOR_OCTAL_DTR_READ | SPI_NOR_OCTAL_DTR_PP)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES | SPI_NOR_IO_MODE_EN_VOLATILE)
-               MFR_FLAGS(USE_FSR)
-               .fixups = &mt35xu512aba_fixups
-       },
-       { "mt35xu02g", INFO(0x2c5b1c, 0, 128 * 1024, 2048)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_OCTAL_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
-               MFR_FLAGS(USE_FSR)
+       {
+               .id = SNOR_ID(0x2c, 0x5b, 0x1a),
+               .name = "mt35xu512aba",
+               .sector_size = SZ_128K,
+               .size = SZ_64M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_OCTAL_READ |
+                                SPI_NOR_OCTAL_DTR_READ | SPI_NOR_OCTAL_DTR_PP,
+               .mfr_flags = USE_FSR,
+               .fixup_flags = SPI_NOR_4B_OPCODES | SPI_NOR_IO_MODE_EN_VOLATILE,
+               .fixups = &mt35xu512aba_fixups,
+       }, {
+               .id = SNOR_ID(0x2c, 0x5b, 0x1c),
+               .name = "mt35xu02g",
+               .sector_size = SZ_128K,
+               .size = SZ_256M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_OCTAL_READ,
+               .mfr_flags = USE_FSR,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
        },
 };
 
-static const struct flash_info st_nor_parts[] = {
-       { "n25q016a",    INFO(0x20bb15, 0, 64 * 1024,   32)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
-       { "n25q032",     INFO(0x20ba16, 0, 64 * 1024,   64)
-               NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
-       { "n25q032a",    INFO(0x20bb16, 0, 64 * 1024,   64)
-               NO_SFDP_FLAGS(SPI_NOR_QUAD_READ) },
-       { "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
-       { "n25q064a",    INFO(0x20bb17, 0, 64 * 1024,  128)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ) },
-       { "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
-                     SPI_NOR_BP3_SR_BIT6)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
-                     SPI_NOR_BP3_SR_BIT6)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "mt25ql256a",  INFO6(0x20ba19, 0x104400, 64 * 1024,  512)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "mt25qu256a",  INFO6(0x20bb19, 0x104400, 64 * 1024,  512)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
-                     SPI_NOR_BP3_SR_BIT6)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "n25q256ax1",  INFO(0x20bb19, 0, 64 * 1024,  512)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "mt25ql512a",  INFO6(0x20ba20, 0x104400, 64 * 1024, 1024)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
-                     SPI_NOR_BP3_SR_BIT6)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "mt25qu512a",  INFO6(0x20bb20, 0x104400, 64 * 1024, 1024)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
-                     SPI_NOR_BP3_SR_BIT6)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
-                     SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048)
-               FLAGS(NO_CHIP_ERASE)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "mt25ql02g",   INFO(0x20ba22, 0, 64 * 1024, 4096)
-               FLAGS(NO_CHIP_ERASE)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_FSR)
-       },
-       { "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096)
-               FLAGS(NO_CHIP_ERASE)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_FSR)
-       },
+static int mt25qu512a_post_bfpt_fixup(struct spi_nor *nor,
+                                     const struct sfdp_parameter_header *bfpt_header,
+                                     const struct sfdp_bfpt *bfpt)
+{
+       nor->flags &= ~SNOR_F_HAS_16BIT_SR;
+       return 0;
+}
+
+static struct spi_nor_fixups mt25qu512a_fixups = {
+       .post_bfpt = mt25qu512a_post_bfpt_fixup,
+};
 
-       { "m25p05",  INFO(0x202010,  0,  32 * 1024,   2) },
-       { "m25p10",  INFO(0x202011,  0,  32 * 1024,   4) },
-       { "m25p20",  INFO(0x202012,  0,  64 * 1024,   4) },
-       { "m25p40",  INFO(0x202013,  0,  64 * 1024,   8) },
-       { "m25p80",  INFO(0x202014,  0,  64 * 1024,  16) },
-       { "m25p16",  INFO(0x202015,  0,  64 * 1024,  32) },
-       { "m25p32",  INFO(0x202016,  0,  64 * 1024,  64) },
-       { "m25p64",  INFO(0x202017,  0,  64 * 1024, 128) },
-       { "m25p128", INFO(0x202018,  0, 256 * 1024,  64) },
-
-       { "m25p05-nonjedec",  INFO(0, 0,  32 * 1024,   2) },
-       { "m25p10-nonjedec",  INFO(0, 0,  32 * 1024,   4) },
-       { "m25p20-nonjedec",  INFO(0, 0,  64 * 1024,   4) },
-       { "m25p40-nonjedec",  INFO(0, 0,  64 * 1024,   8) },
-       { "m25p80-nonjedec",  INFO(0, 0,  64 * 1024,  16) },
-       { "m25p16-nonjedec",  INFO(0, 0,  64 * 1024,  32) },
-       { "m25p32-nonjedec",  INFO(0, 0,  64 * 1024,  64) },
-       { "m25p64-nonjedec",  INFO(0, 0,  64 * 1024, 128) },
-       { "m25p128-nonjedec", INFO(0, 0, 256 * 1024,  64) },
-
-       { "m45pe10", INFO(0x204011,  0, 64 * 1024,    2) },
-       { "m45pe80", INFO(0x204014,  0, 64 * 1024,   16) },
-       { "m45pe16", INFO(0x204015,  0, 64 * 1024,   32) },
-
-       { "m25pe20", INFO(0x208012,  0, 64 * 1024,  4) },
-       { "m25pe80", INFO(0x208014,  0, 64 * 1024, 16) },
-       { "m25pe16", INFO(0x208015,  0, 64 * 1024, 32)
-               NO_SFDP_FLAGS(SECT_4K) },
-
-       { "m25px16",    INFO(0x207115,  0, 64 * 1024, 32)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "m25px32",    INFO(0x207116,  0, 64 * 1024, 64)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "m25px32-s0", INFO(0x207316,  0, 64 * 1024, 64)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "m25px32-s1", INFO(0x206316,  0, 64 * 1024, 64)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "m25px64",    INFO(0x207117,  0, 64 * 1024, 128) },
-       { "m25px80",    INFO(0x207114,  0, 64 * 1024, 16) },
+static const struct flash_info st_nor_parts[] = {
+       {
+               .name = "m25p05-nonjedec",
+               .sector_size = SZ_32K,
+               .size = SZ_64K,
+       }, {
+               .name = "m25p10-nonjedec",
+               .sector_size = SZ_32K,
+               .size = SZ_128K,
+       }, {
+               .name = "m25p20-nonjedec",
+               .size = SZ_256K,
+       }, {
+               .name = "m25p40-nonjedec",
+               .size = SZ_512K,
+       }, {
+               .name = "m25p80-nonjedec",
+               .size = SZ_1M,
+       }, {
+               .name = "m25p16-nonjedec",
+               .size = SZ_2M,
+       }, {
+               .name = "m25p32-nonjedec",
+               .size = SZ_4M,
+       }, {
+               .name = "m25p64-nonjedec",
+               .size = SZ_8M,
+       }, {
+               .name = "m25p128-nonjedec",
+               .sector_size = SZ_256K,
+               .size = SZ_16M,
+       }, {
+               .id = SNOR_ID(0x20, 0x20, 0x10),
+               .name = "m25p05",
+               .sector_size = SZ_32K,
+               .size = SZ_64K,
+       }, {
+               .id = SNOR_ID(0x20, 0x20, 0x11),
+               .name = "m25p10",
+               .sector_size = SZ_32K,
+               .size = SZ_128K,
+       }, {
+               .id = SNOR_ID(0x20, 0x20, 0x12),
+               .name = "m25p20",
+               .size = SZ_256K,
+       }, {
+               .id = SNOR_ID(0x20, 0x20, 0x13),
+               .name = "m25p40",
+               .size = SZ_512K,
+       }, {
+               .id = SNOR_ID(0x20, 0x20, 0x14),
+               .name = "m25p80",
+               .size = SZ_1M,
+       }, {
+               .id = SNOR_ID(0x20, 0x20, 0x15),
+               .name = "m25p16",
+               .size = SZ_2M,
+       }, {
+               .id = SNOR_ID(0x20, 0x20, 0x16),
+               .name = "m25p32",
+               .size = SZ_4M,
+       }, {
+               .id = SNOR_ID(0x20, 0x20, 0x17),
+               .name = "m25p64",
+               .size = SZ_8M,
+       }, {
+               .id = SNOR_ID(0x20, 0x20, 0x18),
+               .name = "m25p128",
+               .sector_size = SZ_256K,
+               .size = SZ_16M,
+       }, {
+               .id = SNOR_ID(0x20, 0x40, 0x11),
+               .name = "m45pe10",
+               .size = SZ_128K,
+       }, {
+               .id = SNOR_ID(0x20, 0x40, 0x14),
+               .name = "m45pe80",
+               .size = SZ_1M,
+       }, {
+               .id = SNOR_ID(0x20, 0x40, 0x15),
+               .name = "m45pe16",
+               .size = SZ_2M,
+       }, {
+               .id = SNOR_ID(0x20, 0x63, 0x16),
+               .name = "m25px32-s1",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x20, 0x71, 0x14),
+               .name = "m25px80",
+               .size = SZ_1M,
+       }, {
+               .id = SNOR_ID(0x20, 0x71, 0x15),
+               .name = "m25px16",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x20, 0x71, 0x16),
+               .name = "m25px32",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x20, 0x71, 0x17),
+               .name = "m25px64",
+               .size = SZ_8M,
+       }, {
+               .id = SNOR_ID(0x20, 0x73, 0x16),
+               .name = "m25px32-s0",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x20, 0x80, 0x12),
+               .name = "m25pe20",
+               .size = SZ_256K,
+       }, {
+               .id = SNOR_ID(0x20, 0x80, 0x14),
+               .name = "m25pe80",
+               .size = SZ_1M,
+       }, {
+               .id = SNOR_ID(0x20, 0x80, 0x15),
+               .name = "m25pe16",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x20, 0xba, 0x16),
+               .name = "n25q032",
+               .size = SZ_4M,
+               .no_sfdp_flags = SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x20, 0xba, 0x17),
+               .name = "n25q064",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x20, 0xba, 0x18),
+               .name = "n25q128a13",
+               .size = SZ_16M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+                        SPI_NOR_BP3_SR_BIT6,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xba, 0x19, 0x10, 0x44, 0x00),
+               .name = "mt25ql256a",
+               .size = SZ_32M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xba, 0x19),
+               .name = "n25q256a",
+               .size = SZ_32M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xba, 0x20, 0x10, 0x44, 0x00),
+               .name = "mt25ql512a",
+               .size = SZ_64M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xba, 0x20),
+               .name = "n25q512ax3",
+               .size = SZ_64M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+                        SPI_NOR_BP3_SR_BIT6,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xba, 0x21),
+               .name = "n25q00",
+               .size = SZ_128M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+                        SPI_NOR_BP3_SR_BIT6 | NO_CHIP_ERASE,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xba, 0x22),
+               .name = "mt25ql02g",
+               .size = SZ_256M,
+               .flags = NO_CHIP_ERASE,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xbb, 0x15),
+               .name = "n25q016a",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x20, 0xbb, 0x16),
+               .name = "n25q032a",
+               .size = SZ_4M,
+               .no_sfdp_flags = SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x20, 0xbb, 0x17),
+               .name = "n25q064a",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x20, 0xbb, 0x18),
+               .name = "n25q128a11",
+               .size = SZ_16M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+                        SPI_NOR_BP3_SR_BIT6,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xbb, 0x19, 0x10, 0x44, 0x00),
+               .name = "mt25qu256a",
+               .size = SZ_32M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+                        SPI_NOR_BP3_SR_BIT6,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xbb, 0x19),
+               .name = "n25q256ax1",
+               .size = SZ_32M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xbb, 0x20, 0x10, 0x44, 0x00),
+               .name = "mt25qu512a",
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+                        SPI_NOR_BP3_SR_BIT6,
+               .mfr_flags = USE_FSR,
+               .fixups = &mt25qu512a_fixups,
+       }, {
+               .id = SNOR_ID(0x20, 0xbb, 0x20),
+               .name = "n25q512a",
+               .size = SZ_64M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB | SPI_NOR_4BIT_BP |
+                        SPI_NOR_BP3_SR_BIT6,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xbb, 0x21),
+               .name = "n25q00a",
+               .size = SZ_128M,
+               .flags = NO_CHIP_ERASE,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_FSR,
+       }, {
+               .id = SNOR_ID(0x20, 0xbb, 0x22),
+               .name = "mt25qu02g",
+               .size = SZ_256M,
+               .flags = NO_CHIP_ERASE,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_FSR,
+       }
 };
 
 /**
index 709822fced867f82659b482c718b846261d44c02..12921344373dad66e60a5151cce9e68fb7e1aa6b 100644 (file)
@@ -228,7 +228,7 @@ static int cypress_nor_octal_dtr_en(struct spi_nor *nor)
                return ret;
        }
 
-       if (memcmp(buf, nor->info->id, nor->info->id_len))
+       if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
                return -EINVAL;
 
        return 0;
@@ -272,7 +272,7 @@ static int cypress_nor_octal_dtr_dis(struct spi_nor *nor)
                return ret;
        }
 
-       if (memcmp(buf, nor->info->id, nor->info->id_len))
+       if (memcmp(buf, nor->info->id->bytes, nor->info->id->len))
                return -EINVAL;
 
        return 0;
@@ -756,155 +756,252 @@ static const struct spi_nor_fixups s25fs_s_nor_fixups = {
 };
 
 static const struct flash_info spansion_nor_parts[] = {
-       /* Spansion/Cypress -- single (large) sector size only, at least
-        * for the chips listed here (without boot sectors).
-        */
-       { "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "s25fl128s0", INFO6(0x012018, 0x4d0080, 256 * 1024, 64)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_CLSR)
-       },
-       { "s25fl128s1", INFO6(0x012018, 0x4d0180, 64 * 1024, 256)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_CLSR)
-       },
-       { "s25fl256s0", INFO6(0x010219, 0x4d0080, 256 * 1024, 128)
-               NO_SFDP_FLAGS(SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_CLSR)
-       },
-       { "s25fl256s1", INFO6(0x010219, 0x4d0180, 64 * 1024, 512)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_CLSR)
-       },
-       { "s25fl512s",  INFO6(0x010220, 0x4d0080, 256 * 1024, 256)
-               FLAGS(SPI_NOR_HAS_LOCK)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_CLSR)
-       },
-       { "s25fs128s1", INFO6(0x012018, 0x4d0181, 64 * 1024, 256)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_CLSR)
-               .fixups = &s25fs_s_nor_fixups, },
-       { "s25fs256s0", INFO6(0x010219, 0x4d0081, 256 * 1024, 128)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_CLSR)
-       },
-       { "s25fs256s1", INFO6(0x010219, 0x4d0181, 64 * 1024, 512)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_CLSR)
-       },
-       { "s25fs512s",  INFO6(0x010220, 0x4d0081, 256 * 1024, 256)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_CLSR)
-               .fixups = &s25fs_s_nor_fixups, },
-       { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64) },
-       { "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256) },
-       { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_CLSR)
-       },
-       { "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256)
-               NO_SFDP_FLAGS(SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               MFR_FLAGS(USE_CLSR)
-       },
-       { "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8) },
-       { "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16) },
-       { "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32) },
-       { "s25sl032a",  INFO(0x010215,      0,  64 * 1024,  64) },
-       { "s25sl064a",  INFO(0x010216,      0,  64 * 1024, 128) },
-       { "s25fl004k",  INFO(0xef4013,      0,  64 * 1024,   8)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "s25fl008k",  INFO(0xef4014,      0,  64 * 1024,  16)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "s25fl116k",  INFO(0x014015,      0,  64 * 1024,  32)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "s25fl132k",  INFO(0x014016,      0,  64 * 1024,  64)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "s25fl164k",  INFO(0x014017,      0,  64 * 1024, 128)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "s25fl204k",  INFO(0x014013,      0,  64 * 1024,   8)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
-       { "s25fl208k",  INFO(0x014014,      0,  64 * 1024,  16)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
-       { "s25fl064l",  INFO(0x016017,      0,  64 * 1024, 128)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
-       { "s25fl128l",  INFO(0x016018,      0,  64 * 1024, 256)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
-       { "s25fl256l",  INFO(0x016019,      0,  64 * 1024, 512)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               FIXUP_FLAGS(SPI_NOR_4B_OPCODES) },
-       { "s25fs256t",  INFO6(0x342b19, 0x0f0890, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
-               .fixups = &s25fs256t_fixups },
-       { "s25hl512t",  INFO6(0x342a1a, 0x0f0390, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
-               .fixups = &s25hx_t_fixups },
-       { "s25hl01gt",  INFO6(0x342a1b, 0x0f0390, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
-               .fixups = &s25hx_t_fixups },
-       { "s25hl02gt",  INFO6(0x342a1c, 0x0f0090, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
-               FLAGS(NO_CHIP_ERASE)
-               .fixups = &s25hx_t_fixups },
-       { "s25hs512t",  INFO6(0x342b1a, 0x0f0390, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
-               .fixups = &s25hx_t_fixups },
-       { "s25hs01gt",  INFO6(0x342b1b, 0x0f0390, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
-               .fixups = &s25hx_t_fixups },
-       { "s25hs02gt",  INFO6(0x342b1c, 0x0f0090, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
-               FLAGS(NO_CHIP_ERASE)
-               .fixups = &s25hx_t_fixups },
-       { "cy15x104q",  INFO6(0x042cc2, 0x7f7f7f, 512 * 1024, 1)
-               FLAGS(SPI_NOR_NO_ERASE) },
-       { "s28hl512t",   INFO(0x345a1a,      0, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
+       {
+               .id = SNOR_ID(0x01, 0x02, 0x12),
+               .name = "s25sl004a",
+               .size = SZ_512K,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x13),
+               .name = "s25sl008a",
+               .size = SZ_1M,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x14),
+               .name = "s25sl016a",
+               .size = SZ_2M,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x15, 0x4d, 0x00),
+               .name = "s25sl032p",
+               .size = SZ_4M,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x15),
+               .name = "s25sl032a",
+               .size = SZ_4M,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x16, 0x4d, 0x00),
+               .name = "s25sl064p",
+               .size = SZ_8M,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x16),
+               .name = "s25sl064a",
+               .size = SZ_8M,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x00, 0x80),
+               .name = "s25fl256s0",
+               .size = SZ_32M,
+               .sector_size = SZ_256K,
+               .no_sfdp_flags = SPI_NOR_SKIP_SFDP | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_CLSR,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x00, 0x81),
+               .name = "s25fs256s0",
+               .size = SZ_32M,
+               .sector_size = SZ_256K,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_CLSR,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x01, 0x80),
+               .name = "s25fl256s1",
+               .size = SZ_32M,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_CLSR,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x19, 0x4d, 0x01, 0x81),
+               .name = "s25fs256s1",
+               .size = SZ_32M,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_CLSR,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x20, 0x4d, 0x00, 0x80),
+               .name = "s25fl512s",
+               .size = SZ_64M,
+               .sector_size = SZ_256K,
+               .flags = SPI_NOR_HAS_LOCK,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_CLSR,
+       }, {
+               .id = SNOR_ID(0x01, 0x02, 0x20, 0x4d, 0x00, 0x81),
+               .name = "s25fs512s",
+               .size = SZ_64M,
+               .sector_size = SZ_256K,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_CLSR,
+               .fixups = &s25fs_s_nor_fixups,
+       }, {
+               .id = SNOR_ID(0x01, 0x20, 0x18, 0x03, 0x00),
+               .name = "s25sl12800",
+               .size = SZ_16M,
+               .sector_size = SZ_256K,
+       }, {
+               .id = SNOR_ID(0x01, 0x20, 0x18, 0x03, 0x01),
+               .name = "s25sl12801",
+               .size = SZ_16M,
+       }, {
+               .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x00, 0x80),
+               .name = "s25fl128s0",
+               .size = SZ_16M,
+               .sector_size = SZ_256K,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_CLSR,
+       }, {
+               .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x00),
+               .name = "s25fl129p0",
+               .size = SZ_16M,
+               .sector_size = SZ_256K,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_CLSR,
+       }, {
+               .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x01, 0x80),
+               .name = "s25fl128s1",
+               .size = SZ_16M,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_CLSR,
+       }, {
+               .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x01, 0x81),
+               .name = "s25fs128s1",
+               .size = SZ_16M,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_CLSR,
+               .fixups = &s25fs_s_nor_fixups,
+       }, {
+               .id = SNOR_ID(0x01, 0x20, 0x18, 0x4d, 0x01),
+               .name = "s25fl129p1",
+               .size = SZ_16M,
+               .no_sfdp_flags = SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .mfr_flags = USE_CLSR,
+       }, {
+               .id = SNOR_ID(0x01, 0x40, 0x13),
+               .name = "s25fl204k",
+               .size = SZ_512K,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+       }, {
+               .id = SNOR_ID(0x01, 0x40, 0x14),
+               .name = "s25fl208k",
+               .size = SZ_1M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+       }, {
+               .id = SNOR_ID(0x01, 0x40, 0x15),
+               .name = "s25fl116k",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x01, 0x40, 0x16),
+               .name = "s25fl132k",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x01, 0x40, 0x17),
+               .name = "s25fl164k",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x01, 0x60, 0x17),
+               .name = "s25fl064l",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+       }, {
+               .id = SNOR_ID(0x01, 0x60, 0x18),
+               .name = "s25fl128l",
+               .size = SZ_16M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+       }, {
+               .id = SNOR_ID(0x01, 0x60, 0x19),
+               .name = "s25fl256l",
+               .size = SZ_32M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixup_flags = SPI_NOR_4B_OPCODES,
+       }, {
+               .id = SNOR_ID(0x04, 0x2c, 0xc2, 0x7f, 0x7f, 0x7f),
+               .name = "cy15x104q",
+               .size = SZ_512K,
+               .sector_size = SZ_512K,
+               .flags = SPI_NOR_NO_ERASE,
+       }, {
+               .id = SNOR_ID(0x34, 0x2a, 0x1a, 0x0f, 0x03, 0x90),
+               .name = "s25hl512t",
+               .mfr_flags = USE_CLPEF,
+               .fixups = &s25hx_t_fixups
+       }, {
+               .id = SNOR_ID(0x34, 0x2a, 0x1b, 0x0f, 0x03, 0x90),
+               .name = "s25hl01gt",
+               .mfr_flags = USE_CLPEF,
+               .fixups = &s25hx_t_fixups
+       }, {
+               .id = SNOR_ID(0x34, 0x2a, 0x1c, 0x0f, 0x00, 0x90),
+               .name = "s25hl02gt",
+               .mfr_flags = USE_CLPEF,
+               .flags = NO_CHIP_ERASE,
+               .fixups = &s25hx_t_fixups
+       }, {
+               .id = SNOR_ID(0x34, 0x2b, 0x19, 0x0f, 0x08, 0x90),
+               .name = "s25fs256t",
+               .mfr_flags = USE_CLPEF,
+               .fixups = &s25fs256t_fixups
+       }, {
+               .id = SNOR_ID(0x34, 0x2b, 0x1a, 0x0f, 0x03, 0x90),
+               .name = "s25hs512t",
+               .mfr_flags = USE_CLPEF,
+               .fixups = &s25hx_t_fixups
+       }, {
+               .id = SNOR_ID(0x34, 0x2b, 0x1b, 0x0f, 0x03, 0x90),
+               .name = "s25hs01gt",
+               .mfr_flags = USE_CLPEF,
+               .fixups = &s25hx_t_fixups
+       }, {
+               .id = SNOR_ID(0x34, 0x2b, 0x1c, 0x0f, 0x00, 0x90),
+               .name = "s25hs02gt",
+               .mfr_flags = USE_CLPEF,
+               .flags = NO_CHIP_ERASE,
+               .fixups = &s25hx_t_fixups
+       }, {
+               .id = SNOR_ID(0x34, 0x5a, 0x1a),
+               .name = "s28hl512t",
+               .mfr_flags = USE_CLPEF,
                .fixups = &s28hx_t_fixups,
-       },
-       { "s28hl01gt",   INFO(0x345a1b,      0, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
+       }, {
+               .id = SNOR_ID(0x34, 0x5a, 0x1b),
+               .name = "s28hl01gt",
+               .mfr_flags = USE_CLPEF,
                .fixups = &s28hx_t_fixups,
-       },
-       { "s28hs512t",   INFO(0x345b1a,      0, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
+       }, {
+               .id = SNOR_ID(0x34, 0x5b, 0x1a),
+               .name = "s28hs512t",
+               .mfr_flags = USE_CLPEF,
                .fixups = &s28hx_t_fixups,
-       },
-       { "s28hs01gt",   INFO(0x345b1b,      0, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
+       }, {
+               .id = SNOR_ID(0x34, 0x5b, 0x1b),
+               .name = "s28hs01gt",
+               .mfr_flags = USE_CLPEF,
                .fixups = &s28hx_t_fixups,
-       },
-       { "s28hs02gt",   INFO(0x345b1c,      0, 0, 0)
-               PARSE_SFDP
-               MFR_FLAGS(USE_CLPEF)
+       }, {
+               .id = SNOR_ID(0x34, 0x5b, 0x1c),
+               .name = "s28hs02gt",
+               .mfr_flags = USE_CLPEF,
                .fixups = &s28hx_t_fixups,
-       },
+       }, {
+               .id = SNOR_ID(0xef, 0x40, 0x13),
+               .name = "s25fl004k",
+               .size = SZ_512K,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x40, 0x14),
+               .name = "s25fl008k",
+               .size = SZ_1M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x40, 0x15),
+               .name = "s25fl016k",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x40, 0x17),
+               .name = "s25fl064k",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }
 };
 
 /**
@@ -956,7 +1053,8 @@ static int spansion_nor_late_init(struct spi_nor *nor)
                nor->flags |= SNOR_F_4B_OPCODES;
                /* No small sector erase for 4-byte command set */
                nor->erase_opcode = SPINOR_OP_SE;
-               nor->mtd.erasesize = nor->info->sector_size;
+               nor->mtd.erasesize = nor->info->sector_size ?:
+                       SPI_NOR_DEFAULT_SECTOR_SIZE;
        }
 
        if (mfr_flags & (USE_CLSR | USE_CLPEF)) {
index 197d2c1101ed5e37e3556edd3d2b8768e99b0040..44d2a546bf17742ae17494002c8254d1a1a5ceab 100644 (file)
@@ -61,66 +61,110 @@ static const struct spi_nor_fixups sst26vf_nor_fixups = {
 };
 
 static const struct flash_info sst_nor_parts[] = {
-       /* SST -- large erase sizes are "overlays", "sectors" are 4K */
-       { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               MFR_FLAGS(SST_WRITE) },
-       { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               MFR_FLAGS(SST_WRITE) },
-       { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               MFR_FLAGS(SST_WRITE) },
-       { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               MFR_FLAGS(SST_WRITE) },
-       { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP |
-                     SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               MFR_FLAGS(SST_WRITE) },
-       { "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               MFR_FLAGS(SST_WRITE) },
-       { "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               MFR_FLAGS(SST_WRITE) },
-       { "sst25wf020a", INFO(0x621612, 0, 64 * 1024,  4)
-               FLAGS(SPI_NOR_HAS_LOCK)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "sst25wf040b", INFO(0x621613, 0, 64 * 1024,  8)
-               FLAGS(SPI_NOR_HAS_LOCK)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               MFR_FLAGS(SST_WRITE) },
-       { "sst25wf080",  INFO(0xbf2505, 0, 64 * 1024, 16)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K)
-               MFR_FLAGS(SST_WRITE) },
-       { "sst26wf016b", INFO(0xbf2651, 0, 64 * 1024, 32)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "sst26vf016b", INFO(0xbf2641, 0, 64 * 1024, 32)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ) },
-       { "sst26vf032b", INFO(0xbf2642, 0, 0, 0)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               PARSE_SFDP
-               .fixups = &sst26vf_nor_fixups },
-       { "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               .fixups = &sst26vf_nor_fixups },
+       {
+               .id = SNOR_ID(0x62, 0x16, 0x12),
+               .name = "sst25wf020a",
+               .size = SZ_256K,
+               .flags = SPI_NOR_HAS_LOCK,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0x62, 0x16, 0x13),
+               .name = "sst25wf040b",
+               .size = SZ_512K,
+               .flags = SPI_NOR_HAS_LOCK,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xbf, 0x25, 0x01),
+               .name = "sst25wf512",
+               .size = SZ_64K,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .mfr_flags = SST_WRITE,
+       }, {
+               .id = SNOR_ID(0xbf, 0x25, 0x02),
+               .name = "sst25wf010",
+               .size = SZ_128K,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .mfr_flags = SST_WRITE,
+       }, {
+               .id = SNOR_ID(0xbf, 0x25, 0x03),
+               .name = "sst25wf020",
+               .size = SZ_256K,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .mfr_flags = SST_WRITE,
+       }, {
+               .id = SNOR_ID(0xbf, 0x25, 0x04),
+               .name = "sst25wf040",
+               .size = SZ_512K,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .mfr_flags = SST_WRITE,
+       }, {
+               .id = SNOR_ID(0xbf, 0x25, 0x05),
+               .name = "sst25wf080",
+               .size = SZ_1M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .mfr_flags = SST_WRITE,
+       }, {
+               .id = SNOR_ID(0xbf, 0x25, 0x41),
+               .name = "sst25vf016b",
+               .size = SZ_2M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .mfr_flags = SST_WRITE,
+       }, {
+               .id = SNOR_ID(0xbf, 0x25, 0x4a),
+               .name = "sst25vf032b",
+               .size = SZ_4M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .mfr_flags = SST_WRITE,
+       }, {
+               .id = SNOR_ID(0xbf, 0x25, 0x4b),
+               .name = "sst25vf064c",
+               .size = SZ_8M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_4BIT_BP | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xbf, 0x25, 0x8d),
+               .name = "sst25vf040b",
+               .size = SZ_512K,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .mfr_flags = SST_WRITE,
+       }, {
+               .id = SNOR_ID(0xbf, 0x25, 0x8e),
+               .name = "sst25vf080b",
+               .size = SZ_1M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K,
+               .mfr_flags = SST_WRITE,
+       }, {
+               .id = SNOR_ID(0xbf, 0x26, 0x41),
+               .name = "sst26vf016b",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ,
+       }, {
+               .id = SNOR_ID(0xbf, 0x26, 0x42),
+               .name = "sst26vf032b",
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .fixups = &sst26vf_nor_fixups,
+       }, {
+               .id = SNOR_ID(0xbf, 0x26, 0x43),
+               .name = "sst26vf064b",
+               .size = SZ_8M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_SWP_IS_VOLATILE,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixups = &sst26vf_nor_fixups,
+       }, {
+               .id = SNOR_ID(0xbf, 0x26, 0x51),
+               .name = "sst26wf016b",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }
 };
 
 static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
index 5ab9d532486081c8a3c1a81fc458678f866457e8..585813310ee1e0f53a7dc1690c137d2490a873f7 100644 (file)
@@ -34,17 +34,22 @@ static u8 spi_nor_get_sr_tb_mask(struct spi_nor *nor)
 static u64 spi_nor_get_min_prot_length_sr(struct spi_nor *nor)
 {
        unsigned int bp_slots, bp_slots_needed;
+       /*
+        * sector_size will eventually be replaced with the max erase size of
+        * the flash. For now, we need to have that ugly default.
+        */
+       unsigned int sector_size = nor->info->sector_size ?: SPI_NOR_DEFAULT_SECTOR_SIZE;
+       u64 n_sectors = div_u64(nor->params->size, sector_size);
        u8 mask = spi_nor_get_sr_bp_mask(nor);
 
        /* Reserved one for "protect none" and one for "protect all". */
        bp_slots = (1 << hweight8(mask)) - 2;
-       bp_slots_needed = ilog2(nor->info->n_sectors);
+       bp_slots_needed = ilog2(n_sectors);
 
        if (bp_slots_needed > bp_slots)
-               return nor->info->sector_size <<
-                       (bp_slots_needed - bp_slots);
+               return sector_size << (bp_slots_needed - bp_slots);
        else
-               return nor->info->sector_size;
+               return sector_size;
 }
 
 static void spi_nor_get_locked_range_sr(struct spi_nor *nor, u8 sr, loff_t *ofs,
index c09bb832b3b9e68145eaacc5a3e7cf341a3b831f..2dfdc555a69faf6392e9deb6110b88a82968a507 100644 (file)
@@ -35,8 +35,8 @@ static ssize_t jedec_id_show(struct device *dev,
        struct spi_device *spi = to_spi_device(dev);
        struct spi_mem *spimem = spi_get_drvdata(spi);
        struct spi_nor *nor = spi_mem_get_drvdata(spimem);
-       const u8 *id = nor->info->id_len ? nor->info->id : nor->id;
-       u8 id_len = nor->info->id_len ?: SPI_NOR_MAX_ID_LEN;
+       const u8 *id = nor->info->id ? nor->info->id->bytes : nor->id;
+       u8 id_len = nor->info->id ? nor->info->id->len : SPI_NOR_MAX_ID_LEN;
 
        return sysfs_emit(buf, "%*phN\n", id_len, id);
 }
@@ -78,7 +78,7 @@ static umode_t spi_nor_sysfs_is_visible(struct kobject *kobj,
 
        if (attr == &dev_attr_manufacturer.attr && !nor->manufacturer)
                return 0;
-       if (attr == &dev_attr_jedec_id.attr && !nor->info->id_len && !nor->id)
+       if (attr == &dev_attr_jedec_id.attr && !nor->info->id && !nor->id)
                return 0;
 
        return 0444;
index cd99c9a1c568844ca6ef3f307928fe116c448899..142fb27b2ea9a35da4a2eb74868df9a6b39d34fa 100644 (file)
@@ -42,107 +42,191 @@ static const struct spi_nor_fixups w25q256_fixups = {
 };
 
 static const struct flash_info winbond_nor_parts[] = {
-       /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
-       { "w25x05", INFO(0xef3010, 0, 64 * 1024,  1)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25x10", INFO(0xef3011, 0, 64 * 1024,  2)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25x20", INFO(0xef3012, 0, 64 * 1024,  4)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25x40", INFO(0xef3013, 0, 64 * 1024,  8)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25x80", INFO(0xef3014, 0, 64 * 1024,  16)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25x16", INFO(0xef3015, 0, 64 * 1024,  32)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25q16dw", INFO(0xef6015, 0, 64 * 1024,  32)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "w25x32", INFO(0xef3016, 0, 64 * 1024,  64)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25q16jv-im/jm", INFO(0xef7015, 0, 64 * 1024,  32)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "w25q20cl", INFO(0xef4012, 0, 64 * 1024,  4)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25q20bw", INFO(0xef5012, 0, 64 * 1024,  4)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25q20ew", INFO(0xef6012, 0, 64 * 1024,  4)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25q32", INFO(0xef4016, 0, 64 * 1024,  64)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               OTP_INFO(256, 3, 0x1000, 0x1000) },
-       { "w25q32jv", INFO(0xef7016, 0, 64 * 1024,  64)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "w25q32jwm", INFO(0xef8016, 0, 64 * 1024,  64)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               OTP_INFO(256, 3, 0x1000, 0x1000) },
-       { "w25q64jwm", INFO(0xef8017, 0, 64 * 1024, 128)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "w25q128jwm", INFO(0xef8018, 0, 64 * 1024, 256)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "w25q256jwm", INFO(0xef8019, 0, 64 * 1024, 512)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "w25q128jv", INFO(0xef7018, 0, 64 * 1024, 256)
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "w25q80", INFO(0xef5014, 0, 64 * 1024,  16)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16)
-               NO_SFDP_FLAGS(SECT_4K) },
-       { "w25q128", INFO(0xef4018, 0, 0, 0)
-               PARSE_SFDP
-               FLAGS(SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) },
-       { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)
-               .fixups = &w25q256_fixups },
-       { "w25q256jvm", INFO(0xef7019, 0, 64 * 1024, 512)
-               PARSE_SFDP },
-       { "w25q256jw", INFO(0xef6019, 0, 64 * 1024, 512)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_QUAD_READ |
-                             SPI_NOR_DUAL_READ) },
-       { "w25q512nwq", INFO(0xef6020, 0, 0, 0)
-               PARSE_SFDP
-               OTP_INFO(256, 3, 0x1000, 0x1000) },
-       { "w25q512nwm", INFO(0xef8020, 0, 64 * 1024, 1024)
-               PARSE_SFDP
-               OTP_INFO(256, 3, 0x1000, 0x1000) },
-       { "w25q512jvq", INFO(0xef4020, 0, 64 * 1024, 1024)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
+       {
+               .id = SNOR_ID(0xef, 0x30, 0x10),
+               .name = "w25x05",
+               .size = SZ_64K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x30, 0x11),
+               .name = "w25x10",
+               .size = SZ_128K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x30, 0x12),
+               .name = "w25x20",
+               .size = SZ_256K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x30, 0x13),
+               .name = "w25x40",
+               .size = SZ_512K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x30, 0x14),
+               .name = "w25x80",
+               .size = SZ_1M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x30, 0x15),
+               .name = "w25x16",
+               .size = SZ_2M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x30, 0x16),
+               .name = "w25x32",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x30, 0x17),
+               .name = "w25x64",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x40, 0x12),
+               .name = "w25q20cl",
+               .size = SZ_256K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x40, 0x14),
+               .name = "w25q80bl",
+               .size = SZ_1M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x40, 0x16),
+               .name = "w25q32",
+               .size = SZ_4M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x40, 0x17),
+               .name = "w25q64",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x40, 0x18),
+               .name = "w25q128",
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+       }, {
+               .id = SNOR_ID(0xef, 0x40, 0x19),
+               .name = "w25q256",
+               .size = SZ_32M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .fixups = &w25q256_fixups,
+       }, {
+               .id = SNOR_ID(0xef, 0x40, 0x20),
+               .name = "w25q512jvq",
+               .size = SZ_64M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x50, 0x12),
+               .name = "w25q20bw",
+               .size = SZ_256K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x50, 0x14),
+               .name = "w25q80",
+               .size = SZ_1M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x60, 0x12),
+               .name = "w25q20ew",
+               .size = SZ_256K,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x60, 0x15),
+               .name = "w25q16dw",
+               .size = SZ_2M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x60, 0x16),
+               .name = "w25q32dw",
+               .size = SZ_4M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+       }, {
+               .id = SNOR_ID(0xef, 0x60, 0x17),
+               .name = "w25q64dw",
+               .size = SZ_8M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x60, 0x18),
+               .name = "w25q128fw",
+               .size = SZ_16M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x60, 0x19),
+               .name = "w25q256jw",
+               .size = SZ_32M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x60, 0x20),
+               .name = "w25q512nwq",
+               .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+       }, {
+               .id = SNOR_ID(0xef, 0x70, 0x15),
+               .name = "w25q16jv-im/jm",
+               .size = SZ_2M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x70, 0x16),
+               .name = "w25q32jv",
+               .size = SZ_4M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x70, 0x17),
+               .name = "w25q64jvm",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K,
+       }, {
+               .id = SNOR_ID(0xef, 0x70, 0x18),
+               .name = "w25q128jv",
+               .size = SZ_16M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x70, 0x19),
+               .name = "w25q256jvm",
+       }, {
+               .id = SNOR_ID(0xef, 0x71, 0x19),
+               .name = "w25m512jv",
+               .size = SZ_64M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x80, 0x16),
+               .name = "w25q32jwm",
+               .size = SZ_4M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+               .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+       }, {
+               .id = SNOR_ID(0xef, 0x80, 0x17),
+               .name = "w25q64jwm",
+               .size = SZ_8M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x80, 0x18),
+               .name = "w25q128jwm",
+               .size = SZ_16M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x80, 0x19),
+               .name = "w25q256jwm",
+               .size = SZ_32M,
+               .flags = SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0xef, 0x80, 0x20),
+               .name = "w25q512nwm",
+               .otp = SNOR_OTP(256, 3, 0x1000, 0x1000),
+       },
 };
 
 /**
@@ -221,7 +305,7 @@ static int winbond_nor_late_init(struct spi_nor *nor)
 {
        struct spi_nor_flash_parameter *params = nor->params;
 
-       if (params->otp.org->n_regions)
+       if (params->otp.org)
                params->otp.ops = &winbond_nor_otp_ops;
 
        /*
index 00d53eae5ee83c634c232a45a086a0cffa8a7d87..f99118c691b03e860050c789e18404b6f09af5d4 100644 (file)
                   SPI_MEM_OP_NO_DUMMY,                                 \
                   SPI_MEM_OP_DATA_IN(1, buf, 0))
 
-#define S3AN_INFO(_jedec_id, _n_sectors, _page_size)                   \
-               .id = {                                                 \
-                       ((_jedec_id) >> 16) & 0xff,                     \
-                       ((_jedec_id) >> 8) & 0xff,                      \
-                       (_jedec_id) & 0xff                              \
-                       },                                              \
-               .id_len = 3,                                            \
-               .sector_size = (8 * (_page_size)),                      \
-               .n_sectors = (_n_sectors),                              \
-               .page_size = (_page_size),                              \
-               .n_banks = 1,                                           \
-               .addr_nbytes = 3,                                       \
-               .flags = SPI_NOR_NO_FR
+#define S3AN_FLASH(_id, _name, _n_sectors, _page_size)         \
+       .id = _id,                                              \
+       .name = _name,                                          \
+       .size = 8 * (_page_size) * (_n_sectors),                \
+       .sector_size = (8 * (_page_size)),                      \
+       .page_size = (_page_size),                              \
+       .flags = SPI_NOR_NO_FR
 
 /* Xilinx S3AN share MFR with Atmel SPI NOR */
 static const struct flash_info xilinx_nor_parts[] = {
        /* Xilinx S3AN Internal Flash */
-       { "3S50AN", S3AN_INFO(0x1f2200, 64, 264) },
-       { "3S200AN", S3AN_INFO(0x1f2400, 256, 264) },
-       { "3S400AN", S3AN_INFO(0x1f2400, 256, 264) },
-       { "3S700AN", S3AN_INFO(0x1f2500, 512, 264) },
-       { "3S1400AN", S3AN_INFO(0x1f2600, 512, 528) },
+       { S3AN_FLASH(SNOR_ID(0x1f, 0x22, 0x00), "3S50AN", 64, 264) },
+       { S3AN_FLASH(SNOR_ID(0x1f, 0x24, 0x00), "3S200AN", 256, 264) },
+       { S3AN_FLASH(SNOR_ID(0x1f, 0x24, 0x00), "3S400AN", 256, 264) },
+       { S3AN_FLASH(SNOR_ID(0x1f, 0x25, 0x00), "3S700AN", 512, 264) },
+       { S3AN_FLASH(SNOR_ID(0x1f, 0x26, 0x00), "3S1400AN", 512, 528) },
 };
 
 /*
@@ -144,7 +138,7 @@ static int xilinx_nor_setup(struct spi_nor *nor,
                page_size = (nor->params->page_size == 264) ? 256 : 512;
                nor->params->page_size = page_size;
                nor->mtd.writebufsize = page_size;
-               nor->params->size = 8 * page_size * nor->info->n_sectors;
+               nor->params->size = nor->info->size;
                nor->mtd.erasesize = 8 * page_size;
        } else {
                /* Flash in Default addressing mode */
index 051411e86339c8a2b1e48de50975fcc4d325ecd3..d5a06054b0dd8e4c8b4454d6020b39468fc8861b 100644 (file)
@@ -9,15 +9,20 @@
 #include "core.h"
 
 static const struct flash_info xmc_nor_parts[] = {
-       /* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
-       { "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
-       { "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256)
-               NO_SFDP_FLAGS(SECT_4K | SPI_NOR_DUAL_READ |
-                             SPI_NOR_QUAD_READ) },
+       {
+               .id = SNOR_ID(0x20, 0x70, 0x17),
+               .name = "XM25QH64A",
+               .size = SZ_8M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       }, {
+               .id = SNOR_ID(0x20, 0x70, 0x18),
+               .name = "XM25QH128A",
+               .size = SZ_16M,
+               .no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
+       },
 };
 
+/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
 const struct spi_nor_manufacturer spi_nor_xmc = {
        .name = "xmc",
        .parts = xmc_nor_parts,
index 437c5b83ffe51347b40841edbc7409ef91c0bd83..309a42aeaa4cdbbab017252175b18748c91d0cf4 100644 (file)
@@ -447,13 +447,15 @@ out_unlock:
 
 static void ubiblock_cleanup(struct ubiblock *dev)
 {
+       int id = dev->gd->first_minor;
+
        /* Stop new requests to arrive */
        del_gendisk(dev->gd);
        /* Finally destroy the blk queue */
        dev_info(disk_to_dev(dev->gd), "released");
        put_disk(dev->gd);
        blk_mq_free_tag_set(&dev->tag_set);
-       idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
+       idr_remove(&ubiblock_minor_idr, id);
 }
 
 int ubiblock_remove(struct ubi_volume_info *vi)
index 8ee51e49fced559d3a5e561956103cb53d6fcf50..7d4ff1193db6f04a114b1e0fcdde3984644607d1 100644 (file)
@@ -35,7 +35,7 @@
 #define MTD_PARAM_LEN_MAX 64
 
 /* Maximum number of comma-separated items in the 'mtd=' parameter */
-#define MTD_PARAM_MAX_COUNT 5
+#define MTD_PARAM_MAX_COUNT 6
 
 /* Maximum value for the number of bad PEBs per 1024 PEBs */
 #define MAX_MTD_UBI_BEB_LIMIT 768
@@ -54,6 +54,7 @@
  * @vid_hdr_offs: VID header offset
  * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
  * @enable_fm: enable fastmap when value is non-zero
+ * @need_resv_pool: reserve pool->max_size pebs when value is none-zero
  */
 struct mtd_dev_param {
        char name[MTD_PARAM_LEN_MAX];
@@ -61,6 +62,7 @@ struct mtd_dev_param {
        int vid_hdr_offs;
        int max_beb_per1024;
        int enable_fm;
+       int need_resv_pool;
 };
 
 /* Numbers of elements set in the @mtd_dev_param array */
@@ -825,6 +827,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
  * @vid_hdr_offset: VID header offset
  * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
  * @disable_fm: whether disable fastmap
+ * @need_resv_pool: whether reserve pebs to fill fm_pool
  *
  * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number
  * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in
@@ -840,7 +843,8 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
  * @ubi_devices_mutex.
  */
 int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
-                      int vid_hdr_offset, int max_beb_per1024, bool disable_fm)
+                      int vid_hdr_offset, int max_beb_per1024, bool disable_fm,
+                      bool need_resv_pool)
 {
        struct ubi_device *ubi;
        int i, err;
@@ -951,6 +955,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
                UBI_FM_MIN_POOL_SIZE);
 
        ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2;
+       ubi->fm_pool_rsv_cnt = need_resv_pool ? ubi->fm_pool.max_size : 0;
        ubi->fm_disabled = (!fm_autoconvert || disable_fm) ? 1 : 0;
        if (fm_debug)
                ubi_enable_dbg_chk_fastmap(ubi);
@@ -1273,7 +1278,8 @@ static int __init ubi_init(void)
                mutex_lock(&ubi_devices_mutex);
                err = ubi_attach_mtd_dev(mtd, p->ubi_num,
                                         p->vid_hdr_offs, p->max_beb_per1024,
-                                        p->enable_fm == 0);
+                                        p->enable_fm == 0,
+                                        p->need_resv_pool != 0);
                mutex_unlock(&ubi_devices_mutex);
                if (err < 0) {
                        pr_err("UBI error: cannot attach mtd%d\n",
@@ -1482,6 +1488,18 @@ static int ubi_mtd_param_parse(const char *val, const struct kernel_param *kp)
        } else
                p->enable_fm = 0;
 
+       token = tokens[5];
+       if (token) {
+               int err = kstrtoint(token, 10, &p->need_resv_pool);
+
+               if (err) {
+                       pr_err("UBI error: bad value for need_resv_pool parameter: %s\n",
+                               token);
+                       return -EINVAL;
+               }
+       } else
+               p->need_resv_pool = 0;
+
        mtd_devs += 1;
        return 0;
 }
@@ -1495,6 +1513,7 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
                      __stringify(CONFIG_MTD_UBI_BEB_LIMIT) ") if 0)\n"
                      "Optional \"ubi_num\" parameter specifies UBI device number which have to be assigned to the newly created UBI device (assigned automatically by default)\n"
                      "Optional \"enable_fm\" parameter determines whether to enable fastmap during attach. If the value is non-zero, fastmap is enabled. Default value is 0.\n"
+                     "Optional \"need_resv_pool\" parameter determines whether to reserve pool->max_size pebs during attach. If the value is non-zero, peb reservation is enabled. Default value is 0.\n"
                      "\n"
                      "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
                      "Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
index f43430b9c1e65c326e3c870a7743d32d5181ccd8..0d8f04cf03c5bbff04154ca30c62cd6c936cb648 100644 (file)
@@ -1041,7 +1041,8 @@ static long ctrl_cdev_ioctl(struct file *file, unsigned int cmd,
                 */
                mutex_lock(&ubi_devices_mutex);
                err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset,
-                                        req.max_beb_per1024, !!req.disable_fm);
+                                        req.max_beb_per1024, !!req.disable_fm,
+                                        !!req.need_resv_pool);
                mutex_unlock(&ubi_devices_mutex);
                if (err < 0)
                        put_mtd_device(mtd);
index 655ff41863e2be4e7d5169bb374ca3e365582c71..8d1f0e05892c1fa6d869b2e9c86a096295c2736c 100644 (file)
@@ -33,9 +33,6 @@
 #include <linux/err.h>
 #include "ubi.h"
 
-/* Number of physical eraseblocks reserved for atomic LEB change operation */
-#define EBA_RESERVED_PEBS 1
-
 /**
  * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
  * @pnum: the physical eraseblock number attached to the LEB
index 863f571f1adb545a4991c0d22efc9889968d22f3..2a9cc9413c427dfb4b70774e0a2ddf23e9bbfe5d 100644 (file)
@@ -76,7 +76,7 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
 {
        struct ubi_wl_entry *e = NULL;
 
-       if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
+       if (!ubi->free.rb_node)
                goto out;
 
        if (anchor)
@@ -98,43 +98,104 @@ out:
 }
 
 /*
- * has_enough_free_count - whether ubi has enough free pebs to fill fm pools
+ * wait_free_pebs_for_pool - wait until there enough free pebs
+ * @ubi: UBI device description object
+ *
+ * Wait and execute do_work until there are enough free pebs, fill pool
+ * as much as we can. This will reduce pool refilling times, which can
+ * reduce the fastmap updating frequency.
+ */
+static void wait_free_pebs_for_pool(struct ubi_device *ubi)
+{
+       struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+       struct ubi_fm_pool *pool = &ubi->fm_pool;
+       int free, expect_free, executed;
+       /*
+        * There are at least following free pebs which reserved by UBI:
+        * 1. WL_RESERVED_PEBS[1]
+        * 2. EBA_RESERVED_PEBS[1]
+        * 3. fm pebs - 1: Twice fastmap size deducted by fastmap and fm_anchor
+        * 4. beb_rsvd_pebs: This value should be get under lock ubi->wl_lock
+        */
+       int reserved = WL_RESERVED_PEBS + EBA_RESERVED_PEBS +
+                      ubi->fm_size / ubi->leb_size - 1 + ubi->fm_pool_rsv_cnt;
+
+       do {
+               spin_lock(&ubi->wl_lock);
+               free = ubi->free_count;
+               free += pool->size - pool->used + wl_pool->size - wl_pool->used;
+               expect_free = reserved + ubi->beb_rsvd_pebs;
+               spin_unlock(&ubi->wl_lock);
+
+               /*
+                * Break out if there are no works or work is executed failure,
+                * given the fact that erase_worker will schedule itself when
+                * -EBUSY is returned from mtd layer caused by system shutdown.
+                */
+               if (do_work(ubi, &executed) || !executed)
+                       break;
+       } while (free < expect_free);
+}
+
+/*
+ * left_free_count - returns the number of free pebs to fill fm pools
  * @ubi: UBI device description object
- * @is_wl_pool: whether UBI is filling wear leveling pool
  *
- * This helper function checks whether there are enough free pebs (deducted
- * by fastmap pebs) to fill fm_pool and fm_wl_pool, above rule works after
- * there is at least one of free pebs is filled into fm_wl_pool.
- * For wear leveling pool, UBI should also reserve free pebs for bad pebs
- * handling, because there maybe no enough free pebs for user volumes after
- * producing new bad pebs.
+ * This helper function returns the number of free pebs (deducted
+ * by fastmap pebs) to fill fm_pool and fm_wl_pool.
  */
-static bool has_enough_free_count(struct ubi_device *ubi, bool is_wl_pool)
+static int left_free_count(struct ubi_device *ubi)
 {
        int fm_used = 0;        // fastmap non anchor pebs.
-       int beb_rsvd_pebs;
 
        if (!ubi->free.rb_node)
-               return false;
+               return 0;
 
-       beb_rsvd_pebs = is_wl_pool ? ubi->beb_rsvd_pebs : 0;
-       if (ubi->fm_wl_pool.size > 0 && !(ubi->ro_mode || ubi->fm_disabled))
+       if (!ubi->ro_mode && !ubi->fm_disabled)
                fm_used = ubi->fm_size / ubi->leb_size - 1;
 
-       return ubi->free_count - beb_rsvd_pebs > fm_used;
+       return ubi->free_count - fm_used;
+}
+
+/*
+ * can_fill_pools - whether free PEBs will be left after filling pools
+ * @ubi: UBI device description object
+ * @free: current number of free PEBs
+ *
+ * Return %1 if there are still left free PEBs after filling pools,
+ * otherwise %0 is returned.
+ */
+static int can_fill_pools(struct ubi_device *ubi, int free)
+{
+       struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
+       struct ubi_fm_pool *pool = &ubi->fm_pool;
+       int pool_need = pool->max_size - pool->size +
+                       wl_pool->max_size - wl_pool->size;
+
+       if (free - pool_need < 1)
+               return 0;
+
+       return 1;
 }
 
 /**
- * ubi_refill_pools - refills all fastmap PEB pools.
+ * ubi_refill_pools_and_lock - refills all fastmap PEB pools and takes fm locks.
  * @ubi: UBI device description object
  */
-void ubi_refill_pools(struct ubi_device *ubi)
+void ubi_refill_pools_and_lock(struct ubi_device *ubi)
 {
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        struct ubi_wl_entry *e;
        int enough;
 
+       if (!ubi->ro_mode && !ubi->fm_disabled)
+               wait_free_pebs_for_pool(ubi);
+
+       down_write(&ubi->fm_protect);
+       down_write(&ubi->work_sem);
+       down_write(&ubi->fm_eba_sem);
+
        spin_lock(&ubi->wl_lock);
 
        return_unused_pool_pebs(ubi, wl_pool);
@@ -159,7 +220,7 @@ void ubi_refill_pools(struct ubi_device *ubi)
        for (;;) {
                enough = 0;
                if (pool->size < pool->max_size) {
-                       if (!has_enough_free_count(ubi, false))
+                       if (left_free_count(ubi) <= 0)
                                break;
 
                        e = wl_get_wle(ubi);
@@ -172,10 +233,13 @@ void ubi_refill_pools(struct ubi_device *ubi)
                        enough++;
 
                if (wl_pool->size < wl_pool->max_size) {
-                       if (!has_enough_free_count(ubi, true))
+                       int left_free = left_free_count(ubi);
+
+                       if (left_free <= 0)
                                break;
 
-                       e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+                       e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF,
+                                         !can_fill_pools(ubi, left_free));
                        self_check_in_wl_tree(ubi, e, &ubi->free);
                        rb_erase(&e->u.rb, &ubi->free);
                        ubi->free_count--;
@@ -210,7 +274,7 @@ static int produce_free_peb(struct ubi_device *ubi)
 
        while (!ubi->free.rb_node && ubi->works_count) {
                dbg_wl("do one work synchronously");
-               err = do_work(ubi);
+               err = do_work(ubi, NULL);
 
                if (err)
                        return err;
@@ -315,12 +379,12 @@ static bool need_wear_leveling(struct ubi_device *ubi)
        if (!e) {
                if (!ubi->free.rb_node)
                        return false;
-               e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+               e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
                ec = e->ec;
        } else {
                ec = e->ec;
                if (ubi->free.rb_node) {
-                       e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+                       e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
                        ec = max(ec, e->ec);
                }
        }
@@ -481,7 +545,7 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
 static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
                                           struct ubi_wl_entry *e,
                                           struct rb_root *root) {
-       if (e && !ubi->fm_disabled && !ubi->fm &&
+       if (e && !ubi->fm_disabled && !ubi->fm && !ubi->fm_anchor &&
            e->pnum < UBI_FM_MAX_START)
                e = rb_entry(rb_next(root->rb_node),
                             struct ubi_wl_entry, u.rb);
index 28c8151a0725d5b5c9abbc31bdab3e48fad3253b..2a728c31e6b8549353a60c38abb387d123821bc5 100644 (file)
@@ -20,7 +20,7 @@ static inline unsigned long *init_seen(struct ubi_device *ubi)
        if (!ubi_dbg_chk_fastmap(ubi))
                return NULL;
 
-       ret = bitmap_zalloc(ubi->peb_count, GFP_KERNEL);
+       ret = bitmap_zalloc(ubi->peb_count, GFP_NOFS);
        if (!ret)
                return ERR_PTR(-ENOMEM);
 
@@ -105,7 +105,7 @@ static struct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
        struct ubi_vid_io_buf *new;
        struct ubi_vid_hdr *vh;
 
-       new = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
+       new = ubi_alloc_vid_buf(ubi, GFP_NOFS);
        if (!new)
                goto out;
 
@@ -1389,53 +1389,6 @@ out:
        return ret;
 }
 
-/**
- * erase_block - Manually erase a PEB.
- * @ubi: UBI device object
- * @pnum: PEB to be erased
- *
- * Returns the new EC value on success, < 0 indicates an internal error.
- */
-static int erase_block(struct ubi_device *ubi, int pnum)
-{
-       int ret;
-       struct ubi_ec_hdr *ec_hdr;
-       long long ec;
-
-       ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
-       if (!ec_hdr)
-               return -ENOMEM;
-
-       ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
-       if (ret < 0)
-               goto out;
-       else if (ret && ret != UBI_IO_BITFLIPS) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       ret = ubi_io_sync_erase(ubi, pnum, 0);
-       if (ret < 0)
-               goto out;
-
-       ec = be64_to_cpu(ec_hdr->ec);
-       ec += ret;
-       if (ec > UBI_MAX_ERASECOUNTER) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       ec_hdr->ec = cpu_to_be64(ec);
-       ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
-       if (ret < 0)
-               goto out;
-
-       ret = ec;
-out:
-       kfree(ec_hdr);
-       return ret;
-}
-
 /**
  * invalidate_fastmap - destroys a fastmap.
  * @ubi: UBI device object
@@ -1462,7 +1415,7 @@ static int invalidate_fastmap(struct ubi_device *ubi)
        ubi->fm = NULL;
 
        ret = -ENOMEM;
-       fm = kzalloc(sizeof(*fm), GFP_KERNEL);
+       fm = kzalloc(sizeof(*fm), GFP_NOFS);
        if (!fm)
                goto out;
 
@@ -1538,11 +1491,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
        struct ubi_fastmap_layout *new_fm, *old_fm;
        struct ubi_wl_entry *tmp_e;
 
-       down_write(&ubi->fm_protect);
-       down_write(&ubi->work_sem);
-       down_write(&ubi->fm_eba_sem);
-
-       ubi_refill_pools(ubi);
+       ubi_refill_pools_and_lock(ubi);
 
        if (ubi->ro_mode || ubi->fm_disabled) {
                up_write(&ubi->fm_eba_sem);
@@ -1551,7 +1500,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
                return 0;
        }
 
-       new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+       new_fm = kzalloc(sizeof(*new_fm), GFP_NOFS);
        if (!new_fm) {
                up_write(&ubi->fm_eba_sem);
                up_write(&ubi->work_sem);
@@ -1576,7 +1525,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
 
                if (!tmp_e) {
                        if (old_fm && old_fm->e[i]) {
-                               ret = erase_block(ubi, old_fm->e[i]->pnum);
+                               ret = ubi_sync_erase(ubi, old_fm->e[i], 0);
                                if (ret < 0) {
                                        ubi_err(ubi, "could not erase old fastmap PEB");
 
@@ -1628,7 +1577,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
        if (old_fm) {
                /* no fresh anchor PEB was found, reuse the old one */
                if (!tmp_e) {
-                       ret = erase_block(ubi, old_fm->e[0]->pnum);
+                       ret = ubi_sync_erase(ubi, old_fm->e[0], 0);
                        if (ret < 0) {
                                ubi_err(ubi, "could not erase old anchor PEB");
 
@@ -1640,7 +1589,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
                                goto err;
                        }
                        new_fm->e[0] = old_fm->e[0];
-                       new_fm->e[0]->ec = ret;
                        old_fm->e[0] = NULL;
                } else {
                        /* we've got a new anchor PEB, return the old one */
index c8f1bd4fa10080c775042ecd9fd3514f366fc82d..a5ec566df0d74d255e200f7c67f493a859d52dc1 100644 (file)
@@ -82,6 +82,9 @@ void ubi_err(const struct ubi_device *ubi, const char *fmt, ...);
 #define UBI_DFS_DIR_NAME "ubi%d"
 #define UBI_DFS_DIR_LEN  (3 + 2 + 1)
 
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
 /*
  * Error codes returned by the I/O sub-system.
  *
@@ -491,6 +494,7 @@ struct ubi_debug_info {
  * @fast_attach: non-zero if UBI was attached by fastmap
  * @fm_anchor: The next anchor PEB to use for fastmap
  * @fm_do_produce_anchor: If true produce an anchor PEB in wl
+ * @fm_pool_rsv_cnt: Number of reserved PEBs for filling pool/wl_pool
  *
  * @used: RB-tree of used physical eraseblocks
  * @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -601,6 +605,7 @@ struct ubi_device {
        int fast_attach;
        struct ubi_wl_entry *fm_anchor;
        int fm_do_produce_anchor;
+       int fm_pool_rsv_cnt;
 
        /* Wear-leveling sub-system's stuff */
        struct rb_root used;
@@ -902,6 +907,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
                   struct ubi_attach_info *ai_scan);
 
 /* wl.c */
+int ubi_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture);
 int ubi_wl_get_peb(struct ubi_device *ubi);
 int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
                   int pnum, int torture);
@@ -914,7 +920,7 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
 int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
                      int lnum, int torture);
 int ubi_is_erase_work(struct ubi_work *wrk);
-void ubi_refill_pools(struct ubi_device *ubi);
+void ubi_refill_pools_and_lock(struct ubi_device *ubi);
 int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
 int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force_scrub);
 
@@ -938,7 +944,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
 /* build.c */
 int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
                       int vid_hdr_offset, int max_beb_per1024,
-                      bool disable_fm);
+                      bool disable_fm, bool need_resv_pool);
 int ubi_detach_mtd_dev(int ubi_num, int anyway);
 struct ubi_device *ubi_get_device(int ubi_num);
 void ubi_put_device(struct ubi_device *ubi);
index 26a214f016c18448469c4bb3988ac09af224ab95..a357f3d27f2f3ddce617de80b6f13719b7dccd9b 100644 (file)
@@ -181,11 +181,13 @@ static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
 /**
  * do_work - do one pending work.
  * @ubi: UBI device description object
+ * @executed: whether there is one work is executed
  *
  * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * case of failure. If @executed is not NULL and there is one work executed,
+ * @executed is set as %1, otherwise @executed is set as %0.
  */
-static int do_work(struct ubi_device *ubi)
+static int do_work(struct ubi_device *ubi, int *executed)
 {
        int err;
        struct ubi_work *wrk;
@@ -203,9 +205,13 @@ static int do_work(struct ubi_device *ubi)
        if (list_empty(&ubi->works)) {
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->work_sem);
+               if (executed)
+                       *executed = 0;
                return 0;
        }
 
+       if (executed)
+               *executed = 1;
        wrk = list_entry(ubi->works.next, struct ubi_work, list);
        list_del(&wrk->list);
        ubi->works_count -= 1;
@@ -311,12 +317,14 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
  * @ubi: UBI device description object
  * @root: the RB-tree where to look for
  * @diff: maximum possible difference from the smallest erase counter
+ * @pick_max: pick PEB even its erase counter beyonds 'min_ec + @diff'
  *
  * This function looks for a wear leveling entry with erase counter closest to
  * min + @diff, where min is the smallest erase counter.
  */
 static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
-                                         struct rb_root *root, int diff)
+                                         struct rb_root *root, int diff,
+                                         int pick_max)
 {
        struct rb_node *p;
        struct ubi_wl_entry *e;
@@ -330,9 +338,11 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
                struct ubi_wl_entry *e1;
 
                e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
-               if (e1->ec >= max)
+               if (e1->ec >= max) {
+                       if (pick_max)
+                               e = e1;
                        p = p->rb_left;
-               else {
+               } else {
                        p = p->rb_right;
                        e = e1;
                }
@@ -361,12 +371,15 @@ static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
        if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
                e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
 
-               /* If no fastmap has been written and this WL entry can be used
-                * as anchor PEB, hold it back and return the second best
-                * WL entry such that fastmap can use the anchor PEB later. */
+               /*
+                * If no fastmap has been written and fm_anchor is not
+                * reserved and this WL entry can be used as anchor PEB
+                * hold it back and return the second best WL entry such
+                * that fastmap can use the anchor PEB later.
+                */
                e = may_reserve_for_fm(ubi, e, root);
        } else
-               e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
+               e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2, 0);
 
        return e;
 }
@@ -427,7 +440,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
 }
 
 /**
- * sync_erase - synchronously erase a physical eraseblock.
+ * ubi_sync_erase - synchronously erase a physical eraseblock.
  * @ubi: UBI device description object
  * @e: the physical eraseblock to erase
  * @torture: if the physical eraseblock has to be tortured
@@ -435,8 +448,7 @@ static int prot_queue_del(struct ubi_device *ubi, int pnum)
  * This function returns zero in case of success and a negative error code in
  * case of failure.
  */
-static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
-                     int torture)
+int ubi_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture)
 {
        int err;
        struct ubi_ec_hdr *ec_hdr;
@@ -1040,7 +1052,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
                 * %UBI_WL_THRESHOLD.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
-               e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+               e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
 
                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
                        goto out_unlock;
@@ -1094,7 +1106,7 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
        dbg_wl("erase PEB %d EC %d LEB %d:%d",
               pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
 
-       err = sync_erase(ubi, e, wl_wrk->torture);
+       err = ubi_sync_erase(ubi, e, wl_wrk->torture);
        if (!err) {
                spin_lock(&ubi->wl_lock);
 
@@ -1686,7 +1698,7 @@ int ubi_thread(void *u)
                }
                spin_unlock(&ubi->wl_lock);
 
-               err = do_work(ubi);
+               err = do_work(ubi, NULL);
                if (err) {
                        ubi_err(ubi, "%s: work failed with error code %d",
                                ubi->bgt_name, err);
@@ -1749,7 +1761,7 @@ static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync
        ubi->lookuptbl[e->pnum] = e;
 
        if (sync) {
-               err = sync_erase(ubi, e, false);
+               err = ubi_sync_erase(ubi, e, false);
                if (err)
                        goto out_free;
 
@@ -2071,7 +2083,7 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 {
        struct ubi_wl_entry *e;
 
-       e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
+       e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
        self_check_in_wl_tree(ubi, e, &ubi->free);
        ubi->free_count--;
        ubi_assert(ubi->free_count >= 0);
@@ -2097,7 +2109,7 @@ static int produce_free_peb(struct ubi_device *ubi)
                spin_unlock(&ubi->wl_lock);
 
                dbg_wl("do one work synchronously");
-               err = do_work(ubi);
+               err = do_work(ubi, NULL);
 
                spin_lock(&ubi->wl_lock);
                if (err)
index 5ebe374a08aed9954e385cbcdabad7d974617659..7b6715ef6d4a356905e8805c4ec23a3b27a9e113 100644 (file)
@@ -10,8 +10,10 @@ static bool need_wear_leveling(struct ubi_device *ubi);
 static void ubi_fastmap_close(struct ubi_device *ubi);
 static inline void ubi_fastmap_init(struct ubi_device *ubi, int *count)
 {
-       /* Reserve enough LEBs to store two fastmaps. */
-       *count += (ubi->fm_size / ubi->leb_size) * 2;
+       if (ubi->fm_disabled)
+               ubi->fm_pool_rsv_cnt = 0;
+       /* Reserve enough LEBs to store two fastmaps and to fill pools. */
+       *count += (ubi->fm_size / ubi->leb_size) * 2 + ubi->fm_pool_rsv_cnt;
        INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
 }
 static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
index d8ab2b77d201e03d10e162a9a0b297a048a240fb..167a86f39f277118582de8692d6e549f8d7a9e0b 100644 (file)
@@ -32,7 +32,7 @@ static int lan9303_mdio_write(void *ctx, uint32_t reg, uint32_t val)
        struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
 
        reg <<= 2; /* reg num to offset */
-       mutex_lock(&sw_dev->device->bus->mdio_lock);
+       mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
        lan9303_mdio_real_write(sw_dev->device, reg, val & 0xffff);
        lan9303_mdio_real_write(sw_dev->device, reg + 2, (val >> 16) & 0xffff);
        mutex_unlock(&sw_dev->device->bus->mdio_lock);
@@ -50,7 +50,7 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
        struct lan9303_mdio *sw_dev = (struct lan9303_mdio *)ctx;
 
        reg <<= 2; /* reg num to offset */
-       mutex_lock(&sw_dev->device->bus->mdio_lock);
+       mutex_lock_nested(&sw_dev->device->bus->mdio_lock, MDIO_MUTEX_NESTED);
        *val = lan9303_mdio_real_read(sw_dev->device, reg);
        *val |= (lan9303_mdio_real_read(sw_dev->device, reg + 2) << 16);
        mutex_unlock(&sw_dev->device->bus->mdio_lock);
index 5665d0c3668fb149a26e7e1d27d2df52885d09eb..1dee27349367e76e9460b4eaeffdd42eb1946b42 100644 (file)
@@ -6647,9 +6647,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
 
        tnapi->tx_cons = sw_idx;
 
-       /* Need to make the tx_cons update visible to tg3_start_xmit()
+       /* Need to make the tx_cons update visible to __tg3_start_xmit()
         * before checking for netif_queue_stopped().  Without the
-        * memory barrier, there is a small possibility that tg3_start_xmit()
+        * memory barrier, there is a small possibility that __tg3_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();
@@ -7889,7 +7889,7 @@ static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
        return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
 }
 
-static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+static netdev_tx_t __tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
  * indicated in tg3_tx_frag_set()
@@ -7923,7 +7923,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
 
        skb_list_walk_safe(segs, seg, next) {
                skb_mark_not_on_list(seg);
-               tg3_start_xmit(seg, tp->dev);
+               __tg3_start_xmit(seg, tp->dev);
        }
 
 tg3_tso_bug_end:
@@ -7933,7 +7933,7 @@ tg3_tso_bug_end:
 }
 
 /* hard_start_xmit for all devices */
-static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t __tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss, vlan = 0;
@@ -8182,11 +8182,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        netif_tx_wake_queue(txq);
        }
 
-       if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
-               /* Packets are ready, update Tx producer idx on card. */
-               tw32_tx_mbox(tnapi->prodmbox, entry);
-       }
-
        return NETDEV_TX_OK;
 
 dma_error:
@@ -8199,6 +8194,42 @@ drop_nofree:
        return NETDEV_TX_OK;
 }
 
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct netdev_queue *txq;
+       u16 skb_queue_mapping;
+       netdev_tx_t ret;
+
+       skb_queue_mapping = skb_get_queue_mapping(skb);
+       txq = netdev_get_tx_queue(dev, skb_queue_mapping);
+
+       ret = __tg3_start_xmit(skb, dev);
+
+       /* Notify the hardware that packets are ready by updating the TX ring
+        * tail pointer. We respect netdev_xmit_more() thus avoiding poking
+        * the hardware for every packet. To guarantee forward progress the TX
+        * ring must be drained when it is full as indicated by
+        * netif_xmit_stopped(). This needs to happen even when the current
+        * skb was dropped or rejected with NETDEV_TX_BUSY. Otherwise packets
+        * queued by previous __tg3_start_xmit() calls might get stuck in
+        * the queue forever.
+        */
+       if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
+               struct tg3_napi *tnapi;
+               struct tg3 *tp;
+
+               tp = netdev_priv(dev);
+               tnapi = &tp->napi[skb_queue_mapping];
+
+               if (tg3_flag(tp, ENABLE_TSS))
+                       tnapi++;
+
+               tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
+       }
+
+       return ret;
+}
+
 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
 {
        if (enable) {
@@ -17729,7 +17760,7 @@ static int tg3_init_one(struct pci_dev *pdev,
         * device behind the EPB cannot support DMA addresses > 40-bit.
         * On 64-bit systems with IOMMU, use 40-bit dma_mask.
         * On 64-bit systems without IOMMU, use 64-bit dma_mask and
-        * do DMA address check in tg3_start_xmit().
+        * do DMA address check in __tg3_start_xmit().
         */
        if (tg3_flag(tp, IS_5788))
                persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
@@ -18127,7 +18158,8 @@ static void tg3_shutdown(struct pci_dev *pdev)
        if (netif_running(dev))
                dev_close(dev);
 
-       tg3_power_down(tp);
+       if (system_state == SYSTEM_POWER_OFF)
+               tg3_power_down(tp);
 
        rtnl_unlock();
 
index 30bec47bc665bf6fe66fc28cbc987cb0884fde0c..cffbf27c4656b27b0694f2a1ac88ad596c6196b4 100644 (file)
@@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
        if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
            priv->num_tx_rings) {
                NL_SET_ERR_MSG_FMT_MOD(extack,
-                                      "Reserving %d XDP TXQs does not leave a minimum of %d TXQs for network stack (total %d available)",
+                                      "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
                                       num_xdp_tx_queues,
                                       priv->min_num_stack_tx_queues,
                                       priv->num_tx_rings);
index 74bc111b4849412387258152611951b5d86ec40e..cc4e9e2addb75f918a7dc2f262b726ae260a9578 100644 (file)
@@ -231,6 +231,5 @@ int i40e_devlink_create_port(struct i40e_pf *pf)
  **/
 void i40e_devlink_destroy_port(struct i40e_pf *pf)
 {
-       devlink_port_type_clear(&pf->devlink_port);
        devlink_port_unregister(&pf->devlink_port);
 }
index 3157d14d9b1213765fcd3d4443283fbaa5001afb..f7a332e51524d1a28b2895cf16ce2c2e22639109 100644 (file)
@@ -14213,8 +14213,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
        }
        set_bit(__I40E_VSI_RELEASING, vsi->state);
        uplink_seid = vsi->uplink_seid;
-       if (vsi->type == I40E_VSI_MAIN)
-               i40e_devlink_destroy_port(pf);
+
        if (vsi->type != I40E_VSI_SRIOV) {
                if (vsi->netdev_registered) {
                        vsi->netdev_registered = false;
@@ -14228,6 +14227,9 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
                i40e_vsi_disable_irq(vsi);
        }
 
+       if (vsi->type == I40E_VSI_MAIN)
+               i40e_devlink_destroy_port(pf);
+
        spin_lock_bh(&vsi->mac_filter_hash_lock);
 
        /* clear the sync flag on all filters */
@@ -14402,14 +14404,14 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
 
 err_rings:
        i40e_vsi_free_q_vectors(vsi);
-       if (vsi->type == I40E_VSI_MAIN)
-               i40e_devlink_destroy_port(pf);
        if (vsi->netdev_registered) {
                vsi->netdev_registered = false;
                unregister_netdev(vsi->netdev);
                free_netdev(vsi->netdev);
                vsi->netdev = NULL;
        }
+       if (vsi->type == I40E_VSI_MAIN)
+               i40e_devlink_destroy_port(pf);
        i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
 err_vsi:
        i40e_vsi_clear(vsi);
index b980f89dc892df4c92b16bab658d0d79ce229ea2..cd065ec48c87e62752081c8491bf41acd5c8d63a 100644 (file)
@@ -628,7 +628,7 @@ void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
                INIT_LIST_HEAD(&ndlist.node);
                rcu_read_lock();
                for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-                       nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+                       nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
                        if (!nl)
                                break;
 
@@ -1555,18 +1555,12 @@ static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
  */
 static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
 {
-       struct ice_lag_netdev_list *entry;
        struct ice_netdev_priv *np;
-       struct net_device *netdev;
        struct ice_pf *pf;
 
-       list_for_each_entry(entry, lag->netdev_head, node) {
-               netdev = entry->netdev;
-               np = netdev_priv(netdev);
-               pf = np->vsi->back;
-
-               ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
-       }
+       np = netdev_priv(lag->netdev);
+       pf = np->vsi->back;
+       ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
 }
 
 /**
@@ -1698,7 +1692,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
 
                rcu_read_lock();
                for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
-                       nd_list = kzalloc(sizeof(*nd_list), GFP_KERNEL);
+                       nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
                        if (!nd_list)
                                break;
 
@@ -2075,7 +2069,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
                INIT_LIST_HEAD(&ndlist.node);
                rcu_read_lock();
                for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
-                       nl = kzalloc(sizeof(*nl), GFP_KERNEL);
+                       nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
                        if (!nl)
                                break;
 
index 37b54db91df2750e316afe1a82a465bd742a854f..dd03cb69ad26b4360b14e63c7c9745f6063f70dc 100644 (file)
@@ -630,32 +630,83 @@ bool ice_is_tunnel_supported(struct net_device *dev)
        return ice_tc_tun_get_type(dev) != TNL_LAST;
 }
 
-static int
-ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
-                           struct flow_action_entry *act)
+static bool ice_tc_is_dev_uplink(struct net_device *dev)
+{
+       return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
+}
+
+static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
+                                       struct ice_tc_flower_fltr *fltr,
+                                       struct net_device *target_dev)
 {
        struct ice_repr *repr;
 
+       fltr->action.fltr_act = ICE_FWD_TO_VSI;
+
+       if (ice_is_port_repr_netdev(filter_dev) &&
+           ice_is_port_repr_netdev(target_dev)) {
+               repr = ice_netdev_to_repr(target_dev);
+
+               fltr->dest_vsi = repr->src_vsi;
+               fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+       } else if (ice_is_port_repr_netdev(filter_dev) &&
+                  ice_tc_is_dev_uplink(target_dev)) {
+               repr = ice_netdev_to_repr(filter_dev);
+
+               fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
+               fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+       } else if (ice_tc_is_dev_uplink(filter_dev) &&
+                  ice_is_port_repr_netdev(target_dev)) {
+               repr = ice_netdev_to_repr(target_dev);
+
+               fltr->dest_vsi = repr->src_vsi;
+               fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+       } else {
+               NL_SET_ERR_MSG_MOD(fltr->extack,
+                                  "Unsupported netdevice in switchdev mode");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int
+ice_tc_setup_drop_action(struct net_device *filter_dev,
+                        struct ice_tc_flower_fltr *fltr)
+{
+       fltr->action.fltr_act = ICE_DROP_PACKET;
+
+       if (ice_is_port_repr_netdev(filter_dev)) {
+               fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+       } else if (ice_tc_is_dev_uplink(filter_dev)) {
+               fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+       } else {
+               NL_SET_ERR_MSG_MOD(fltr->extack,
+                                  "Unsupported netdevice in switchdev mode");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
+                                      struct ice_tc_flower_fltr *fltr,
+                                      struct flow_action_entry *act)
+{
+       int err;
+
        switch (act->id) {
        case FLOW_ACTION_DROP:
-               fltr->action.fltr_act = ICE_DROP_PACKET;
+               err = ice_tc_setup_drop_action(filter_dev, fltr);
+               if (err)
+                       return err;
+
                break;
 
        case FLOW_ACTION_REDIRECT:
-               fltr->action.fltr_act = ICE_FWD_TO_VSI;
-
-               if (ice_is_port_repr_netdev(act->dev)) {
-                       repr = ice_netdev_to_repr(act->dev);
-
-                       fltr->dest_vsi = repr->src_vsi;
-                       fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
-               } else if (netif_is_ice(act->dev) ||
-                          ice_is_tunnel_supported(act->dev)) {
-                       fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
-               } else {
-                       NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
-                       return -EINVAL;
-               }
+               err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
+               if (err)
+                       return err;
 
                break;
 
@@ -696,10 +747,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
                goto exit;
        }
 
-       /* egress traffic is always redirect to uplink */
-       if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
-               fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
-
        rule_info.sw_act.fltr_act = fltr->action.fltr_act;
        if (fltr->action.fltr_act != ICE_DROP_PACKET)
                rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
@@ -713,13 +760,21 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
        rule_info.flags_info.act_valid = true;
 
        if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+               /* Uplink to VF */
                rule_info.sw_act.flag |= ICE_FLTR_RX;
                rule_info.sw_act.src = hw->pf_id;
                rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
-       } else {
+       } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+                  fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
+               /* VF to Uplink */
                rule_info.sw_act.flag |= ICE_FLTR_TX;
                rule_info.sw_act.src = vsi->idx;
                rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+       } else {
+               /* VF to VF */
+               rule_info.sw_act.flag |= ICE_FLTR_TX;
+               rule_info.sw_act.src = vsi->idx;
+               rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
        }
 
        /* specify the cookie as filter_rule_id */
@@ -1745,16 +1800,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
 
 /**
  * ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @filter_dev: Pointer to device on which filter is being added
  * @vsi: Pointer to VSI
  * @cls_flower: Pointer to TC flower offload structure
  * @fltr: Pointer to TC flower filter structure
  *
  * Parse the actions for a TC filter
  */
-static int
-ice_parse_tc_flower_actions(struct ice_vsi *vsi,
-                           struct flow_cls_offload *cls_flower,
-                           struct ice_tc_flower_fltr *fltr)
+static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
+                                      struct ice_vsi *vsi,
+                                      struct flow_cls_offload *cls_flower,
+                                      struct ice_tc_flower_fltr *fltr)
 {
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
        struct flow_action *flow_action = &rule->action;
@@ -1769,7 +1825,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
 
        flow_action_for_each(i, act, flow_action) {
                if (ice_is_eswitch_mode_switchdev(vsi->back))
-                       err = ice_eswitch_tc_parse_action(fltr, act);
+                       err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
                else
                        err = ice_tc_parse_action(vsi, fltr, act);
                if (err)
@@ -1856,7 +1912,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
        if (err < 0)
                goto err;
 
-       err = ice_parse_tc_flower_actions(vsi, f, fltr);
+       err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
        if (err < 0)
                goto err;
 
index 5e1ef70d54fe4147a42e5a3263b73cd3e6316679..1f728a9004d9e40d4434534422a42c8c537f5eae 100644 (file)
@@ -2365,7 +2365,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
  */
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
 {
-       const struct skb_shared_info *shinfo = skb_shinfo(skb);
+       const struct skb_shared_info *shinfo;
        union {
                struct iphdr *v4;
                struct ipv6hdr *v6;
@@ -2379,13 +2379,15 @@ int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off)
        u32 paylen, l4_start;
        int err;
 
-       if (!shinfo->gso_size)
+       if (!skb_is_gso(skb))
                return 0;
 
        err = skb_cow_head(skb, 0);
        if (err < 0)
                return err;
 
+       shinfo = skb_shinfo(skb);
+
        ip.hdr = skb_network_header(skb);
        l4.hdr = skb_transport_header(skb);
 
index 1a42bfded8722ac7c494552ee98739a95d87089c..7ca6941ea0b9b4d684ba45482b88db066a728f98 100644 (file)
@@ -818,7 +818,6 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
        int qidx, sqe_tail, sqe_head;
        struct otx2_snd_queue *sq;
        u64 incr, *ptr, val;
-       int timeout = 1000;
 
        ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
        for (qidx = 0; qidx < otx2_get_total_tx_queues(pfvf); qidx++) {
@@ -827,15 +826,11 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
                        continue;
 
                incr = (u64)qidx << 32;
-               while (timeout) {
-                       val = otx2_atomic64_add(incr, ptr);
-                       sqe_head = (val >> 20) & 0x3F;
-                       sqe_tail = (val >> 28) & 0x3F;
-                       if (sqe_head == sqe_tail)
-                               break;
-                       usleep_range(1, 3);
-                       timeout--;
-               }
+               val = otx2_atomic64_add(incr, ptr);
+               sqe_head = (val >> 20) & 0x3F;
+               sqe_tail = (val >> 28) & 0x3F;
+               if (sqe_head != sqe_tail)
+                       usleep_range(50, 60);
        }
 }
 
index c04a8ee53a82f1711ea62164b764b92a9d2244cc..e7c69b57147e097827afaba08cf647372ad021f7 100644 (file)
@@ -977,6 +977,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool pfc_en);
 int otx2_txsch_alloc(struct otx2_nic *pfvf);
 void otx2_txschq_stop(struct otx2_nic *pfvf);
 void otx2_txschq_free_one(struct otx2_nic *pfvf, u16 lvl, u16 schq);
+void otx2_free_pending_sqe(struct otx2_nic *pfvf);
 void otx2_sqb_flush(struct otx2_nic *pfvf);
 int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
                    dma_addr_t *dma);
index 6daf4d58c25d6379c5e78689d8d91b301927a186..91b99fd703616ae5915758a374a17e458bea8ffc 100644 (file)
@@ -1193,31 +1193,32 @@ static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = {
 };
 
 static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] =  {
-       "NIX_SND_STATUS_GOOD",
-       "NIX_SND_STATUS_SQ_CTX_FAULT",
-       "NIX_SND_STATUS_SQ_CTX_POISON",
-       "NIX_SND_STATUS_SQB_FAULT",
-       "NIX_SND_STATUS_SQB_POISON",
-       "NIX_SND_STATUS_HDR_ERR",
-       "NIX_SND_STATUS_EXT_ERR",
-       "NIX_SND_STATUS_JUMP_FAULT",
-       "NIX_SND_STATUS_JUMP_POISON",
-       "NIX_SND_STATUS_CRC_ERR",
-       "NIX_SND_STATUS_IMM_ERR",
-       "NIX_SND_STATUS_SG_ERR",
-       "NIX_SND_STATUS_MEM_ERR",
-       "NIX_SND_STATUS_INVALID_SUBDC",
-       "NIX_SND_STATUS_SUBDC_ORDER_ERR",
-       "NIX_SND_STATUS_DATA_FAULT",
-       "NIX_SND_STATUS_DATA_POISON",
-       "NIX_SND_STATUS_NPC_DROP_ACTION",
-       "NIX_SND_STATUS_LOCK_VIOL",
-       "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR",
-       "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR",
-       "NIX_SND_STATUS_NPC_MCAST_ABORT",
-       "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
-       "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
-       "NIX_SND_STATUS_SEND_STATS_ERR",
+       [NIX_SND_STATUS_GOOD] = "NIX_SND_STATUS_GOOD",
+       [NIX_SND_STATUS_SQ_CTX_FAULT] = "NIX_SND_STATUS_SQ_CTX_FAULT",
+       [NIX_SND_STATUS_SQ_CTX_POISON] = "NIX_SND_STATUS_SQ_CTX_POISON",
+       [NIX_SND_STATUS_SQB_FAULT] = "NIX_SND_STATUS_SQB_FAULT",
+       [NIX_SND_STATUS_SQB_POISON] = "NIX_SND_STATUS_SQB_POISON",
+       [NIX_SND_STATUS_HDR_ERR] = "NIX_SND_STATUS_HDR_ERR",
+       [NIX_SND_STATUS_EXT_ERR] = "NIX_SND_STATUS_EXT_ERR",
+       [NIX_SND_STATUS_JUMP_FAULT] = "NIX_SND_STATUS_JUMP_FAULT",
+       [NIX_SND_STATUS_JUMP_POISON] = "NIX_SND_STATUS_JUMP_POISON",
+       [NIX_SND_STATUS_CRC_ERR] = "NIX_SND_STATUS_CRC_ERR",
+       [NIX_SND_STATUS_IMM_ERR] = "NIX_SND_STATUS_IMM_ERR",
+       [NIX_SND_STATUS_SG_ERR] = "NIX_SND_STATUS_SG_ERR",
+       [NIX_SND_STATUS_MEM_ERR] = "NIX_SND_STATUS_MEM_ERR",
+       [NIX_SND_STATUS_INVALID_SUBDC] = "NIX_SND_STATUS_INVALID_SUBDC",
+       [NIX_SND_STATUS_SUBDC_ORDER_ERR] = "NIX_SND_STATUS_SUBDC_ORDER_ERR",
+       [NIX_SND_STATUS_DATA_FAULT] = "NIX_SND_STATUS_DATA_FAULT",
+       [NIX_SND_STATUS_DATA_POISON] = "NIX_SND_STATUS_DATA_POISON",
+       [NIX_SND_STATUS_NPC_DROP_ACTION] = "NIX_SND_STATUS_NPC_DROP_ACTION",
+       [NIX_SND_STATUS_LOCK_VIOL] = "NIX_SND_STATUS_LOCK_VIOL",
+       [NIX_SND_STATUS_NPC_UCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_UCAST_CHAN_ERR",
+       [NIX_SND_STATUS_NPC_MCAST_CHAN_ERR] = "NIX_SND_STAT_NPC_MCAST_CHAN_ERR",
+       [NIX_SND_STATUS_NPC_MCAST_ABORT] = "NIX_SND_STATUS_NPC_MCAST_ABORT",
+       [NIX_SND_STATUS_NPC_VTAG_PTR_ERR] = "NIX_SND_STATUS_NPC_VTAG_PTR_ERR",
+       [NIX_SND_STATUS_NPC_VTAG_SIZE_ERR] = "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR",
+       [NIX_SND_STATUS_SEND_MEM_FAULT] = "NIX_SND_STATUS_SEND_MEM_FAULT",
+       [NIX_SND_STATUS_SEND_STATS_ERR] = "NIX_SND_STATUS_SEND_STATS_ERR",
 };
 
 static irqreturn_t otx2_q_intr_handler(int irq, void *data)
@@ -1238,14 +1239,16 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
                        continue;
 
                if (val & BIT_ULL(42)) {
-                       netdev_err(pf->netdev, "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+                       netdev_err(pf->netdev,
+                                  "CQ%lld: error reading NIX_LF_CQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
                                   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
                } else {
                        if (val & BIT_ULL(NIX_CQERRINT_DOOR_ERR))
                                netdev_err(pf->netdev, "CQ%lld: Doorbell error",
                                           qidx);
                        if (val & BIT_ULL(NIX_CQERRINT_CQE_FAULT))
-                               netdev_err(pf->netdev, "CQ%lld: Memory fault on CQE write to LLC/DRAM",
+                               netdev_err(pf->netdev,
+                                          "CQ%lld: Memory fault on CQE write to LLC/DRAM",
                                           qidx);
                }
 
@@ -1272,7 +1275,8 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
                             (val & NIX_SQINT_BITS));
 
                if (val & BIT_ULL(42)) {
-                       netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
+                       netdev_err(pf->netdev,
+                                  "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n",
                                   qidx, otx2_read64(pf, NIX_LF_ERR_INT));
                        goto done;
                }
@@ -1282,8 +1286,11 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
                        goto chk_mnq_err_dbg;
 
                sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg);
-               netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx)  err=%s\n",
-                          qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]);
+               netdev_err(pf->netdev,
+                          "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(0x%llx)  err=%s(%#x)\n",
+                          qidx, sq_op_err_dbg,
+                          nix_sqoperr_e_str[sq_op_err_code],
+                          sq_op_err_code);
 
                otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44));
 
@@ -1300,16 +1307,21 @@ chk_mnq_err_dbg:
                        goto chk_snd_err_dbg;
 
                mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg);
-               netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx)  err=%s\n",
-                          qidx, mnq_err_dbg,  nix_mnqerr_e_str[mnq_err_code]);
+               netdev_err(pf->netdev,
+                          "SQ%lld: NIX_LF_MNQ_ERR_DBG(0x%llx)  err=%s(%#x)\n",
+                          qidx, mnq_err_dbg,  nix_mnqerr_e_str[mnq_err_code],
+                          mnq_err_code);
                otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44));
 
 chk_snd_err_dbg:
                snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG);
                if (snd_err_dbg & BIT(44)) {
                        snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg);
-                       netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n",
-                                  qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]);
+                       netdev_err(pf->netdev,
+                                  "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s(%#x)\n",
+                                  qidx, snd_err_dbg,
+                                  nix_snd_status_e_str[snd_err_code],
+                                  snd_err_code);
                        otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44));
                }
 
@@ -1589,6 +1601,7 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
                else
                        otx2_cleanup_tx_cqes(pf, cq);
        }
+       otx2_free_pending_sqe(pf);
 
        otx2_free_sq_res(pf);
 
index fa37b9f312cae4450c565c11e348071114f2225b..4e5899d8fa2e6e6902f9f3316b5408975727a841 100644 (file)
@@ -318,23 +318,23 @@ enum nix_snd_status_e {
        NIX_SND_STATUS_EXT_ERR = 0x6,
        NIX_SND_STATUS_JUMP_FAULT = 0x7,
        NIX_SND_STATUS_JUMP_POISON = 0x8,
-       NIX_SND_STATUS_CRC_ERR = 0x9,
-       NIX_SND_STATUS_IMM_ERR = 0x10,
-       NIX_SND_STATUS_SG_ERR = 0x11,
-       NIX_SND_STATUS_MEM_ERR = 0x12,
-       NIX_SND_STATUS_INVALID_SUBDC = 0x13,
-       NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14,
-       NIX_SND_STATUS_DATA_FAULT = 0x15,
-       NIX_SND_STATUS_DATA_POISON = 0x16,
-       NIX_SND_STATUS_NPC_DROP_ACTION = 0x17,
-       NIX_SND_STATUS_LOCK_VIOL = 0x18,
-       NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19,
-       NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20,
-       NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21,
-       NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22,
-       NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23,
-       NIX_SND_STATUS_SEND_MEM_FAULT = 0x24,
-       NIX_SND_STATUS_SEND_STATS_ERR = 0x25,
+       NIX_SND_STATUS_CRC_ERR = 0x10,
+       NIX_SND_STATUS_IMM_ERR = 0x11,
+       NIX_SND_STATUS_SG_ERR = 0x12,
+       NIX_SND_STATUS_MEM_ERR = 0x13,
+       NIX_SND_STATUS_INVALID_SUBDC = 0x14,
+       NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x15,
+       NIX_SND_STATUS_DATA_FAULT = 0x16,
+       NIX_SND_STATUS_DATA_POISON = 0x17,
+       NIX_SND_STATUS_NPC_DROP_ACTION = 0x20,
+       NIX_SND_STATUS_LOCK_VIOL = 0x21,
+       NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x22,
+       NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x23,
+       NIX_SND_STATUS_NPC_MCAST_ABORT = 0x24,
+       NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x25,
+       NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x26,
+       NIX_SND_STATUS_SEND_MEM_FAULT = 0x27,
+       NIX_SND_STATUS_SEND_STATS_ERR = 0x28,
        NIX_SND_STATUS_MAX,
 };
 
index 53b2a4ef52985271ab6d0ee67c779af878876ba0..6ee15f3c25ede947ce0103a7d8b00cd7291c177e 100644 (file)
@@ -1247,9 +1247,11 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int q
 
 void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
 {
+       int tx_pkts = 0, tx_bytes = 0;
        struct sk_buff *skb = NULL;
        struct otx2_snd_queue *sq;
        struct nix_cqe_tx_s *cqe;
+       struct netdev_queue *txq;
        int processed_cqe = 0;
        struct sg_list *sg;
        int qidx;
@@ -1270,12 +1272,20 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
                sg = &sq->sg[cqe->comp.sqe_id];
                skb = (struct sk_buff *)sg->skb;
                if (skb) {
+                       tx_bytes += skb->len;
+                       tx_pkts++;
                        otx2_dma_unmap_skb_frags(pfvf, sg);
                        dev_kfree_skb_any(skb);
                        sg->skb = (u64)NULL;
                }
        }
 
+       if (likely(tx_pkts)) {
+               if (qidx >= pfvf->hw.tx_queues)
+                       qidx -= pfvf->hw.xdp_queues;
+               txq = netdev_get_tx_queue(pfvf->netdev, qidx);
+               netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+       }
        /* Free CQEs to HW */
        otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
                     ((u64)cq->cq_idx << 32) | processed_cqe);
@@ -1302,6 +1312,38 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
        return err;
 }
 
+void otx2_free_pending_sqe(struct otx2_nic *pfvf)
+{
+       int tx_pkts = 0, tx_bytes = 0;
+       struct sk_buff *skb = NULL;
+       struct otx2_snd_queue *sq;
+       struct netdev_queue *txq;
+       struct sg_list *sg;
+       int sq_idx, sqe;
+
+       for (sq_idx = 0; sq_idx < pfvf->hw.tx_queues; sq_idx++) {
+               sq = &pfvf->qset.sq[sq_idx];
+               for (sqe = 0; sqe < sq->sqe_cnt; sqe++) {
+                       sg = &sq->sg[sqe];
+                       skb = (struct sk_buff *)sg->skb;
+                       if (skb) {
+                               tx_bytes += skb->len;
+                               tx_pkts++;
+                               otx2_dma_unmap_skb_frags(pfvf, sg);
+                               dev_kfree_skb_any(skb);
+                               sg->skb = (u64)NULL;
+                       }
+               }
+
+               if (!tx_pkts)
+                       continue;
+               txq = netdev_get_tx_queue(pfvf->netdev, sq_idx);
+               netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
+               tx_pkts = 0;
+               tx_bytes = 0;
+       }
+}
+
 static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
                                int len, int *offset)
 {
index a987defb575cfbef1c7792eb6de5d9e7144514fa..0c76c162b8a9f5eb720ea3ed00a5beaf4d930ad0 100644 (file)
@@ -2582,9 +2582,13 @@ static void rtl_set_rx_mode(struct net_device *dev)
 
        if (dev->flags & IFF_PROMISC) {
                rx_mode |= AcceptAllPhys;
+       } else if (!(dev->flags & IFF_MULTICAST)) {
+               rx_mode &= ~AcceptMulticast;
        } else if (netdev_mc_count(dev) > MC_FILTER_LIMIT ||
                   dev->flags & IFF_ALLMULTI ||
-                  tp->mac_version == RTL_GIGA_MAC_VER_35) {
+                  tp->mac_version == RTL_GIGA_MAC_VER_35 ||
+                  tp->mac_version == RTL_GIGA_MAC_VER_46 ||
+                  tp->mac_version == RTL_GIGA_MAC_VER_48) {
                /* accept all multicasts */
        } else if (netdev_mc_empty(dev)) {
                rx_mode &= ~AcceptMulticast;
index 7a8f47e7b728bd3be1deaf26199874e0a2570500..a4e8b498dea9644dae6c4e90e3a2b6ee63817053 100644 (file)
        ((val) << XGMAC_PPS_MINIDX(x))
 #define XGMAC_PPSCMD_START             0x2
 #define XGMAC_PPSCMD_STOP              0x5
-#define XGMAC_PPSEN0                   BIT(4)
+#define XGMAC_PPSENx(x)                        BIT(4 + (x) * 8)
 #define XGMAC_PPSx_TARGET_TIME_SEC(x)  (0x00000d80 + (x) * 0x10)
 #define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
 #define XGMAC_TRGTBUSY0                        BIT(31)
index f352be269deb54554e309c1f4d8b55762d399d7b..453e88b75be08a71d67472fc5bd31b680fd1fd51 100644 (file)
@@ -1178,7 +1178,19 @@ static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
 
        val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
        val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
-       val |= XGMAC_PPSEN0;
+
+       /* XGMAC Core has 4 PPS outputs at most.
+        *
+        * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for
+        * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default,
+        * and can not be switched to Fixed mode, since PPSEN{1,2,3} are
+        * read-only reserved to 0.
+        * But we always set PPSEN{1,2,3} do not make things worse ;-)
+        *
+        * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must
+        * be set, or the PPS outputs stay in Fixed PPS mode by default.
+        */
+       val |= XGMAC_PPSENx(index);
 
        writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
 
index 24120605502f9e8b7d3161d066902e2b8edc8a3f..ece9f8df98ae7d6618ee5fc29847938f276a2fd9 100644 (file)
@@ -1588,10 +1588,10 @@ static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy
 
        /* rx_pause/tx_pause */
        if (rx_pause)
-               mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+               mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
 
        if (tx_pause)
-               mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+               mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
 
        cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
 
index 4cf2a52e43783f229c87e17599bf6896754812bf..3025e9c189702ba7be0cab869f733675e979097a 100644 (file)
@@ -177,7 +177,7 @@ static void icss_iep_set_counter(struct icss_iep *iep, u64 ns)
        if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
                writel(upper_32_bits(ns), iep->base +
                       iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG1]);
-       writel(upper_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
+       writel(lower_32_bits(ns), iep->base + iep->plat_data->reg_offs[ICSS_IEP_COUNT_REG0]);
 }
 
 static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns);
index 531bf919aef50943bb28b6e4e2c9cadff734305b..e0d26148dfd95cf5eace5027cc06a43ecc737cb5 100644 (file)
@@ -163,7 +163,6 @@ typedef void buffer_t;
 
 /* Information about built-in Ethernet MAC interfaces */
 struct eth_plat_info {
-       u8 phy;         /* MII PHY ID, 0 - 31 */
        u8 rxq;         /* configurable, currently 0 - 31 only */
        u8 txreadyq;
        u8 hwaddr[ETH_ALEN];
@@ -1583,7 +1582,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
        if ((err = register_netdev(ndev)))
                goto err_phy_dis;
 
-       netdev_info(ndev, "%s: MII PHY %i on %s\n", ndev->name, plat->phy,
+       netdev_info(ndev, "%s: MII PHY %s on %s\n", ndev->name, phydev_name(phydev),
                    npe_name(port->npe));
 
        return 0;
index 4630dde019749a7a4de81c27b9f6af902bad1c2f..5d0f11f280cfc5a63a50cfacdc9724a98b8aac6f 100644 (file)
@@ -16,6 +16,7 @@
 
 MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ACPI MDIO bus (Ethernet PHY) accessors");
 
 /**
  * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
index 1183ef5e203e3e248f1d73a22ccd04ea7b063851..fd02f5cbc853a61a4e8f74c816943867147297a9 100644 (file)
@@ -14,6 +14,7 @@
 
 MODULE_AUTHOR("Calvin Johnson <calvin.johnson@oss.nxp.com>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FWNODE MDIO bus (Ethernet PHY) accessors");
 
 static struct pse_control *
 fwnode_find_pse_control(struct fwnode_handle *fwnode)
index 70edeeb7771e873b4b236c955e5ea33284cf5b30..c2170650415cdbd9d6076fe12e0014c33e40be15 100644 (file)
@@ -205,3 +205,4 @@ module_platform_driver(aspeed_mdio_driver);
 
 MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ASPEED MDIO bus controller");
index 81b7748c10ce0e0d74ab3f314a21aa34e9a13c0a..f88639297ff2df9f36d60c9680bf91d221aa219f 100644 (file)
@@ -263,3 +263,4 @@ void free_mdio_bitbang(struct mii_bus *bus)
 EXPORT_SYMBOL(free_mdio_bitbang);
 
 MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Bitbanged MDIO buses");
index 7eb32ebb846d86ef8b89bfc4a3cf73e82c0fbfa1..64ebcb6d235cc0b314c1d0dab5f96e31b8c48242 100644 (file)
@@ -25,6 +25,7 @@
 
 MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("OpenFirmware MDIO bus (Ethernet PHY) accessors");
 
 /* Extract the clause 22 phy ID from the compatible string of the form
  * ethernet-phy-idAAAA.BBBB */
index ef00d6163061fab6740a1c14995ee53da56e521c..cb4b91af5e17337cc22ba94efbf3f912c47fb084 100644 (file)
@@ -942,3 +942,4 @@ struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
 EXPORT_SYMBOL_GPL(bcm_ptp_probe);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcom PHY PTP driver");
index cc28581076682fda823a23381c21b2cfa94e6287..e81404bf899474f0a716c5dbe283d5aa3a87d83b 100644 (file)
@@ -223,3 +223,4 @@ static struct phy_driver bcm87xx_driver[] = {
 module_phy_driver(bcm87xx_driver);
 
 MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Broadcom BCM87xx PHY driver");
index 6712883498bbe99570275b5a074b55ddfc7b282e..25c19496a33616fb3a1808a331d5ff52111d3fee 100644 (file)
@@ -1616,6 +1616,7 @@ struct phylink *phylink_create(struct phylink_config *config,
        pl->config = config;
        if (config->type == PHYLINK_NETDEV) {
                pl->netdev = to_net_dev(config->dev);
+               netif_carrier_off(pl->netdev);
        } else if (config->type == PHYLINK_DEV) {
                pl->dev = config->dev;
        } else {
@@ -3726,3 +3727,4 @@ static int __init phylink_init(void)
 module_init(phylink_init);
 
 MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("phylink models the MAC to optional PHY connection");
index b8c0961daf53171733885531ba4fe7b5d831ad7c..5468bd209fab87f09f081e7daf9375911b69ba58 100644 (file)
@@ -3153,3 +3153,4 @@ module_exit(sfp_exit);
 MODULE_ALIAS("platform:sfp");
 MODULE_AUTHOR("Russell King");
 MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("SFP cage support");
index a9beacd552cf825b6e391862f196141358b9eadd..0193af2d31c9bcf5dc8864da49ba4e75ba0192fc 100644 (file)
@@ -570,8 +570,8 @@ static struct bpf_prog *get_filter(struct sock_fprog *uprog)
 
        /* uprog->len is unsigned short, so no overflow here */
        fprog.len = uprog->len;
-       fprog.filter = memdup_user(uprog->filter,
-                                  uprog->len * sizeof(struct sock_filter));
+       fprog.filter = memdup_array_user(uprog->filter,
+                                        uprog->len, sizeof(struct sock_filter));
        if (IS_ERR(fprog.filter))
                return ERR_CAST(fprog.filter);
 
index eedca8c720983cfdd0a8e1b21b36b3688744c940..74f59ceed3d5aed9930b559fbc45c6fec286b4be 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-obj-$(CONFIG_NVME_COMMON)              += common/
+obj-y          += common/
 obj-y          += host/
 obj-y          += target/
index 06c8df00d1e21450e180c3edcec170d05d81b0c4..244432e0b73d8e9df71ba8e717ee3315ac3e78eb 100644 (file)
@@ -1,14 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
-config NVME_COMMON
-       tristate
-
 config NVME_KEYRING
-       bool
+       tristate
        select KEYS
 
 config NVME_AUTH
-       bool
+       tristate
        select CRYPTO
        select CRYPTO_HMAC
        select CRYPTO_SHA256
index 0cbd0b0b8d499864ae65db2b0406a6b6cfde5ec8..681514cf2e2f50649f690e8b5d9fc135457f58ab 100644 (file)
@@ -2,7 +2,8 @@
 
 ccflags-y                      += -I$(src)
 
-obj-$(CONFIG_NVME_COMMON)      += nvme-common.o
+obj-$(CONFIG_NVME_AUTH)                += nvme-auth.o
+obj-$(CONFIG_NVME_KEYRING)     += nvme-keyring.o
 
-nvme-common-$(CONFIG_NVME_AUTH)        += auth.o
-nvme-common-$(CONFIG_NVME_KEYRING) += keyring.o
+nvme-auth-y                    += auth.o
+nvme-keyring-y                 += keyring.o
index a8e87dfbeab2a41a9c73f8c880f709fc0fc830fb..a23ab5c968b9457bee89f14cc1f158e377ffa084 100644 (file)
@@ -341,7 +341,6 @@ int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
                u8 *challenge, u8 *aug, size_t hlen)
 {
        struct crypto_shash *tfm;
-       struct shash_desc *desc;
        u8 *hashed_key;
        const char *hmac_name;
        int ret;
@@ -369,29 +368,11 @@ int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
                goto out_free_key;
        }
 
-       desc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
-                      GFP_KERNEL);
-       if (!desc) {
-               ret = -ENOMEM;
-               goto out_free_hash;
-       }
-       desc->tfm = tfm;
-
        ret = crypto_shash_setkey(tfm, hashed_key, hlen);
        if (ret)
-               goto out_free_desc;
-
-       ret = crypto_shash_init(desc);
-       if (ret)
-               goto out_free_desc;
-
-       ret = crypto_shash_update(desc, challenge, hlen);
-       if (ret)
-               goto out_free_desc;
+               goto out_free_hash;
 
-       ret = crypto_shash_final(desc, aug);
-out_free_desc:
-       kfree_sensitive(desc);
+       ret = crypto_shash_tfm_digest(tfm, challenge, hlen, aug);
 out_free_hash:
        crypto_free_shash(tfm);
 out_free_key:
index f8d9a208397b4d580cac4398b7edb0b22f6fe77d..ee341b83eebaf553cbf91a045b048285d590157a 100644 (file)
@@ -151,7 +151,7 @@ key_serial_t nvme_tls_psk_default(struct key *keyring,
 }
 EXPORT_SYMBOL_GPL(nvme_tls_psk_default);
 
-int nvme_keyring_init(void)
+static int __init nvme_keyring_init(void)
 {
        int err;
 
@@ -171,12 +171,15 @@ int nvme_keyring_init(void)
        }
        return 0;
 }
-EXPORT_SYMBOL_GPL(nvme_keyring_init);
 
-void nvme_keyring_exit(void)
+static void __exit nvme_keyring_exit(void)
 {
        unregister_key_type(&nvme_tls_psk_key_type);
        key_revoke(nvme_keyring);
        key_put(nvme_keyring);
 }
-EXPORT_SYMBOL_GPL(nvme_keyring_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
+module_init(nvme_keyring_init);
+module_exit(nvme_keyring_exit);
index 48f7d72de5e9a5d2d2861e6835640d0176736517..8fe2dd619e80eb4b3f45b85e940397e1f2ae6cca 100644 (file)
@@ -95,7 +95,6 @@ config NVME_TCP
 config NVME_TCP_TLS
        bool "NVMe over Fabrics TCP TLS encryption support"
        depends on NVME_TCP
-       select NVME_COMMON
        select NVME_KEYRING
        select NET_HANDSHAKE
        select KEYS
@@ -110,7 +109,6 @@ config NVME_TCP_TLS
 config NVME_HOST_AUTH
        bool "NVM Express over Fabrics In-Band Authentication"
        depends on NVME_CORE
-       select NVME_COMMON
        select NVME_AUTH
        help
          This provides support for NVMe over Fabrics In-Band Authentication.
index eaefebb2a799a20d6912b5a4a9b17558e418a46f..48328e36e93bc423974f5089a4ee5fd0bbcc9a6d 100644 (file)
@@ -29,6 +29,7 @@ struct nvme_dhchap_queue_context {
        int error;
        u32 s1;
        u32 s2;
+       bool bi_directional;
        u16 transaction;
        u8 status;
        u8 dhgroup_id;
@@ -312,17 +313,17 @@ static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
        data->dhvlen = cpu_to_le16(chap->host_key_len);
        memcpy(data->rval, chap->response, chap->hash_len);
        if (ctrl->ctrl_key) {
+               chap->bi_directional = true;
                get_random_bytes(chap->c2, chap->hash_len);
                data->cvalid = 1;
-               chap->s2 = nvme_auth_get_seqnum();
                memcpy(data->rval + chap->hash_len, chap->c2,
                       chap->hash_len);
                dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
                        __func__, chap->qid, (int)chap->hash_len, chap->c2);
        } else {
                memset(chap->c2, 0, chap->hash_len);
-               chap->s2 = 0;
        }
+       chap->s2 = nvme_auth_get_seqnum();
        data->seqnum = cpu_to_le32(chap->s2);
        if (chap->host_key_len) {
                dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
@@ -339,10 +340,7 @@ static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
                struct nvme_dhchap_queue_context *chap)
 {
        struct nvmf_auth_dhchap_success1_data *data = chap->buf;
-       size_t size = sizeof(*data);
-
-       if (chap->s2)
-               size += chap->hash_len;
+       size_t size = sizeof(*data) + chap->hash_len;
 
        if (size > CHAP_BUF_SIZE) {
                chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
@@ -663,6 +661,7 @@ static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
        chap->error = 0;
        chap->s1 = 0;
        chap->s2 = 0;
+       chap->bi_directional = false;
        chap->transaction = 0;
        memset(chap->c1, 0, sizeof(chap->c1));
        memset(chap->c2, 0, sizeof(chap->c2));
@@ -825,7 +824,7 @@ static void nvme_queue_auth_work(struct work_struct *work)
                goto fail2;
        }
 
-       if (chap->s2) {
+       if (chap->bi_directional) {
                /* DH-HMAC-CHAP Step 5: send success2 */
                dev_dbg(ctrl->device, "%s: qid %d send success2\n",
                        __func__, chap->qid);
index 62612f87aafa228622859ebc2f484c93b7ebe55e..88b54cdcbd683cd3e7f0a26742aff156ad4c57b5 100644 (file)
@@ -25,7 +25,6 @@
 #include "nvme.h"
 #include "fabrics.h"
 #include <linux/nvme-auth.h>
-#include <linux/nvme-keyring.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
@@ -483,6 +482,7 @@ EXPORT_SYMBOL_GPL(nvme_cancel_tagset);
 
 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl)
 {
+       nvme_stop_keep_alive(ctrl);
        if (ctrl->admin_tagset) {
                blk_mq_tagset_busy_iter(ctrl->admin_tagset,
                                nvme_cancel_request, ctrl);
@@ -3200,6 +3200,8 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl, bool was_suspended)
        clear_bit(NVME_CTRL_DIRTY_CAPABILITY, &ctrl->flags);
        ctrl->identified = true;
 
+       nvme_start_keep_alive(ctrl);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(nvme_init_ctrl_finish);
@@ -4074,8 +4076,21 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
                return;
 
        if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, NVME_CSI_NVM,
-                       log, sizeof(*log), 0))
+                        log, sizeof(*log), 0)) {
                dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
+               goto out_free_log;
+       }
+
+       if (log->afi & 0x70 || !(log->afi & 0x7)) {
+               dev_info(ctrl->device,
+                        "Firmware is activated after next Controller Level Reset\n");
+               goto out_free_log;
+       }
+
+       memcpy(ctrl->subsys->firmware_rev, &log->frs[(log->afi & 0x7) - 1],
+               sizeof(ctrl->subsys->firmware_rev));
+
+out_free_log:
        kfree(log);
 }
 
@@ -4333,7 +4348,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 {
        nvme_mpath_stop(ctrl);
        nvme_auth_stop(ctrl);
-       nvme_stop_keep_alive(ctrl);
        nvme_stop_failfast_work(ctrl);
        flush_work(&ctrl->async_event_work);
        cancel_work_sync(&ctrl->fw_act_work);
@@ -4344,8 +4358,6 @@ EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
 
 void nvme_start_ctrl(struct nvme_ctrl *ctrl)
 {
-       nvme_start_keep_alive(ctrl);
-
        nvme_enable_aen(ctrl);
 
        /*
@@ -4724,16 +4736,11 @@ static int __init nvme_core_init(void)
                result = PTR_ERR(nvme_ns_chr_class);
                goto unregister_generic_ns;
        }
-       result = nvme_keyring_init();
-       if (result)
-               goto destroy_ns_chr;
        result = nvme_init_auth();
        if (result)
-               goto keyring_exit;
+               goto destroy_ns_chr;
        return 0;
 
-keyring_exit:
-       nvme_keyring_exit();
 destroy_ns_chr:
        class_destroy(nvme_ns_chr_class);
 unregister_generic_ns:
@@ -4757,7 +4764,6 @@ out:
 static void __exit nvme_core_exit(void)
 {
        nvme_exit_auth();
-       nvme_keyring_exit();
        class_destroy(nvme_ns_chr_class);
        class_destroy(nvme_subsys_class);
        class_destroy(nvme_class);
index a15b37750d6e931b0a875058cda6868034a642d9..49c3e46eaa1eee13b1174044104072dc6390990f 100644 (file)
@@ -2530,6 +2530,12 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
         * clean up the admin queue. Same thing as above.
         */
        nvme_quiesce_admin_queue(&ctrl->ctrl);
+
+       /*
+        * Open-coding nvme_cancel_admin_tagset() as fc
+        * is not using nvme_cancel_request().
+        */
+       nvme_stop_keep_alive(&ctrl->ctrl);
        blk_sync_queue(ctrl->ctrl.admin_q);
        blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
                                nvme_fc_terminate_exchange, &ctrl->ctrl);
index 747c879e8982b803525238afd1572fc90197fd6d..529b9954d2b8c0429e6bd7316791fef884121a29 100644 (file)
@@ -510,10 +510,13 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
        struct nvme_uring_cmd_pdu *pdu = nvme_uring_cmd_pdu(ioucmd);
 
        req->bio = pdu->bio;
-       if (nvme_req(req)->flags & NVME_REQ_CANCELLED)
+       if (nvme_req(req)->flags & NVME_REQ_CANCELLED) {
                pdu->nvme_status = -EINTR;
-       else
+       } else {
                pdu->nvme_status = nvme_req(req)->status;
+               if (!pdu->nvme_status)
+                       pdu->nvme_status = blk_status_to_errno(err);
+       }
        pdu->u.result = le64_to_cpu(nvme_req(req)->result.u64);
 
        /*
index 4714a902f4caa8ac30a9f3091b4011fd5e7df5e7..89661a9cf850d493d0ff6e69b60a5525154cfbc4 100644 (file)
@@ -1423,13 +1423,14 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
                        nvme_tcp_queue_id(queue), ret);
                goto free_icresp;
        }
+       ret = -ENOTCONN;
        if (queue->ctrl->ctrl.opts->tls) {
                ctype = tls_get_record_type(queue->sock->sk,
                                            (struct cmsghdr *)cbuf);
                if (ctype != TLS_RECORD_TYPE_DATA) {
                        pr_err("queue %d: unhandled TLS record %d\n",
                               nvme_tcp_queue_id(queue), ctype);
-                       return -ENOTCONN;
+                       goto free_icresp;
                }
        }
        ret = -EINVAL;
@@ -2236,11 +2237,7 @@ destroy_io:
                nvme_tcp_destroy_io_queues(ctrl, new);
        }
 destroy_admin:
-       nvme_quiesce_admin_queue(ctrl);
-       blk_sync_queue(ctrl->admin_q);
-       nvme_tcp_stop_queue(ctrl, 0);
-       nvme_cancel_admin_tagset(ctrl);
-       nvme_tcp_destroy_admin_queue(ctrl, new);
+       nvme_tcp_teardown_admin_queue(ctrl, false);
        return ret;
 }
 
index fa479c9f5c3d3237939ac01a2aa65128abdc0599..31633da9427c7f54958ecb0eb514f3df20600cb3 100644 (file)
@@ -87,7 +87,6 @@ config NVME_TARGET_TCP
 config NVME_TARGET_TCP_TLS
        bool "NVMe over Fabrics TCP target TLS encryption support"
        depends on NVME_TARGET_TCP
-       select NVME_COMMON
        select NVME_KEYRING
        select NET_HANDSHAKE
        select KEYS
@@ -102,7 +101,6 @@ config NVME_TARGET_TCP_TLS
 config NVME_TARGET_AUTH
        bool "NVMe over Fabrics In-band Authentication support"
        depends on NVME_TARGET
-       select NVME_COMMON
        select NVME_AUTH
        help
          This enables support for NVMe over Fabrics In-band Authentication
index 1d9854484e2e83509b46d021f4941a583b7a34c3..eb7785be0ca770c68c0084059f0a4f23b06f6dff 100644 (file)
@@ -163,11 +163,11 @@ static u16 nvmet_auth_reply(struct nvmet_req *req, void *d)
                pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
                         __func__, ctrl->cntlid, req->sq->qid, data->hl,
                         req->sq->dhchap_c2);
-               req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
        } else {
                req->sq->authenticated = true;
                req->sq->dhchap_c2 = NULL;
        }
+       req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
 
        return 0;
 }
index 48d5df054cd0245e62acf3997be6c2413af9064e..9cb434c5807514813afe91eada69c0a925daf83a 100644 (file)
@@ -466,6 +466,8 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work)
 out_destroy_io:
        nvme_loop_destroy_io_queues(ctrl);
 out_destroy_admin:
+       nvme_quiesce_admin_queue(&ctrl->ctrl);
+       nvme_cancel_admin_tagset(&ctrl->ctrl);
        nvme_loop_destroy_admin_queue(ctrl);
 out_disable:
        dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
@@ -600,6 +602,8 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
        return &ctrl->ctrl;
 
 out_remove_admin_queue:
+       nvme_quiesce_admin_queue(&ctrl->ctrl);
+       nvme_cancel_admin_tagset(&ctrl->ctrl);
        nvme_loop_destroy_admin_queue(ctrl);
 out_free_queues:
        kfree(ctrl->queues);
index cf8dacf3e3b84d8a56aae6aa1bd79da7cf8e7fea..afdaefbd03f61563c2b2972ff0c64266d3acaa9e 100644 (file)
@@ -1267,6 +1267,7 @@ DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells")
 DEFINE_SIMPLE_PROP(leds, "leds", NULL)
 DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
 DEFINE_SIMPLE_PROP(panel, "panel", NULL)
+DEFINE_SIMPLE_PROP(msi_parent, "msi-parent", "#msi-cells")
 DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
 DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
 
@@ -1356,6 +1357,7 @@ static const struct supplier_bindings of_supplier_bindings[] = {
        { .parse_prop = parse_leds, },
        { .parse_prop = parse_backlight, },
        { .parse_prop = parse_panel, },
+       { .parse_prop = parse_msi_parent, },
        { .parse_prop = parse_gpio_compat, },
        { .parse_prop = parse_interrupts, },
        { .parse_prop = parse_regulators, },
index 228652a59f27e0c29ecef7fb337596653dd9e0b4..8999fcebde6aa8705d2b8bf9f62cf7358a940424 100644 (file)
@@ -49,6 +49,15 @@ config PCIEAER_INJECT
          gotten from:
             https://git.kernel.org/cgit/linux/kernel/git/gong.chen/aer-inject.git/
 
+config PCIEAER_CXL
+       bool "PCI Express CXL RAS support"
+       default y
+       depends on PCIEAER && CXL_PCI
+       help
+         Enables CXL error handling.
+
+         If unsure, say Y.
+
 #
 # PCI Express ECRC
 #
index dcd35993004e301f431fd13cef84911e7ad834c2..42a3bd35a3e118d8eb656d1d24b9f0fa0f4afaf7 100644 (file)
@@ -760,9 +760,10 @@ int cper_severity_to_aer(int cper_severity)
        }
 }
 EXPORT_SYMBOL_GPL(cper_severity_to_aer);
+#endif
 
-void cper_print_aer(struct pci_dev *dev, int aer_severity,
-                   struct aer_capability_regs *aer)
+void pci_print_aer(struct pci_dev *dev, int aer_severity,
+                  struct aer_capability_regs *aer)
 {
        int layer, agent, tlp_header_valid = 0;
        u32 status, mask;
@@ -801,7 +802,7 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
        trace_aer_event(dev_name(&dev->dev), (status & ~mask),
                        aer_severity, tlp_header_valid, &aer->header_log);
 }
-#endif
+EXPORT_SYMBOL_NS_GPL(pci_print_aer, CXL);
 
 /**
  * add_error_device - list device to be handled
@@ -934,14 +935,153 @@ static bool find_source_device(struct pci_dev *parent,
        return true;
 }
 
+#ifdef CONFIG_PCIEAER_CXL
+
 /**
- * handle_error_source - handle logging error into an event log
+ * pci_aer_unmask_internal_errors - unmask internal errors
+ * @dev: pointer to the pci_dev data structure
+ *
+ * Unmasks internal errors in the Uncorrectable and Correctable Error
+ * Mask registers.
+ *
+ * Note: AER must be enabled and supported by the device which must be
+ * checked in advance, e.g. with pcie_aer_is_native().
+ */
+static void pci_aer_unmask_internal_errors(struct pci_dev *dev)
+{
+       int aer = dev->aer_cap;
+       u32 mask;
+
+       pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask);
+       mask &= ~PCI_ERR_UNC_INTN;
+       pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, mask);
+
+       pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, &mask);
+       mask &= ~PCI_ERR_COR_INTERNAL;
+       pci_write_config_dword(dev, aer + PCI_ERR_COR_MASK, mask);
+}
+
+static bool is_cxl_mem_dev(struct pci_dev *dev)
+{
+       /*
+        * The capability, status, and control fields in Device 0,
+        * Function 0 DVSEC control the CXL functionality of the
+        * entire device (CXL 3.0, 8.1.3).
+        */
+       if (dev->devfn != PCI_DEVFN(0, 0))
+               return false;
+
+       /*
+        * CXL Memory Devices must have the 502h class code set (CXL
+        * 3.0, 8.1.12.1).
+        */
+       if ((dev->class >> 8) != PCI_CLASS_MEMORY_CXL)
+               return false;
+
+       return true;
+}
+
+static bool cxl_error_is_native(struct pci_dev *dev)
+{
+       struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);
+
+       return (pcie_ports_native || host->native_aer);
+}
+
+static bool is_internal_error(struct aer_err_info *info)
+{
+       if (info->severity == AER_CORRECTABLE)
+               return info->status & PCI_ERR_COR_INTERNAL;
+
+       return info->status & PCI_ERR_UNC_INTN;
+}
+
+static int cxl_rch_handle_error_iter(struct pci_dev *dev, void *data)
+{
+       struct aer_err_info *info = (struct aer_err_info *)data;
+       const struct pci_error_handlers *err_handler;
+
+       if (!is_cxl_mem_dev(dev) || !cxl_error_is_native(dev))
+               return 0;
+
+       /* protect dev->driver */
+       device_lock(&dev->dev);
+
+       err_handler = dev->driver ? dev->driver->err_handler : NULL;
+       if (!err_handler)
+               goto out;
+
+       if (info->severity == AER_CORRECTABLE) {
+               if (err_handler->cor_error_detected)
+                       err_handler->cor_error_detected(dev);
+       } else if (err_handler->error_detected) {
+               if (info->severity == AER_NONFATAL)
+                       err_handler->error_detected(dev, pci_channel_io_normal);
+               else if (info->severity == AER_FATAL)
+                       err_handler->error_detected(dev, pci_channel_io_frozen);
+       }
+out:
+       device_unlock(&dev->dev);
+       return 0;
+}
+
+static void cxl_rch_handle_error(struct pci_dev *dev, struct aer_err_info *info)
+{
+       /*
+        * Internal errors of an RCEC indicate an AER error in an
+        * RCH's downstream port. Check and handle them in the CXL.mem
+        * device driver.
+        */
+       if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_EC &&
+           is_internal_error(info))
+               pcie_walk_rcec(dev, cxl_rch_handle_error_iter, info);
+}
+
+static int handles_cxl_error_iter(struct pci_dev *dev, void *data)
+{
+       bool *handles_cxl = data;
+
+       if (!*handles_cxl)
+               *handles_cxl = is_cxl_mem_dev(dev) && cxl_error_is_native(dev);
+
+       /* Non-zero terminates iteration */
+       return *handles_cxl;
+}
+
+static bool handles_cxl_errors(struct pci_dev *rcec)
+{
+       bool handles_cxl = false;
+
+       if (pci_pcie_type(rcec) == PCI_EXP_TYPE_RC_EC &&
+           pcie_aer_is_native(rcec))
+               pcie_walk_rcec(rcec, handles_cxl_error_iter, &handles_cxl);
+
+       return handles_cxl;
+}
+
+static void cxl_rch_enable_rcec(struct pci_dev *rcec)
+{
+       if (!handles_cxl_errors(rcec))
+               return;
+
+       pci_aer_unmask_internal_errors(rcec);
+       pci_info(rcec, "CXL: Internal errors unmasked");
+}
+
+#else
+static inline void cxl_rch_enable_rcec(struct pci_dev *dev) { }
+static inline void cxl_rch_handle_error(struct pci_dev *dev,
+                                       struct aer_err_info *info) { }
+#endif
+
+/**
+ * pci_aer_handle_error - handle logging error into an event log
  * @dev: pointer to pci_dev data structure of error source device
  * @info: comprehensive error information
  *
  * Invoked when an error being detected by Root Port.
  */
-static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
+static void pci_aer_handle_error(struct pci_dev *dev, struct aer_err_info *info)
 {
        int aer = dev->aer_cap;
 
@@ -965,6 +1105,12 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
                pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset);
        else if (info->severity == AER_FATAL)
                pcie_do_recovery(dev, pci_channel_io_frozen, aer_root_reset);
+}
+
+static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
+{
+       cxl_rch_handle_error(dev, info);
+       pci_aer_handle_error(dev, info);
        pci_dev_put(dev);
 }
 
@@ -997,7 +1143,7 @@ static void aer_recover_work_func(struct work_struct *work)
                               PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
                        continue;
                }
-               cper_print_aer(pdev, entry.severity, entry.regs);
+               pci_print_aer(pdev, entry.severity, entry.regs);
                /*
                 * Memory for aer_capability_regs(entry.regs) is being allocated from the
                 * ghes_estatus_pool to protect it from overwriting when multiple sections
@@ -1348,6 +1494,7 @@ static int aer_probe(struct pcie_device *dev)
                return status;
        }
 
+       cxl_rch_enable_rcec(port);
        aer_enable_rootport(rpc);
        pci_info(port, "enabled with IRQ %d\n", dev->irq);
        return 0;
index 5658745c398f57de8979e0ba41fe575244e0761f..b33be1e63c98fbf85bb1e39b5f5e3bac9c44716c 100644 (file)
@@ -605,6 +605,7 @@ static int pccardd(void *__skt)
                dev_warn(&skt->dev, "PCMCIA: unable to register socket\n");
                skt->thread = NULL;
                complete(&skt->thread_done);
+               put_device(&skt->dev);
                return 0;
        }
        ret = pccard_sysfs_add_socket(&skt->dev);
index d500e5dbbc3f5e45b6181efa67b1bd27cb04fa8b..b4b8363d1de21f3997ee6d7e5a3af65f169a56a5 100644 (file)
@@ -513,9 +513,6 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
        /* by default don't allow DMA */
        p_dev->dma_mask = 0;
        p_dev->dev.dma_mask = &p_dev->dma_mask;
-       dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
-       if (!dev_name(&p_dev->dev))
-               goto err_free;
        p_dev->devname = kasprintf(GFP_KERNEL, "pcmcia%s", dev_name(&p_dev->dev));
        if (!p_dev->devname)
                goto err_free;
@@ -573,8 +570,15 @@ static struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s,
 
        pcmcia_device_query(p_dev);
 
-       if (device_register(&p_dev->dev))
-               goto err_unreg;
+       dev_set_name(&p_dev->dev, "%d.%d", p_dev->socket->sock, p_dev->device_no);
+       if (device_register(&p_dev->dev)) {
+               mutex_lock(&s->ops_mutex);
+               list_del(&p_dev->socket_device_list);
+               s->device_count--;
+               mutex_unlock(&s->ops_mutex);
+               put_device(&p_dev->dev);
+               return NULL;
+       }
 
        return p_dev;
 
index d78091e79a0f6f0b11facb37657298686688e5a2..e9e31c638a6710a1543365b48210db0fffd1a87d 100644 (file)
@@ -684,7 +684,7 @@ EXPORT_SYMBOL(pcmcia_request_io);
  * pcmcia_request_irq() is a wrapper around request_irq() which allows
  * the PCMCIA core to clean up the registration in pcmcia_disable_device().
  * Drivers are free to use request_irq() directly, but then they need to
- * call free_irq() themselfves, too. Also, only %IRQF_SHARED capable IRQ
+ * call free_irq() themselves, too. Also, only %IRQF_SHARED capable IRQ
  * handlers are allowed.
  */
 int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev,
index 1a0e3f0987599d8a8b6415ac31dac40bc4bab8a1..5ef888688e231c87bb112e4041580a7a0c7bba1b 100644 (file)
@@ -435,7 +435,7 @@ static int __init init_tcic(void)
     }
     
     /* Set up polling */
-    timer_setup(&poll_timer, &tcic_timer, 0);
+    timer_setup(&poll_timer, tcic_timer, 0);
 
     /* Build interrupt mask */
     printk(KERN_CONT ", %d sockets\n", sockets);
index 42b72042f6b3759d2d1cbfd57d1268d070b584ae..2cc35dded0079f9f3a59289e476fd1f579114556 100644 (file)
@@ -676,6 +676,9 @@ static int arm_cspmu_event_init(struct perf_event *event)
 
        cspmu = to_arm_cspmu(event->pmu);
 
+       if (event->attr.type != event->pmu->type)
+               return -ENOENT;
+
        /*
         * Following other "uncore" PMUs, we do not support sampling mode or
         * attach to a task (per-process mode).
index 18b91b56af1d467ceb155cd8ce89739a2df1c240..6ca7be05229c107e87ad2301cd8107b55d2a0e01 100644 (file)
@@ -428,12 +428,12 @@ static inline bool armv8pmu_event_is_chained(struct perf_event *event)
 #define        ARMV8_IDX_TO_COUNTER(x) \
        (((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)
 
-static inline u32 armv8pmu_pmcr_read(void)
+static inline u64 armv8pmu_pmcr_read(void)
 {
        return read_pmcr();
 }
 
-static inline void armv8pmu_pmcr_write(u32 val)
+static inline void armv8pmu_pmcr_write(u64 val)
 {
        val &= ARMV8_PMU_PMCR_MASK;
        isb();
@@ -957,7 +957,7 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 static void armv8pmu_reset(void *info)
 {
        struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
-       u32 pmcr;
+       u64 pmcr;
 
        /* The counter and interrupt enable registers are unknown at reset. */
        armv8pmu_disable_counter(U32_MAX);
index 96c7f670c8f0d1821277a303abbdffdc517c9e25..16acd4dcdb96c75e07b45a3745a71842f2d7d2b8 100644 (file)
@@ -22,7 +22,7 @@
 
 #include <asm/errata_list.h>
 #include <asm/sbi.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 
 #define SYSCTL_NO_USER_ACCESS  0
 #define SYSCTL_USER_ACCESS     1
@@ -543,8 +543,7 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival)
 
        if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
            (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
-               on_each_cpu_mask(mm_cpumask(event->owner->mm),
-                                pmu_sbi_set_scounteren, (void *)event, 1);
+               pmu_sbi_set_scounteren((void *)event);
 }
 
 static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
@@ -554,8 +553,7 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag)
 
        if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) &&
            (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT))
-               on_each_cpu_mask(mm_cpumask(event->owner->mm),
-                                pmu_sbi_reset_scounteren, (void *)event, 1);
+               pmu_sbi_reset_scounteren((void *)event);
 
        ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0);
        if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) &&
@@ -689,6 +687,11 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
 
        /* Firmware counter don't support overflow yet */
        fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS);
+       if (fidx == RISCV_MAX_COUNTERS) {
+               csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+               return IRQ_NONE;
+       }
+
        event = cpu_hw_evt->events[fidx];
        if (!event) {
                csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
index bc7bb9876e5765a0a504370492571002766bcdd5..fd5ce52d05b1d40c57a0f6cfe84546256ed061e0 100644 (file)
@@ -330,24 +330,22 @@ static const struct irq_chip iproc_gpio_irq_chip = {
 static int iproc_gpio_request(struct gpio_chip *gc, unsigned offset)
 {
        struct iproc_gpio *chip = gpiochip_get_data(gc);
-       unsigned gpio = gc->base + offset;
 
        /* not all Iproc GPIO pins can be muxed individually */
        if (!chip->pinmux_is_supported)
                return 0;
 
-       return pinctrl_gpio_request(gpio);
+       return pinctrl_gpio_request(gc, offset);
 }
 
 static void iproc_gpio_free(struct gpio_chip *gc, unsigned offset)
 {
        struct iproc_gpio *chip = gpiochip_get_data(gc);
-       unsigned gpio = gc->base + offset;
 
        if (!chip->pinmux_is_supported)
                return;
 
-       pinctrl_gpio_free(gpio);
+       pinctrl_gpio_free(gc, offset);
 }
 
 static int iproc_gpio_direction_input(struct gpio_chip *gc, unsigned gpio)
index c096463184195589a1b17887688bf0423ddf683c..012b0a3bad5a9c1d637b5871756c65d4fbdcb271 100644 (file)
@@ -506,17 +506,12 @@ static void cs42l43_gpio_set(struct gpio_chip *chip, unsigned int offset, int va
        pm_runtime_put(priv->dev);
 }
 
-static int cs42l43_gpio_direction_in(struct gpio_chip *chip, unsigned int offset)
-{
-       return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
 static int cs42l43_gpio_direction_out(struct gpio_chip *chip,
                                      unsigned int offset, int value)
 {
        cs42l43_gpio_set(chip, offset, value);
 
-       return pinctrl_gpio_direction_output(chip->base + offset);
+       return pinctrl_gpio_direction_output(chip, offset);
 }
 
 static int cs42l43_gpio_add_pin_ranges(struct gpio_chip *chip)
@@ -551,7 +546,7 @@ static int cs42l43_pin_probe(struct platform_device *pdev)
 
        priv->gpio_chip.request = gpiochip_generic_request;
        priv->gpio_chip.free = gpiochip_generic_free;
-       priv->gpio_chip.direction_input = cs42l43_gpio_direction_in;
+       priv->gpio_chip.direction_input = pinctrl_gpio_direction_input;
        priv->gpio_chip.direction_output = cs42l43_gpio_direction_out;
        priv->gpio_chip.add_pin_ranges = cs42l43_gpio_add_pin_ranges;
        priv->gpio_chip.get = cs42l43_gpio_get;
index 0b78cf611afe00c185ccc6f819f0d58f7cb681d0..014297a3fbd28725eaf4a5f1d24f18476753f581 100644 (file)
@@ -1098,7 +1098,7 @@ static int lochnagar_gpio_direction_out(struct gpio_chip *chip,
 {
        lochnagar_gpio_set(chip, offset, value);
 
-       return pinctrl_gpio_direction_output(chip->base + offset);
+       return pinctrl_gpio_direction_output(chip, offset);
 }
 
 static int lochnagar_fill_func_groups(struct lochnagar_pin_priv *priv)
index 71fc9f95584e1f1da7b16819b679f748fe13a2a9..1fa89be29b8f98b22895aed0c65f1fa68df9d7bb 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 
+#include <linux/gpio/driver.h>
+
 #include <linux/pinctrl/consumer.h>
 #include <linux/pinctrl/devinfo.h>
 #include <linux/pinctrl/machine.h>
@@ -267,7 +269,8 @@ static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
 /**
  * gpio_to_pin() - GPIO range GPIO number to pin number translation
  * @range: GPIO range used for the translation
- * @gpio: gpio pin to translate to a pin number
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
  *
  * Finds the pin number for a given GPIO using the specified GPIO range
  * as a base for translation. The distinction between linear GPIO ranges
@@ -278,25 +281,27 @@ static int pinctrl_register_pins(struct pinctrl_dev *pctldev,
  * result of successful pinctrl_get_device_gpio_range calls)!
  */
 static inline int gpio_to_pin(struct pinctrl_gpio_range *range,
-                               unsigned int gpio)
+                             struct gpio_chip *gc, unsigned int offset)
 {
-       unsigned int offset = gpio - range->base;
+       unsigned int pin = gc->base + offset - range->base;
        if (range->pins)
-               return range->pins[offset];
+               return range->pins[pin];
        else
-               return range->pin_base + offset;
+               return range->pin_base + pin;
 }
 
 /**
  * pinctrl_match_gpio_range() - check if a certain GPIO pin is in range
  * @pctldev: pin controller device to check
- * @gpio: gpio pin to check taken from the global GPIO pin space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
  *
  * Tries to match a GPIO pin number to the ranges handled by a certain pin
  * controller, return the range or NULL
  */
 static struct pinctrl_gpio_range *
-pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio)
+pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, struct gpio_chip *gc,
+                        unsigned int offset)
 {
        struct pinctrl_gpio_range *range;
 
@@ -304,8 +309,8 @@ pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio)
        /* Loop over the ranges */
        list_for_each_entry(range, &pctldev->gpio_ranges, node) {
                /* Check if we're in the valid range */
-               if (gpio >= range->base &&
-                   gpio < range->base + range->npins) {
+               if ((gc->base + offset) >= range->base &&
+                   (gc->base + offset) < range->base + range->npins) {
                        mutex_unlock(&pctldev->mutex);
                        return range;
                }
@@ -317,7 +322,8 @@ pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio)
 /**
  * pinctrl_ready_for_gpio_range() - check if other GPIO pins of
  * the same GPIO chip are in range
- * @gpio: gpio pin to check taken from the global GPIO pin space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
  *
  * This function is complement of pinctrl_match_gpio_range(). If the return
  * value of pinctrl_match_gpio_range() is NULL, this function could be used
@@ -328,19 +334,11 @@ pinctrl_match_gpio_range(struct pinctrl_dev *pctldev, unsigned gpio)
  * is false, it means that pinctrl device may not be ready.
  */
 #ifdef CONFIG_GPIOLIB
-static bool pinctrl_ready_for_gpio_range(unsigned gpio)
+static bool pinctrl_ready_for_gpio_range(struct gpio_chip *gc,
+                                        unsigned int offset)
 {
        struct pinctrl_dev *pctldev;
        struct pinctrl_gpio_range *range = NULL;
-       /*
-        * FIXME: "gpio" here is a number in the global GPIO numberspace.
-        * get rid of this from the ranges eventually and get the GPIO
-        * descriptor from the gpio_chip.
-        */
-       struct gpio_chip *chip = gpiod_to_chip(gpio_to_desc(gpio));
-
-       if (WARN(!chip, "no gpio_chip for gpio%i?", gpio))
-               return false;
 
        mutex_lock(&pinctrldev_list_mutex);
 
@@ -350,8 +348,8 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio)
                mutex_lock(&pctldev->mutex);
                list_for_each_entry(range, &pctldev->gpio_ranges, node) {
                        /* Check if any gpio range overlapped with gpio chip */
-                       if (range->base + range->npins - 1 < chip->base ||
-                           range->base > chip->base + chip->ngpio - 1)
+                       if (range->base + range->npins - 1 < gc->base ||
+                           range->base > gc->base + gc->ngpio - 1)
                                continue;
                        mutex_unlock(&pctldev->mutex);
                        mutex_unlock(&pinctrldev_list_mutex);
@@ -365,12 +363,17 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio)
        return false;
 }
 #else
-static bool pinctrl_ready_for_gpio_range(unsigned gpio) { return true; }
+static inline bool
+pinctrl_ready_for_gpio_range(struct gpio_chip *gc, unsigned int offset)
+{
+       return true;
+}
 #endif
 
 /**
  * pinctrl_get_device_gpio_range() - find device for GPIO range
- * @gpio: the pin to locate the pin controller for
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
  * @outdev: the pin control device if found
  * @outrange: the GPIO range if found
  *
@@ -379,7 +382,8 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio) { return true; }
  * -EPROBE_DEFER if the GPIO range could not be found in any device since it
  * may still have not been registered.
  */
-static int pinctrl_get_device_gpio_range(unsigned gpio,
+static int pinctrl_get_device_gpio_range(struct gpio_chip *gc,
+                                        unsigned int offset,
                                         struct pinctrl_dev **outdev,
                                         struct pinctrl_gpio_range **outrange)
 {
@@ -391,7 +395,7 @@ static int pinctrl_get_device_gpio_range(unsigned gpio,
        list_for_each_entry(pctldev, &pinctrldev_list, node) {
                struct pinctrl_gpio_range *range;
 
-               range = pinctrl_match_gpio_range(pctldev, gpio);
+               range = pinctrl_match_gpio_range(pctldev, gc, offset);
                if (range) {
                        *outdev = pctldev;
                        *outrange = range;
@@ -753,7 +757,7 @@ int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
        return -EINVAL;
 }
 
-bool pinctrl_gpio_can_use_line(unsigned gpio)
+bool pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset)
 {
        struct pinctrl_dev *pctldev;
        struct pinctrl_gpio_range *range;
@@ -765,13 +769,13 @@ bool pinctrl_gpio_can_use_line(unsigned gpio)
         * we're probably dealing with GPIO driver
         * without a backing pin controller - bail out.
         */
-       if (pinctrl_get_device_gpio_range(gpio, &pctldev, &range))
+       if (pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range))
                return true;
 
        mutex_lock(&pctldev->mutex);
 
        /* Convert to the pin controllers number space */
-       pin = gpio_to_pin(range, gpio);
+       pin = gpio_to_pin(range, gc, offset);
 
        result = pinmux_can_be_used_for_gpio(pctldev, pin);
 
@@ -783,22 +787,22 @@ EXPORT_SYMBOL_GPL(pinctrl_gpio_can_use_line);
 
 /**
  * pinctrl_gpio_request() - request a single pin to be used as GPIO
- * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
  *
  * This function should *ONLY* be used from gpiolib-based GPIO drivers,
  * as part of their gpio_request() semantics, platforms and individual drivers
  * shall *NOT* request GPIO pins to be muxed in.
  */
-int pinctrl_gpio_request(unsigned gpio)
+int pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset)
 {
-       struct pinctrl_dev *pctldev;
        struct pinctrl_gpio_range *range;
-       int ret;
-       int pin;
+       struct pinctrl_dev *pctldev;
+       int ret, pin;
 
-       ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+       ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
        if (ret) {
-               if (pinctrl_ready_for_gpio_range(gpio))
+               if (pinctrl_ready_for_gpio_range(gc, offset))
                        ret = 0;
                return ret;
        }
@@ -806,9 +810,9 @@ int pinctrl_gpio_request(unsigned gpio)
        mutex_lock(&pctldev->mutex);
 
        /* Convert to the pin controllers number space */
-       pin = gpio_to_pin(range, gpio);
+       pin = gpio_to_pin(range, gc, offset);
 
-       ret = pinmux_request_gpio(pctldev, range, pin, gpio);
+       ret = pinmux_request_gpio(pctldev, range, pin, gc->base + offset);
 
        mutex_unlock(&pctldev->mutex);
 
@@ -818,27 +822,27 @@ EXPORT_SYMBOL_GPL(pinctrl_gpio_request);
 
 /**
  * pinctrl_gpio_free() - free control on a single pin, currently used as GPIO
- * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
  *
  * This function should *ONLY* be used from gpiolib-based GPIO drivers,
- * as part of their gpio_free() semantics, platforms and individual drivers
- * shall *NOT* request GPIO pins to be muxed out.
+ * as part of their gpio_free() semantics, platforms and individual drivers
+ * shall *NOT* request GPIO pins to be muxed out.
  */
-void pinctrl_gpio_free(unsigned gpio)
+void pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset)
 {
-       struct pinctrl_dev *pctldev;
        struct pinctrl_gpio_range *range;
-       int ret;
-       int pin;
+       struct pinctrl_dev *pctldev;
+       int ret, pin;
 
-       ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
-       if (ret) {
+       ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
+       if (ret)
                return;
-       }
+
        mutex_lock(&pctldev->mutex);
 
        /* Convert to the pin controllers number space */
-       pin = gpio_to_pin(range, gpio);
+       pin = gpio_to_pin(range, gc, offset);
 
        pinmux_free_gpio(pctldev, pin, range);
 
@@ -846,14 +850,15 @@ void pinctrl_gpio_free(unsigned gpio)
 }
 EXPORT_SYMBOL_GPL(pinctrl_gpio_free);
 
-static int pinctrl_gpio_direction(unsigned gpio, bool input)
+static int pinctrl_gpio_direction(struct gpio_chip *gc, unsigned int offset,
+                                 bool input)
 {
        struct pinctrl_dev *pctldev;
        struct pinctrl_gpio_range *range;
        int ret;
        int pin;
 
-       ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+       ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
        if (ret) {
                return ret;
        }
@@ -861,7 +866,7 @@ static int pinctrl_gpio_direction(unsigned gpio, bool input)
        mutex_lock(&pctldev->mutex);
 
        /* Convert to the pin controllers number space */
-       pin = gpio_to_pin(range, gpio);
+       pin = gpio_to_pin(range, gc, offset);
        ret = pinmux_gpio_direction(pctldev, range, pin, input);
 
        mutex_unlock(&pctldev->mutex);
@@ -871,54 +876,58 @@ static int pinctrl_gpio_direction(unsigned gpio, bool input)
 
 /**
  * pinctrl_gpio_direction_input() - request a GPIO pin to go into input mode
- * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
  *
  * This function should *ONLY* be used from gpiolib-based GPIO drivers,
  * as part of their gpio_direction_input() semantics, platforms and individual
  * drivers shall *NOT* touch pin control GPIO calls.
  */
-int pinctrl_gpio_direction_input(unsigned gpio)
+int pinctrl_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
 {
-       return pinctrl_gpio_direction(gpio, true);
+       return pinctrl_gpio_direction(gc, offset, true);
 }
 EXPORT_SYMBOL_GPL(pinctrl_gpio_direction_input);
 
 /**
  * pinctrl_gpio_direction_output() - request a GPIO pin to go into output mode
- * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
  *
  * This function should *ONLY* be used from gpiolib-based GPIO drivers,
  * as part of their gpio_direction_output() semantics, platforms and individual
  * drivers shall *NOT* touch pin control GPIO calls.
  */
-int pinctrl_gpio_direction_output(unsigned gpio)
+int pinctrl_gpio_direction_output(struct gpio_chip *gc, unsigned int offset)
 {
-       return pinctrl_gpio_direction(gpio, false);
+       return pinctrl_gpio_direction(gc, offset, false);
 }
 EXPORT_SYMBOL_GPL(pinctrl_gpio_direction_output);
 
 /**
  * pinctrl_gpio_set_config() - Apply config to given GPIO pin
- * @gpio: the GPIO pin number from the GPIO subsystem number space
+ * @gc: GPIO chip structure from the GPIO subsystem
+ * @offset: hardware offset of the GPIO relative to the controller
  * @config: the configuration to apply to the GPIO
  *
  * This function should *ONLY* be used from gpiolib-based GPIO drivers, if
  * they need to call the underlying pin controller to change GPIO config
  * (for example set debounce time).
  */
-int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
+int pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+                               unsigned long config)
 {
        unsigned long configs[] = { config };
        struct pinctrl_gpio_range *range;
        struct pinctrl_dev *pctldev;
        int ret, pin;
 
-       ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+       ret = pinctrl_get_device_gpio_range(gc, offset, &pctldev, &range);
        if (ret)
                return ret;
 
        mutex_lock(&pctldev->mutex);
-       pin = gpio_to_pin(range, gpio);
+       pin = gpio_to_pin(range, gc, offset);
        ret = pinconf_set_config(pctldev, pin, configs, ARRAY_SIZE(configs));
        mutex_unlock(&pctldev->mutex);
 
index b1d8f6136f99f652d027f3f959216cdfd047de7d..067b0d344f0e5fea7223f2522f374d1e7bdcd2f3 100644 (file)
@@ -1148,14 +1148,14 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 
 static int chv_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
 {
-       return pinctrl_gpio_direction_input(chip->base + offset);
+       return pinctrl_gpio_direction_input(chip, offset);
 }
 
 static int chv_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
                                     int value)
 {
        chv_gpio_set(chip, offset, value);
-       return pinctrl_gpio_direction_output(chip->base + offset);
+       return pinctrl_gpio_direction_output(chip, offset);
 }
 
 static const struct gpio_chip chv_gpio_chip = {
index b19527a8728ef89b7f635a249bac8030e8565796..652ba451f885f0a40cd5a5aeea35493c4a498b24 100644 (file)
@@ -992,14 +992,14 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
 
 static int intel_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
 {
-       return pinctrl_gpio_direction_input(chip->base + offset);
+       return pinctrl_gpio_direction_input(chip, offset);
 }
 
 static int intel_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
                                       int value)
 {
        intel_gpio_set(chip, offset, value);
-       return pinctrl_gpio_direction_output(chip->base + offset);
+       return pinctrl_gpio_direction_output(chip, offset);
 }
 
 static const struct gpio_chip intel_gpio_chip = {
index d7bc9ef29fcc88bf82d2d2163a915d4e81872e76..e6878e4cf20cb508d46ae51f74f4403ad3fee105 100644 (file)
@@ -541,7 +541,7 @@ static void lp_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
 
 static int lp_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
 {
-       return pinctrl_gpio_direction_input(chip->base + offset);
+       return pinctrl_gpio_direction_input(chip, offset);
 }
 
 static int lp_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
@@ -549,7 +549,7 @@ static int lp_gpio_direction_output(struct gpio_chip *chip, unsigned int offset,
 {
        lp_gpio_set(chip, offset, value);
 
-       return pinctrl_gpio_direction_output(chip->base + offset);
+       return pinctrl_gpio_direction_output(chip, offset);
 }
 
 static int lp_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
index 889469c7ac26926fd65be2e88942fb1051ce72e9..c3d59eddd994aa2e00f125875bb7530ff89c1a1e 100644 (file)
@@ -510,17 +510,12 @@ static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
        mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DO, !!value);
 }
 
-static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
-{
-       return pinctrl_gpio_direction_input(chip->base + gpio);
-}
-
 static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
                                     int value)
 {
        mtk_gpio_set(chip, gpio, value);
 
-       return pinctrl_gpio_direction_output(chip->base + gpio);
+       return pinctrl_gpio_direction_output(chip, gpio);
 }
 
 static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
@@ -569,7 +564,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
        chip->parent            = hw->dev;
        chip->request           = gpiochip_generic_request;
        chip->free              = gpiochip_generic_free;
-       chip->direction_input   = mtk_gpio_direction_input;
+       chip->direction_input   = pinctrl_gpio_direction_input;
        chip->direction_output  = mtk_gpio_direction_output;
        chip->get               = mtk_gpio_get;
        chip->set               = mtk_gpio_set;
index 74b15952b742eb46c61ce68aad011682d77281c0..e79d66a0419401ee7097d7ea9694b6fe4df16bb5 100644 (file)
@@ -808,17 +808,11 @@ static const struct pinmux_ops mtk_pmx_ops = {
        .gpio_request_enable    = mtk_pmx_gpio_request_enable,
 };
 
-static int mtk_gpio_direction_input(struct gpio_chip *chip,
-                                       unsigned offset)
-{
-       return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
 static int mtk_gpio_direction_output(struct gpio_chip *chip,
                                        unsigned offset, int value)
 {
        mtk_gpio_set(chip, offset, value);
-       return pinctrl_gpio_direction_output(chip->base + offset);
+       return pinctrl_gpio_direction_output(chip, offset);
 }
 
 static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
@@ -898,7 +892,7 @@ static const struct gpio_chip mtk_gpio_chip = {
        .request                = gpiochip_generic_request,
        .free                   = gpiochip_generic_free,
        .get_direction          = mtk_gpio_get_direction,
-       .direction_input        = mtk_gpio_direction_input,
+       .direction_input        = pinctrl_gpio_direction_input,
        .direction_output       = mtk_gpio_direction_output,
        .get                    = mtk_gpio_get,
        .set                    = mtk_gpio_set,
index b7cb5a1f1060f672ef8559d05d5fa4f7c2bc6d0e..6392f1e05d028b56697e08b163c17a3dbbe92da7 100644 (file)
@@ -916,7 +916,7 @@ static int mtk_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
        if (gpio >= hw->soc->npins)
                return -EINVAL;
 
-       return pinctrl_gpio_direction_input(chip->base + gpio);
+       return pinctrl_gpio_direction_input(chip, gpio);
 }
 
 static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
@@ -929,7 +929,7 @@ static int mtk_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio,
 
        mtk_gpio_set(chip, gpio, value);
 
-       return pinctrl_gpio_direction_output(chip->base + gpio);
+       return pinctrl_gpio_direction_output(chip, gpio);
 }
 
 static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
index 8bdd0124e2eb94679ff20a395172455f8f987429..1e658721aaba51fa3ffd5de21f1dcf36c38af439 100644 (file)
@@ -171,7 +171,7 @@ static int npcmgpio_direction_input(struct gpio_chip *chip, unsigned int offset)
        struct npcm7xx_gpio *bank = gpiochip_get_data(chip);
        int ret;
 
-       ret = pinctrl_gpio_direction_input(offset + chip->base);
+       ret = pinctrl_gpio_direction_input(chip, offset);
        if (ret)
                return ret;
 
@@ -188,7 +188,7 @@ static int npcmgpio_direction_output(struct gpio_chip *chip,
        dev_dbg(chip->parent, "gpio_direction_output: offset%d = %x\n", offset,
                value);
 
-       ret = pinctrl_gpio_direction_output(offset + chip->base);
+       ret = pinctrl_gpio_direction_output(chip, offset);
        if (ret)
                return ret;
 
@@ -201,19 +201,13 @@ static int npcmgpio_gpio_request(struct gpio_chip *chip, unsigned int offset)
        int ret;
 
        dev_dbg(chip->parent, "gpio_request: offset%d\n", offset);
-       ret = pinctrl_gpio_request(offset + chip->base);
+       ret = pinctrl_gpio_request(chip, offset);
        if (ret)
                return ret;
 
        return bank->request(chip, offset);
 }
 
-static void npcmgpio_gpio_free(struct gpio_chip *chip, unsigned int offset)
-{
-       dev_dbg(chip->parent, "gpio_free: offset%d\n", offset);
-       pinctrl_gpio_free(offset + chip->base);
-}
-
 static void npcmgpio_irq_handler(struct irq_desc *desc)
 {
        struct gpio_chip *gc;
@@ -1916,7 +1910,7 @@ static int npcm7xx_gpio_of(struct npcm7xx_pinctrl *pctrl)
                pctrl->gpio_bank[id].gc.direction_output = npcmgpio_direction_output;
                pctrl->gpio_bank[id].request = pctrl->gpio_bank[id].gc.request;
                pctrl->gpio_bank[id].gc.request = npcmgpio_gpio_request;
-               pctrl->gpio_bank[id].gc.free = npcmgpio_gpio_free;
+               pctrl->gpio_bank[id].gc.free = pinctrl_gpio_free;
                id++;
        }
 
index da21f6a45888f5a71a64f5245d6fb9957178ad7d..a377d36b0eb07275bcfb68147624cee85aec6683 100644 (file)
@@ -173,7 +173,7 @@ static int npcmgpio_direction_input(struct gpio_chip *chip, unsigned int offset)
        struct npcm8xx_gpio *bank = gpiochip_get_data(chip);
        int ret;
 
-       ret = pinctrl_gpio_direction_input(offset + chip->base);
+       ret = pinctrl_gpio_direction_input(chip, offset);
        if (ret)
                return ret;
 
@@ -186,7 +186,7 @@ static int npcmgpio_direction_output(struct gpio_chip *chip,
        struct npcm8xx_gpio *bank = gpiochip_get_data(chip);
        int ret;
 
-       ret = pinctrl_gpio_direction_output(offset + chip->base);
+       ret = pinctrl_gpio_direction_output(chip, offset);
        if (ret)
                return ret;
 
@@ -198,18 +198,13 @@ static int npcmgpio_gpio_request(struct gpio_chip *chip, unsigned int offset)
        struct npcm8xx_gpio *bank = gpiochip_get_data(chip);
        int ret;
 
-       ret = pinctrl_gpio_request(offset + chip->base);
+       ret = pinctrl_gpio_request(chip, offset);
        if (ret)
                return ret;
 
        return bank->request(chip, offset);
 }
 
-static void npcmgpio_gpio_free(struct gpio_chip *chip, unsigned int offset)
-{
-       pinctrl_gpio_free(offset + chip->base);
-}
-
 static void npcmgpio_irq_handler(struct irq_desc *desc)
 {
        unsigned long sts, en, bit;
@@ -2388,7 +2383,7 @@ static int npcm8xx_gpio_fw(struct npcm8xx_pinctrl *pctrl)
                pctrl->gpio_bank[id].gc.direction_output = npcmgpio_direction_output;
                pctrl->gpio_bank[id].request = pctrl->gpio_bank[id].gc.request;
                pctrl->gpio_bank[id].gc.request = npcmgpio_gpio_request;
-               pctrl->gpio_bank[id].gc.free = npcmgpio_gpio_free;
+               pctrl->gpio_bank[id].gc.free = pinctrl_gpio_free;
                for (i = 0 ; i < NPCM8XX_DEBOUNCE_MAX ; i++)
                        pctrl->gpio_bank[id].debounce.set_val[i] = false;
                pctrl->gpio_bank[id].gc.add_pin_ranges = npcmgpio_add_pin_ranges;
index 84b47a6cc3a6ff100bef40ed88fca96662e8467b..6a5f23cf7a2a2119b6a9c4e43815f5a2094414d2 100644 (file)
@@ -500,16 +500,11 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
                        "GPIO_SIGNAL_OUT_REG update failed: %d\n", ret);
 }
 
-static int as3722_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
 static int as3722_gpio_direction_output(struct gpio_chip *chip,
                unsigned offset, int value)
 {
        as3722_gpio_set(chip, offset, value);
-       return pinctrl_gpio_direction_output(chip->base + offset);
+       return pinctrl_gpio_direction_output(chip, offset);
 }
 
 static int as3722_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
@@ -526,7 +521,7 @@ static const struct gpio_chip as3722_gpio_chip = {
        .free                   = gpiochip_generic_free,
        .get                    = as3722_gpio_get,
        .set                    = as3722_gpio_set,
-       .direction_input        = as3722_gpio_direction_input,
+       .direction_input        = pinctrl_gpio_direction_input,
        .direction_output       = as3722_gpio_direction_output,
        .to_irq                 = as3722_gpio_to_irq,
        .can_sleep              = true,
index 9f5b3ab8e184b23d7086132c9626e588585aa79c..2b4805e74eed32bd5a5d3e20143bb80571ffa349 100644 (file)
@@ -124,11 +124,6 @@ static int axp20x_gpio_get_reg(unsigned int offset)
        return -EINVAL;
 }
 
-static int axp20x_gpio_input(struct gpio_chip *chip, unsigned int offset)
-{
-       return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
 static int axp20x_gpio_get(struct gpio_chip *chip, unsigned int offset)
 {
        struct axp20x_pctl *pctl = gpiochip_get_data(chip);
@@ -474,7 +469,7 @@ static int axp20x_pctl_probe(struct platform_device *pdev)
        pctl->chip.get                  = axp20x_gpio_get;
        pctl->chip.get_direction        = axp20x_gpio_get_direction;
        pctl->chip.set                  = axp20x_gpio_set;
-       pctl->chip.direction_input      = axp20x_gpio_input;
+       pctl->chip.direction_input      = pinctrl_gpio_direction_input;
        pctl->chip.direction_output     = axp20x_gpio_output;
 
        pctl->desc = of_device_get_match_data(dev);
index fae80b52a6fc6676b743575136b1beebc6546fbe..04285c930e945ee6701ccfdf152b825d91214137 100644 (file)
@@ -554,7 +554,7 @@ out:
 
 static int cy8c95x0_gpio_direction_input(struct gpio_chip *gc, unsigned int off)
 {
-       return pinctrl_gpio_direction_input(gc->base + off);
+       return pinctrl_gpio_direction_input(gc, off);
 }
 
 static int cy8c95x0_gpio_direction_output(struct gpio_chip *gc,
@@ -571,7 +571,7 @@ static int cy8c95x0_gpio_direction_output(struct gpio_chip *gc,
        if (ret)
                return ret;
 
-       return pinctrl_gpio_direction_output(gc->base + off);
+       return pinctrl_gpio_direction_output(gc, off);
 }
 
 static int cy8c95x0_gpio_get_value(struct gpio_chip *gc, unsigned int off)
index 2f220a47b749788b1cb3a0cb4a961652e761a0a2..ee718f6e25566a437382623a17e370e2e70abab9 100644 (file)
@@ -133,6 +133,8 @@ struct ingenic_pinctrl {
        struct pinctrl_pin_desc *pdesc;
 
        const struct ingenic_chip_info *info;
+
+       struct gpio_chip *gc;
 };
 
 struct ingenic_gpio_chip {
@@ -3558,17 +3560,11 @@ static int ingenic_gpio_get(struct gpio_chip *gc, unsigned int offset)
        return (int) ingenic_gpio_get_value(jzgc, offset);
 }
 
-static int ingenic_gpio_direction_input(struct gpio_chip *gc,
-               unsigned int offset)
-{
-       return pinctrl_gpio_direction_input(gc->base + offset);
-}
-
 static int ingenic_gpio_direction_output(struct gpio_chip *gc,
                unsigned int offset, int value)
 {
        ingenic_gpio_set(gc, offset, value);
-       return pinctrl_gpio_direction_output(gc->base + offset);
+       return pinctrl_gpio_direction_output(gc, offset);
 }
 
 static inline void ingenic_config_pin(struct ingenic_pinctrl *jzpc,
@@ -3678,7 +3674,7 @@ static int ingenic_gpio_irq_request(struct irq_data *data)
        irq_hw_number_t irq = irqd_to_hwirq(data);
        int ret;
 
-       ret = ingenic_gpio_direction_input(gpio_chip, irq);
+       ret = pinctrl_gpio_direction_input(gpio_chip, irq);
        if (ret)
                return ret;
 
@@ -4052,7 +4048,8 @@ static int ingenic_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                        break;
 
                case PIN_CONFIG_OUTPUT:
-                       ret = pinctrl_gpio_direction_output(pin);
+                       ret = pinctrl_gpio_direction_output(jzpc->gc,
+                                                       pin - jzpc->gc->base);
                        if (ret)
                                return ret;
 
@@ -4172,6 +4169,8 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
        if (!jzgc)
                return -ENOMEM;
 
+       jzpc->gc = &jzgc->gc;
+
        jzgc->jzpc = jzpc;
        jzgc->reg_base = bank * jzpc->info->reg_offset;
 
@@ -4192,7 +4191,7 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc,
 
        jzgc->gc.set = ingenic_gpio_set;
        jzgc->gc.get = ingenic_gpio_get;
-       jzgc->gc.direction_input = ingenic_gpio_direction_input;
+       jzgc->gc.direction_input = pinctrl_gpio_direction_input;
        jzgc->gc.direction_output = ingenic_gpio_direction_output;
        jzgc->gc.get_direction = ingenic_gpio_get_direction;
        jzgc->gc.request = gpiochip_generic_request;
index f8ae2e9742217cc39e314fb1e3c304a84a684019..52aadd6d72a80f74011efe30b309eea19fd55903 100644 (file)
@@ -1776,12 +1776,6 @@ static int ocelot_gpio_get_direction(struct gpio_chip *chip,
        return GPIO_LINE_DIRECTION_IN;
 }
 
-static int ocelot_gpio_direction_input(struct gpio_chip *chip,
-                                      unsigned int offset)
-{
-       return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
 static int ocelot_gpio_direction_output(struct gpio_chip *chip,
                                        unsigned int offset, int value)
 {
@@ -1795,7 +1789,7 @@ static int ocelot_gpio_direction_output(struct gpio_chip *chip,
                regmap_write(info->map, REG(OCELOT_GPIO_OUT_CLR, info, offset),
                             pin);
 
-       return pinctrl_gpio_direction_output(chip->base + offset);
+       return pinctrl_gpio_direction_output(chip, offset);
 }
 
 static const struct gpio_chip ocelot_gpiolib_chip = {
@@ -1804,7 +1798,7 @@ static const struct gpio_chip ocelot_gpiolib_chip = {
        .set = ocelot_gpio_set,
        .get = ocelot_gpio_get,
        .get_direction = ocelot_gpio_get_direction,
-       .direction_input = ocelot_gpio_direction_input,
+       .direction_input = pinctrl_gpio_direction_input,
        .direction_output = ocelot_gpio_direction_output,
        .owner = THIS_MODULE,
 };
index 2639a9ee82cd0fa150c021cfa089551bafd7a096..56d916f2cee6d715ede3cbca4c2ce3be5930f97e 100644 (file)
@@ -286,17 +286,11 @@ static void rk805_gpio_set(struct gpio_chip *chip,
                        offset, value);
 }
 
-static int rk805_gpio_direction_input(struct gpio_chip *chip,
-                                     unsigned int offset)
-{
-       return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
 static int rk805_gpio_direction_output(struct gpio_chip *chip,
                                       unsigned int offset, int value)
 {
        rk805_gpio_set(chip, offset, value);
-       return pinctrl_gpio_direction_output(chip->base + offset);
+       return pinctrl_gpio_direction_output(chip, offset);
 }
 
 static int rk805_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
@@ -330,7 +324,7 @@ static const struct gpio_chip rk805_gpio_chip = {
        .get_direction          = rk805_gpio_get_direction,
        .get                    = rk805_gpio_get,
        .set                    = rk805_gpio_set,
-       .direction_input        = rk805_gpio_direction_input,
+       .direction_input        = pinctrl_gpio_direction_input,
        .direction_output       = rk805_gpio_direction_output,
        .can_sleep              = true,
        .base                   = -1,
index c1f36b164ea5de74ef489075bb452810fd33cadc..1485573b523c2fbe5090b75fe6d05d23d0dd97c5 100644 (file)
@@ -717,20 +717,13 @@ static void st_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
        __st_gpio_set(bank, offset, value);
 }
 
-static int st_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_gpio_direction_input(chip->base + offset);
-
-       return 0;
-}
-
 static int st_gpio_direction_output(struct gpio_chip *chip,
        unsigned offset, int value)
 {
        struct st_gpio_bank *bank = gpiochip_get_data(chip);
 
        __st_gpio_set(bank, offset, value);
-       pinctrl_gpio_direction_output(chip->base + offset);
+       pinctrl_gpio_direction_output(chip, offset);
 
        return 0;
 }
@@ -1330,7 +1323,7 @@ static int st_gpio_irq_request_resources(struct irq_data *d)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
 
-       st_gpio_direction_input(gc, d->hwirq);
+       pinctrl_gpio_direction_input(gc, d->hwirq);
 
        return gpiochip_reqres_irq(gc, d->hwirq);
 }
@@ -1488,7 +1481,7 @@ static const struct gpio_chip st_gpio_template = {
        .free                   = gpiochip_generic_free,
        .get                    = st_gpio_get,
        .set                    = st_gpio_set,
-       .direction_input        = st_gpio_direction_input,
+       .direction_input        = pinctrl_gpio_direction_input,
        .direction_output       = st_gpio_direction_output,
        .get_direction          = st_gpio_get_direction,
        .ngpio                  = ST_GPIO_PINS_PER_BANK,
index 5758daf94fe2e867282a4c35c0b7196fb11f6734..a5136dacaaf212ee711262c599f3e273b01b6670 100644 (file)
@@ -135,12 +135,12 @@ static int gpio_pin_request(struct gpio_chip *gc, unsigned offset)
        if (idx < 0 || pfc->info->pins[idx].enum_id == 0)
                return -EINVAL;
 
-       return pinctrl_gpio_request(gc->base + offset);
+       return pinctrl_gpio_request(gc, offset);
 }
 
 static void gpio_pin_free(struct gpio_chip *gc, unsigned offset)
 {
-       return pinctrl_gpio_free(gc->base + offset);
+       return pinctrl_gpio_free(gc, offset);
 }
 
 static void gpio_pin_set_value(struct sh_pfc_chip *chip, unsigned offset,
@@ -164,7 +164,7 @@ static void gpio_pin_set_value(struct sh_pfc_chip *chip, unsigned offset,
 
 static int gpio_pin_direction_input(struct gpio_chip *gc, unsigned offset)
 {
-       return pinctrl_gpio_direction_input(gc->base + offset);
+       return pinctrl_gpio_direction_input(gc, offset);
 }
 
 static int gpio_pin_direction_output(struct gpio_chip *gc, unsigned offset,
@@ -172,7 +172,7 @@ static int gpio_pin_direction_output(struct gpio_chip *gc, unsigned offset,
 {
        gpio_pin_set_value(gpiochip_get_data(gc), offset, value);
 
-       return pinctrl_gpio_direction_output(gc->base + offset);
+       return pinctrl_gpio_direction_output(gc, offset);
 }
 
 static int gpio_pin_get(struct gpio_chip *gc, unsigned offset)
index c7c6d912a975fca21dbdfd2ec6005427d71088a4..9de350ad7e7d5d2d31eb183cbdd38ee012de809f 100644 (file)
@@ -1056,7 +1056,7 @@ static int rzg2l_gpio_request(struct gpio_chip *chip, unsigned int offset)
        if (ret)
                return ret;
 
-       ret = pinctrl_gpio_request(chip->base + offset);
+       ret = pinctrl_gpio_request(chip, offset);
        if (ret)
                return ret;
 
@@ -1181,7 +1181,7 @@ static void rzg2l_gpio_free(struct gpio_chip *chip, unsigned int offset)
 {
        unsigned int virq;
 
-       pinctrl_gpio_free(chip->base + offset);
+       pinctrl_gpio_free(chip, offset);
 
        virq = irq_find_mapping(chip->irq.domain, offset);
        if (virq)
index 52aeafaba4b697a0d8c719f2bc80697ad0d856d5..21d7d5ac8c4a711f666033fcd077a63450d7feaa 100644 (file)
@@ -754,7 +754,7 @@ static int rzv2m_gpio_request(struct gpio_chip *chip, unsigned int offset)
        u8 bit = RZV2M_PIN_ID_TO_PIN(offset);
        int ret;
 
-       ret = pinctrl_gpio_request(chip->base + offset);
+       ret = pinctrl_gpio_request(chip, offset);
        if (ret)
                return ret;
 
@@ -832,7 +832,7 @@ static int rzv2m_gpio_get(struct gpio_chip *chip, unsigned int offset)
 
 static void rzv2m_gpio_free(struct gpio_chip *chip, unsigned int offset)
 {
-       pinctrl_gpio_free(chip->base + offset);
+       pinctrl_gpio_free(chip, offset);
 
        /*
         * Set the GPIO as an input to ensure that the next GPIO request won't
index 722681e0b89b784fbe90dc50f712bf978089b7e0..a05570c7d833d9b8268989f2a6e54e1873d20de9 100644 (file)
@@ -204,14 +204,13 @@ static void plgpio_set_value(struct gpio_chip *chip, unsigned offset, int value)
 static int plgpio_request(struct gpio_chip *chip, unsigned offset)
 {
        struct plgpio *plgpio = gpiochip_get_data(chip);
-       int gpio = chip->base + offset;
        unsigned long flags;
        int ret = 0;
 
        if (offset >= chip->ngpio)
                return -EINVAL;
 
-       ret = pinctrl_gpio_request(gpio);
+       ret = pinctrl_gpio_request(chip, offset);
        if (ret)
                return ret;
 
@@ -249,14 +248,13 @@ err1:
        if (!IS_ERR(plgpio->clk))
                clk_disable(plgpio->clk);
 err0:
-       pinctrl_gpio_free(gpio);
+       pinctrl_gpio_free(chip, offset);
        return ret;
 }
 
 static void plgpio_free(struct gpio_chip *chip, unsigned offset)
 {
        struct plgpio *plgpio = gpiochip_get_data(chip);
-       int gpio = chip->base + offset;
        unsigned long flags;
 
        if (offset >= chip->ngpio)
@@ -280,7 +278,7 @@ disable_clk:
        if (!IS_ERR(plgpio->clk))
                clk_disable(plgpio->clk);
 
-       pinctrl_gpio_free(gpio);
+       pinctrl_gpio_free(chip, offset);
 }
 
 /* PLGPIO IRQ */
index 530fe340a9a154ad01bb6109ef764a2839ac8a77..ea70b8c61679f0134e99350a3e3d7f812ac33e3c 100644 (file)
@@ -916,16 +916,6 @@ static struct pinctrl_desc starfive_desc = {
        .custom_conf_items = starfive_pinconf_custom_conf_items,
 };
 
-static int starfive_gpio_request(struct gpio_chip *gc, unsigned int gpio)
-{
-       return pinctrl_gpio_request(gc->base + gpio);
-}
-
-static void starfive_gpio_free(struct gpio_chip *gc, unsigned int gpio)
-{
-       pinctrl_gpio_free(gc->base + gpio);
-}
-
 static int starfive_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio)
 {
        struct starfive_pinctrl *sfp = container_of(gc, struct starfive_pinctrl, gc);
@@ -1309,8 +1299,8 @@ static int starfive_probe(struct platform_device *pdev)
 
        sfp->gc.label = dev_name(dev);
        sfp->gc.owner = THIS_MODULE;
-       sfp->gc.request = starfive_gpio_request;
-       sfp->gc.free = starfive_gpio_free;
+       sfp->gc.request = pinctrl_gpio_request;
+       sfp->gc.free = pinctrl_gpio_free;
        sfp->gc.get_direction = starfive_gpio_get_direction;
        sfp->gc.direction_input = starfive_gpio_direction_input;
        sfp->gc.direction_output = starfive_gpio_direction_output;
index 640f827a9b2ca6c0e9e05364b00e29874f0078d9..9d71e8c1331020cd73eb4b61aec8191a9433f8c0 100644 (file)
@@ -545,16 +545,6 @@ static const struct pinconf_ops jh7110_pinconf_ops = {
        .is_generic             = true,
 };
 
-static int jh7110_gpio_request(struct gpio_chip *gc, unsigned int gpio)
-{
-       return pinctrl_gpio_request(gc->base + gpio);
-}
-
-static void jh7110_gpio_free(struct gpio_chip *gc, unsigned int gpio)
-{
-       pinctrl_gpio_free(gc->base + gpio);
-}
-
 static int jh7110_gpio_get_direction(struct gpio_chip *gc,
                                     unsigned int gpio)
 {
@@ -940,8 +930,8 @@ int jh7110_pinctrl_probe(struct platform_device *pdev)
 
        sfp->gc.label = dev_name(dev);
        sfp->gc.owner = THIS_MODULE;
-       sfp->gc.request = jh7110_gpio_request;
-       sfp->gc.free = jh7110_gpio_free;
+       sfp->gc.request = pinctrl_gpio_request;
+       sfp->gc.free = pinctrl_gpio_free;
        sfp->gc.get_direction = jh7110_gpio_get_direction;
        sfp->gc.direction_input = jh7110_gpio_direction_input;
        sfp->gc.direction_output = jh7110_gpio_direction_output;
index a73385a431de98120db87aae78565c59866e9416..64e8201c7eacd347ffa85c5139b937e0725016ec 100644 (file)
@@ -217,12 +217,7 @@ static int stm32_gpio_request(struct gpio_chip *chip, unsigned offset)
                return -EINVAL;
        }
 
-       return pinctrl_gpio_request(chip->base + offset);
-}
-
-static void stm32_gpio_free(struct gpio_chip *chip, unsigned offset)
-{
-       pinctrl_gpio_free(chip->base + offset);
+       return pinctrl_gpio_request(chip, offset);
 }
 
 static int stm32_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -239,18 +234,13 @@ static void stm32_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
        __stm32_gpio_set(bank, offset, value);
 }
 
-static int stm32_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
 static int stm32_gpio_direction_output(struct gpio_chip *chip,
        unsigned offset, int value)
 {
        struct stm32_gpio_bank *bank = gpiochip_get_data(chip);
 
        __stm32_gpio_set(bank, offset, value);
-       pinctrl_gpio_direction_output(chip->base + offset);
+       pinctrl_gpio_direction_output(chip, offset);
 
        return 0;
 }
@@ -316,10 +306,10 @@ static int stm32_gpio_init_valid_mask(struct gpio_chip *chip,
 
 static const struct gpio_chip stm32_gpio_template = {
        .request                = stm32_gpio_request,
-       .free                   = stm32_gpio_free,
+       .free                   = pinctrl_gpio_free,
        .get                    = stm32_gpio_get,
        .set                    = stm32_gpio_set,
-       .direction_input        = stm32_gpio_direction_input,
+       .direction_input        = pinctrl_gpio_direction_input,
        .direction_output       = stm32_gpio_direction_output,
        .to_irq                 = stm32_gpio_to_irq,
        .get_direction          = stm32_gpio_get_direction,
@@ -381,7 +371,7 @@ static int stm32_gpio_irq_request_resources(struct irq_data *irq_data)
        struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent);
        int ret;
 
-       ret = stm32_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
+       ret = pinctrl_gpio_direction_input(&bank->gpio_chip, irq_data->hwirq);
        if (ret)
                return ret;
 
index 6fac30de1c6a884985b5ec891a7eef3f76920a80..fce92111a32e4976bd31c7e9845e4fd08a7affa3 100644 (file)
@@ -526,16 +526,11 @@ static void wmt_gpio_set_value(struct gpio_chip *chip, unsigned offset,
                wmt_clearbits(data, reg_data_out, BIT(bit));
 }
 
-static int wmt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
-{
-       return pinctrl_gpio_direction_input(chip->base + offset);
-}
-
 static int wmt_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
                                     int value)
 {
        wmt_gpio_set_value(chip, offset, value);
-       return pinctrl_gpio_direction_output(chip->base + offset);
+       return pinctrl_gpio_direction_output(chip, offset);
 }
 
 static const struct gpio_chip wmt_gpio_chip = {
@@ -544,7 +539,7 @@ static const struct gpio_chip wmt_gpio_chip = {
        .request = gpiochip_generic_request,
        .free = gpiochip_generic_free,
        .get_direction = wmt_gpio_get_direction,
-       .direction_input = wmt_gpio_direction_input,
+       .direction_input = pinctrl_gpio_direction_input,
        .direction_output = wmt_gpio_direction_output,
        .get = wmt_gpio_get_value,
        .set = wmt_gpio_set_value,
index 282cd7d24077389780268d6ed8ee96a69041d84b..3f7a7478880240a2d256caf624b61dcc8e7054af 100644 (file)
@@ -108,6 +108,7 @@ int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
                container_of(pccontext->clk, struct ptp_clock, clock);
        struct timestamp_event_queue *queue;
        char debugfsname[32];
+       unsigned long flags;
 
        queue = kzalloc(sizeof(*queue), GFP_KERNEL);
        if (!queue)
@@ -119,7 +120,9 @@ int ptp_open(struct posix_clock_context *pccontext, fmode_t fmode)
        }
        bitmap_set(queue->mask, 0, PTP_MAX_CHANNELS);
        spin_lock_init(&queue->lock);
+       spin_lock_irqsave(&ptp->tsevqs_lock, flags);
        list_add_tail(&queue->qlist, &ptp->tsevqs);
+       spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
        pccontext->private_clkdata = queue;
 
        /* Debugfs contents */
@@ -139,16 +142,16 @@ int ptp_release(struct posix_clock_context *pccontext)
 {
        struct timestamp_event_queue *queue = pccontext->private_clkdata;
        unsigned long flags;
+       struct ptp_clock *ptp =
+               container_of(pccontext->clk, struct ptp_clock, clock);
 
-       if (queue) {
-               debugfs_remove(queue->debugfs_instance);
-               pccontext->private_clkdata = NULL;
-               spin_lock_irqsave(&queue->lock, flags);
-               list_del(&queue->qlist);
-               spin_unlock_irqrestore(&queue->lock, flags);
-               bitmap_free(queue->mask);
-               kfree(queue);
-       }
+       debugfs_remove(queue->debugfs_instance);
+       pccontext->private_clkdata = NULL;
+       spin_lock_irqsave(&ptp->tsevqs_lock, flags);
+       list_del(&queue->qlist);
+       spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
+       bitmap_free(queue->mask);
+       kfree(queue);
        return 0;
 }
 
@@ -585,7 +588,5 @@ ssize_t ptp_read(struct posix_clock_context *pccontext, uint rdflags,
 free_event:
        kfree(event);
 exit:
-       if (result < 0)
-               ptp_release(pccontext);
        return result;
 }
index 3d1b0a97301c95cc9daf9d298a8b85974af1cda5..3134568af622d396f6ab15049cd1a3ace3243269 100644 (file)
@@ -179,11 +179,11 @@ static void ptp_clock_release(struct device *dev)
        mutex_destroy(&ptp->pincfg_mux);
        mutex_destroy(&ptp->n_vclocks_mux);
        /* Delete first entry */
+       spin_lock_irqsave(&ptp->tsevqs_lock, flags);
        tsevq = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
                                 qlist);
-       spin_lock_irqsave(&tsevq->lock, flags);
        list_del(&tsevq->qlist);
-       spin_unlock_irqrestore(&tsevq->lock, flags);
+       spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
        bitmap_free(tsevq->mask);
        kfree(tsevq);
        debugfs_remove(ptp->debugfs_root);
@@ -247,6 +247,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
        if (!queue)
                goto no_memory_queue;
        list_add_tail(&queue->qlist, &ptp->tsevqs);
+       spin_lock_init(&ptp->tsevqs_lock);
        queue->mask = bitmap_alloc(PTP_MAX_CHANNELS, GFP_KERNEL);
        if (!queue->mask)
                goto no_memory_bitmap;
@@ -407,6 +408,7 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
 {
        struct timestamp_event_queue *tsevq;
        struct pps_event_time evt;
+       unsigned long flags;
 
        switch (event->type) {
 
@@ -415,10 +417,12 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
 
        case PTP_CLOCK_EXTTS:
                /* Enqueue timestamp on selected queues */
+               spin_lock_irqsave(&ptp->tsevqs_lock, flags);
                list_for_each_entry(tsevq, &ptp->tsevqs, qlist) {
                        if (test_bit((unsigned int)event->index, tsevq->mask))
                                enqueue_external_timestamp(tsevq, event);
                }
+               spin_unlock_irqrestore(&ptp->tsevqs_lock, flags);
                wake_up_interruptible(&ptp->tsev_wq);
                break;
 
index 52f87e394aa641ffb434fa98fd7c39b6a8d1fd07..35fde0a0574606a04d6bdf0ab42a204da5fa6532 100644 (file)
@@ -44,6 +44,7 @@ struct ptp_clock {
        struct pps_device *pps_source;
        long dialed_frequency; /* remembers the frequency adjustment */
        struct list_head tsevqs; /* timestamp fifo list */
+       spinlock_t tsevqs_lock; /* protects tsevqs from concurrent access */
        struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
        wait_queue_head_t tsev_wq;
        int defunct; /* tells readers to go away when clock is being removed */
index 8ebcddf91f7b78582ab3b182879477a7be4f5d38..4b956d661755d67f1eee61a91afacbf1a766582c 100644 (file)
@@ -173,8 +173,8 @@ config PWM_CLPS711X
          will be called pwm-clps711x.
 
 config PWM_CRC
-       bool "Intel Crystalcove (CRC) PWM support"
-       depends on X86 && INTEL_SOC_PMIC
+       tristate "Intel Crystalcove (CRC) PWM support"
+       depends on INTEL_SOC_PMIC
        help
          Generic PWM framework driver for Crystalcove (CRC) PMIC based PWM
          control.
@@ -186,9 +186,19 @@ config PWM_CROS_EC
          PWM driver for exposing a PWM attached to the ChromeOS Embedded
          Controller.
 
+config PWM_DWC_CORE
+       tristate
+       depends on HAS_IOMEM
+       help
+         PWM driver for Synopsys DWC PWM Controller.
+
+         To compile this driver as a module, build the dependecies as
+         modules, this will be called pwm-dwc-core.
+
 config PWM_DWC
-       tristate "DesignWare PWM Controller"
-       depends on PCI
+       tristate "DesignWare PWM Controller (PCI bus)"
+       depends on HAS_IOMEM && PCI
+       select PWM_DWC_CORE
        help
          PWM driver for Synopsys DWC PWM Controller attached to a PCI bus.
 
@@ -407,7 +417,7 @@ config PWM_MEDIATEK
 
 config PWM_MICROCHIP_CORE
        tristate "Microchip corePWM PWM support"
-       depends on SOC_MICROCHIP_POLARFIRE || COMPILE_TEST
+       depends on ARCH_MICROCHIP_POLARFIRE || COMPILE_TEST
        depends on HAS_IOMEM && OF
        help
          PWM driver for Microchip FPGA soft IP core.
index c822389c2a24c2dd76b106ebb64b4a215f522889..c5ec9e168ee7c5b467fdcb0226b479c083eb3d30 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_PWM_CLK)         += pwm-clk.o
 obj-$(CONFIG_PWM_CLPS711X)     += pwm-clps711x.o
 obj-$(CONFIG_PWM_CRC)          += pwm-crc.o
 obj-$(CONFIG_PWM_CROS_EC)      += pwm-cros-ec.o
+obj-$(CONFIG_PWM_DWC_CORE)     += pwm-dwc-core.o
 obj-$(CONFIG_PWM_DWC)          += pwm-dwc.o
 obj-$(CONFIG_PWM_EP93XX)       += pwm-ep93xx.o
 obj-$(CONFIG_PWM_FSL_FTM)      += pwm-fsl-ftm.o
index dc66e3405bf50bf10f128d01bacb899023cb7dfe..29078486534d40323015ca255c2b5d5328854b4f 100644 (file)
@@ -89,13 +89,13 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
        if (test_bit(PWMF_REQUESTED, &pwm->flags))
                return -EBUSY;
 
-       if (!try_module_get(pwm->chip->ops->owner))
+       if (!try_module_get(pwm->chip->owner))
                return -ENODEV;
 
        if (pwm->chip->ops->request) {
                err = pwm->chip->ops->request(pwm->chip, pwm);
                if (err) {
-                       module_put(pwm->chip->ops->owner);
+                       module_put(pwm->chip->owner);
                        return err;
                }
        }
@@ -208,36 +208,6 @@ static void of_pwmchip_remove(struct pwm_chip *chip)
                of_node_put(chip->dev->of_node);
 }
 
-/**
- * pwm_set_chip_data() - set private chip data for a PWM
- * @pwm: PWM device
- * @data: pointer to chip-specific data
- *
- * Returns: 0 on success or a negative error code on failure.
- */
-int pwm_set_chip_data(struct pwm_device *pwm, void *data)
-{
-       if (!pwm)
-               return -EINVAL;
-
-       pwm->chip_data = data;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(pwm_set_chip_data);
-
-/**
- * pwm_get_chip_data() - get private chip data for a PWM
- * @pwm: PWM device
- *
- * Returns: A pointer to the chip-private data for the PWM device.
- */
-void *pwm_get_chip_data(struct pwm_device *pwm)
-{
-       return pwm ? pwm->chip_data : NULL;
-}
-EXPORT_SYMBOL_GPL(pwm_get_chip_data);
-
 static bool pwm_ops_check(const struct pwm_chip *chip)
 {
        const struct pwm_ops *ops = chip->ops;
@@ -253,14 +223,16 @@ static bool pwm_ops_check(const struct pwm_chip *chip)
 }
 
 /**
- * pwmchip_add() - register a new PWM chip
+ * __pwmchip_add() - register a new PWM chip
  * @chip: the PWM chip to add
+ * @owner: reference to the module providing the chip.
  *
- * Register a new PWM chip.
+ * Register a new PWM chip. @owner is supposed to be THIS_MODULE, use the
+ * pwmchip_add wrapper to do this right.
  *
  * Returns: 0 on success or a negative error code on failure.
  */
-int pwmchip_add(struct pwm_chip *chip)
+int __pwmchip_add(struct pwm_chip *chip, struct module *owner)
 {
        struct pwm_device *pwm;
        unsigned int i;
@@ -272,6 +244,8 @@ int pwmchip_add(struct pwm_chip *chip)
        if (!pwm_ops_check(chip))
                return -EINVAL;
 
+       chip->owner = owner;
+
        chip->pwms = kcalloc(chip->npwm, sizeof(*pwm), GFP_KERNEL);
        if (!chip->pwms)
                return -ENOMEM;
@@ -306,7 +280,7 @@ int pwmchip_add(struct pwm_chip *chip)
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(pwmchip_add);
+EXPORT_SYMBOL_GPL(__pwmchip_add);
 
 /**
  * pwmchip_remove() - remove a PWM chip
@@ -338,17 +312,17 @@ static void devm_pwmchip_remove(void *data)
        pwmchip_remove(chip);
 }
 
-int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip)
+int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner)
 {
        int ret;
 
-       ret = pwmchip_add(chip);
+       ret = __pwmchip_add(chip, owner);
        if (ret)
                return ret;
 
        return devm_add_action_or_reset(dev, devm_pwmchip_remove, chip);
 }
-EXPORT_SYMBOL_GPL(devm_pwmchip_add);
+EXPORT_SYMBOL_GPL(__devm_pwmchip_add);
 
 /**
  * pwm_request_from_chip() - request a PWM device relative to a PWM chip
@@ -976,10 +950,9 @@ void pwm_put(struct pwm_device *pwm)
        if (pwm->chip->ops->free)
                pwm->chip->ops->free(pwm->chip, pwm);
 
-       pwm_set_chip_data(pwm, NULL);
        pwm->label = NULL;
 
-       module_put(pwm->chip->ops->owner);
+       module_put(pwm->chip->owner);
 out:
        mutex_unlock(&pwm_lock);
 }
index 583a7d69c7415173caca43124b70e8491008a17d..670d33daea846bb8c4cc183438c270d2fb403c0c 100644 (file)
@@ -181,7 +181,6 @@ static int ab8500_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops ab8500_pwm_ops = {
        .apply = ab8500_pwm_apply,
        .get_state = ab8500_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static int ab8500_pwm_probe(struct platform_device *pdev)
index 8e7d67fb5fbefd84e32c7437335784e5b6238e0b..4d755b628d9e79650d5e71d5ab912a7995697312 100644 (file)
@@ -99,7 +99,6 @@ static int apple_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops apple_pwm_ops = {
        .apply = apple_pwm_apply,
        .get_state = apple_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static int apple_pwm_probe(struct platform_device *pdev)
index e271d920151e471c8d6622841c7b432557da8c46..07920e0347575aeb420c303c93fdd3ae99dc8448 100644 (file)
@@ -170,7 +170,6 @@ static int atmel_hlcdc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops atmel_hlcdc_pwm_ops = {
        .apply = atmel_hlcdc_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_at91sam9x5_errata = {
index c00dd37c5fbd86b9dd060d5c16757348e145a281..98b33c016c3c76b8a9bd4cd2492b5871a33690aa 100644 (file)
@@ -364,7 +364,6 @@ static const struct pwm_ops atmel_tcb_pwm_ops = {
        .request = atmel_tcb_pwm_request,
        .free = atmel_tcb_pwm_free,
        .apply = atmel_tcb_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static struct atmel_tcb_config tcb_rm9200_config = {
index 1f73325d1bea0a3fe2669934af3930a2cce2f233..47bcc8a3bf9d50c48cf80f29e1b03defdb388e4c 100644 (file)
@@ -402,7 +402,6 @@ static int atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops atmel_pwm_ops = {
        .apply = atmel_pwm_apply,
        .get_state = atmel_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static const struct atmel_pwm_data atmel_sam9rl_pwm_data = {
@@ -547,7 +546,7 @@ disable_clk:
 static struct platform_driver atmel_pwm_driver = {
        .driver = {
                .name = "atmel-pwm",
-               .of_match_table = of_match_ptr(atmel_pwm_dt_ids),
+               .of_match_table = atmel_pwm_dt_ids,
        },
        .probe = atmel_pwm_probe,
 };
index 7d70b6f186a6f8a459d9e4e6c5693ae248f477ed..758254025683d5cf2063d27b47d938cbed2877b2 100644 (file)
@@ -183,7 +183,6 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops iproc_pwm_ops = {
        .apply = iproc_pwmc_apply,
        .get_state = iproc_pwmc_get_state,
-       .owner = THIS_MODULE,
 };
 
 static int iproc_pwmc_probe(struct platform_device *pdev)
@@ -207,18 +206,10 @@ static int iproc_pwmc_probe(struct platform_device *pdev)
        if (IS_ERR(ip->base))
                return PTR_ERR(ip->base);
 
-       ip->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(ip->clk)) {
-               dev_err(&pdev->dev, "failed to get clock: %ld\n",
-                       PTR_ERR(ip->clk));
-               return PTR_ERR(ip->clk);
-       }
-
-       ret = clk_prepare_enable(ip->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
-               return ret;
-       }
+       ip->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+       if (IS_ERR(ip->clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(ip->clk),
+                                    "failed to get clock\n");
 
        /* Set full drive and normal polarity for all channels */
        value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
@@ -230,22 +221,12 @@ static int iproc_pwmc_probe(struct platform_device *pdev)
 
        writel(value, ip->base + IPROC_PWM_CTRL_OFFSET);
 
-       ret = pwmchip_add(&ip->chip);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
-               clk_disable_unprepare(ip->clk);
-       }
-
-       return ret;
-}
-
-static void iproc_pwmc_remove(struct platform_device *pdev)
-{
-       struct iproc_pwmc *ip = platform_get_drvdata(pdev);
+       ret = devm_pwmchip_add(&pdev->dev, &ip->chip);
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret,
+                                    "failed to add PWM chip\n");
 
-       pwmchip_remove(&ip->chip);
-
-       clk_disable_unprepare(ip->clk);
+       return 0;
 }
 
 static const struct of_device_id bcm_iproc_pwmc_dt[] = {
@@ -260,7 +241,6 @@ static struct platform_driver iproc_pwmc_driver = {
                .of_match_table = bcm_iproc_pwmc_dt,
        },
        .probe = iproc_pwmc_probe,
-       .remove_new = iproc_pwmc_remove,
 };
 module_platform_driver(iproc_pwmc_driver);
 
index e5b00cc9f7a762d4b4fe1b2c753c62aec394f72a..15d6ed03c3ce05a58db5d65e8fb1f3532b35635f 100644 (file)
@@ -269,7 +269,6 @@ static int kona_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops kona_pwm_ops = {
        .apply = kona_pwmc_apply,
-       .owner = THIS_MODULE,
 };
 
 static int kona_pwmc_probe(struct platform_device *pdev)
index bdfc2a5ec0d6929f9f09ddeb25766485c6771199..9777babd5b95cd9fc3a4388745840ab1a7e2bbdc 100644 (file)
@@ -129,7 +129,6 @@ static const struct pwm_ops bcm2835_pwm_ops = {
        .request = bcm2835_pwm_request,
        .free = bcm2835_pwm_free,
        .apply = bcm2835_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int bcm2835_pwm_probe(struct platform_device *pdev)
@@ -147,41 +146,42 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
        if (IS_ERR(pc->base))
                return PTR_ERR(pc->base);
 
-       pc->clk = devm_clk_get(&pdev->dev, NULL);
+       pc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(pc->clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
                                     "clock not found\n");
 
-       ret = clk_prepare_enable(pc->clk);
-       if (ret)
-               return ret;
-
        pc->chip.dev = &pdev->dev;
        pc->chip.ops = &bcm2835_pwm_ops;
        pc->chip.npwm = 2;
 
-       platform_set_drvdata(pdev, pc);
-
-       ret = pwmchip_add(&pc->chip);
+       ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
        if (ret < 0)
-               goto add_fail;
+               return dev_err_probe(&pdev->dev, ret,
+                                    "failed to add pwmchip\n");
 
        return 0;
+}
+
+static int bcm2835_pwm_suspend(struct device *dev)
+{
+       struct bcm2835_pwm *pc = dev_get_drvdata(dev);
 
-add_fail:
        clk_disable_unprepare(pc->clk);
-       return ret;
+
+       return 0;
 }
 
-static void bcm2835_pwm_remove(struct platform_device *pdev)
+static int bcm2835_pwm_resume(struct device *dev)
 {
-       struct bcm2835_pwm *pc = platform_get_drvdata(pdev);
-
-       pwmchip_remove(&pc->chip);
+       struct bcm2835_pwm *pc = dev_get_drvdata(dev);
 
-       clk_disable_unprepare(pc->clk);
+       return clk_prepare_enable(pc->clk);
 }
 
+static DEFINE_SIMPLE_DEV_PM_OPS(bcm2835_pwm_pm_ops, bcm2835_pwm_suspend,
+                               bcm2835_pwm_resume);
+
 static const struct of_device_id bcm2835_pwm_of_match[] = {
        { .compatible = "brcm,bcm2835-pwm", },
        { /* sentinel */ }
@@ -192,9 +192,9 @@ static struct platform_driver bcm2835_pwm_driver = {
        .driver = {
                .name = "bcm2835-pwm",
                .of_match_table = bcm2835_pwm_of_match,
+               .pm = pm_ptr(&bcm2835_pwm_pm_ops),
        },
        .probe = bcm2835_pwm_probe,
-       .remove_new = bcm2835_pwm_remove,
 };
 module_platform_driver(bcm2835_pwm_driver);
 
index 0971c666afd134f28e343b7218e4dbb2a947d88f..ba2d799917695fc4d767160049dfe45a4dca83d3 100644 (file)
@@ -39,6 +39,8 @@
 #define BERLIN_PWM_TCNT                        0xc
 #define  BERLIN_PWM_MAX_TCNT           65535
 
+#define BERLIN_PWM_NUMPWMS             4
+
 struct berlin_pwm_channel {
        u32 enable;
        u32 ctrl;
@@ -50,6 +52,7 @@ struct berlin_pwm_chip {
        struct pwm_chip chip;
        struct clk *clk;
        void __iomem *base;
+       struct berlin_pwm_channel channel[BERLIN_PWM_NUMPWMS];
 };
 
 static inline struct berlin_pwm_chip *to_berlin_pwm_chip(struct pwm_chip *chip)
@@ -70,24 +73,6 @@ static inline void berlin_pwm_writel(struct berlin_pwm_chip *bpc,
        writel_relaxed(value, bpc->base + channel * 0x10 + offset);
 }
 
-static int berlin_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-       struct berlin_pwm_channel *channel;
-
-       channel = kzalloc(sizeof(*channel), GFP_KERNEL);
-       if (!channel)
-               return -ENOMEM;
-
-       return pwm_set_chip_data(pwm, channel);
-}
-
-static void berlin_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-       struct berlin_pwm_channel *channel = pwm_get_chip_data(pwm);
-
-       kfree(channel);
-}
-
 static int berlin_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                             u64 duty_ns, u64 period_ns)
 {
@@ -202,10 +187,7 @@ static int berlin_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 }
 
 static const struct pwm_ops berlin_pwm_ops = {
-       .request = berlin_pwm_request,
-       .free = berlin_pwm_free,
        .apply = berlin_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static const struct of_device_id berlin_pwm_match[] = {
@@ -227,39 +209,23 @@ static int berlin_pwm_probe(struct platform_device *pdev)
        if (IS_ERR(bpc->base))
                return PTR_ERR(bpc->base);
 
-       bpc->clk = devm_clk_get(&pdev->dev, NULL);
+       bpc->clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(bpc->clk))
                return PTR_ERR(bpc->clk);
 
-       ret = clk_prepare_enable(bpc->clk);
-       if (ret)
-               return ret;
-
        bpc->chip.dev = &pdev->dev;
        bpc->chip.ops = &berlin_pwm_ops;
-       bpc->chip.npwm = 4;
+       bpc->chip.npwm = BERLIN_PWM_NUMPWMS;
 
-       ret = pwmchip_add(&bpc->chip);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
-               clk_disable_unprepare(bpc->clk);
-               return ret;
-       }
+       ret = devm_pwmchip_add(&pdev->dev, &bpc->chip);
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
 
        platform_set_drvdata(pdev, bpc);
 
        return 0;
 }
 
-static void berlin_pwm_remove(struct platform_device *pdev)
-{
-       struct berlin_pwm_chip *bpc = platform_get_drvdata(pdev);
-
-       pwmchip_remove(&bpc->chip);
-
-       clk_disable_unprepare(bpc->clk);
-}
-
 #ifdef CONFIG_PM_SLEEP
 static int berlin_pwm_suspend(struct device *dev)
 {
@@ -267,11 +233,7 @@ static int berlin_pwm_suspend(struct device *dev)
        unsigned int i;
 
        for (i = 0; i < bpc->chip.npwm; i++) {
-               struct berlin_pwm_channel *channel;
-
-               channel = pwm_get_chip_data(&bpc->chip.pwms[i]);
-               if (!channel)
-                       continue;
+               struct berlin_pwm_channel *channel = &bpc->channel[i];
 
                channel->enable = berlin_pwm_readl(bpc, i, BERLIN_PWM_ENABLE);
                channel->ctrl = berlin_pwm_readl(bpc, i, BERLIN_PWM_CONTROL);
@@ -295,11 +257,7 @@ static int berlin_pwm_resume(struct device *dev)
                return ret;
 
        for (i = 0; i < bpc->chip.npwm; i++) {
-               struct berlin_pwm_channel *channel;
-
-               channel = pwm_get_chip_data(&bpc->chip.pwms[i]);
-               if (!channel)
-                       continue;
+               struct berlin_pwm_channel *channel = &bpc->channel[i];
 
                berlin_pwm_writel(bpc, i, channel->ctrl, BERLIN_PWM_CONTROL);
                berlin_pwm_writel(bpc, i, channel->duty, BERLIN_PWM_DUTY);
@@ -316,7 +274,6 @@ static SIMPLE_DEV_PM_OPS(berlin_pwm_pm_ops, berlin_pwm_suspend,
 
 static struct platform_driver berlin_pwm_driver = {
        .probe = berlin_pwm_probe,
-       .remove_new = berlin_pwm_remove,
        .driver = {
                .name = "berlin-pwm",
                .of_match_table = berlin_pwm_match,
index a3faa9a3de7ccfa6dc4c82253de3d8ef1b0fb4ef..b723c2d4f485c762f8e301bae794c4c86c971303 100644 (file)
@@ -220,7 +220,6 @@ static int brcmstb_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops brcmstb_pwm_ops = {
        .apply = brcmstb_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static const struct of_device_id brcmstb_pwm_of_match[] = {
@@ -238,17 +237,10 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
        if (!p)
                return -ENOMEM;
 
-       p->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(p->clk)) {
-               dev_err(&pdev->dev, "failed to obtain clock\n");
-               return PTR_ERR(p->clk);
-       }
-
-       ret = clk_prepare_enable(p->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to enable clock: %d\n", ret);
-               return ret;
-       }
+       p->clk = devm_clk_get_enabled(&pdev->dev, NULL);
+       if (IS_ERR(p->clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(p->clk),
+                                    "failed to obtain clock\n");
 
        platform_set_drvdata(pdev, p);
 
@@ -257,30 +249,14 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
        p->chip.npwm = 2;
 
        p->base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(p->base)) {
-               ret = PTR_ERR(p->base);
-               goto out_clk;
-       }
+       if (IS_ERR(p->base))
+               return PTR_ERR(p->base);
 
-       ret = pwmchip_add(&p->chip);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
-               goto out_clk;
-       }
+       ret = devm_pwmchip_add(&pdev->dev, &p->chip);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
 
        return 0;
-
-out_clk:
-       clk_disable_unprepare(p->clk);
-       return ret;
-}
-
-static void brcmstb_pwm_remove(struct platform_device *pdev)
-{
-       struct brcmstb_pwm *p = platform_get_drvdata(pdev);
-
-       pwmchip_remove(&p->chip);
-       clk_disable_unprepare(p->clk);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -288,7 +264,7 @@ static int brcmstb_pwm_suspend(struct device *dev)
 {
        struct brcmstb_pwm *p = dev_get_drvdata(dev);
 
-       clk_disable(p->clk);
+       clk_disable_unprepare(p->clk);
 
        return 0;
 }
@@ -297,9 +273,7 @@ static int brcmstb_pwm_resume(struct device *dev)
 {
        struct brcmstb_pwm *p = dev_get_drvdata(dev);
 
-       clk_enable(p->clk);
-
-       return 0;
+       return clk_prepare_enable(p->clk);
 }
 #endif
 
@@ -308,7 +282,6 @@ static SIMPLE_DEV_PM_OPS(brcmstb_pwm_pm_ops, brcmstb_pwm_suspend,
 
 static struct platform_driver brcmstb_pwm_driver = {
        .probe = brcmstb_pwm_probe,
-       .remove_new = brcmstb_pwm_remove,
        .driver = {
                .name = "pwm-brcmstb",
                .of_match_table = brcmstb_pwm_of_match,
index 0ee4d2aee4df0c31d7878e1527d690f13557e335..9dd88b386907cf87a4b7ebc47055f2c84391da0d 100644 (file)
@@ -77,7 +77,6 @@ static int pwm_clk_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops pwm_clk_ops = {
        .apply = pwm_clk_apply,
-       .owner = THIS_MODULE,
 };
 
 static int pwm_clk_probe(struct platform_device *pdev)
index b0d91142da8d07a2792f9ca6542a67ac7a0ed8be..42179b3f7ec399a22f3db1adc397309e0be9208f 100644 (file)
@@ -72,7 +72,6 @@ static int clps711x_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops clps711x_pwm_ops = {
        .request = clps711x_pwm_request,
        .apply = clps711x_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static struct pwm_device *clps711x_pwm_xlate(struct pwm_chip *chip,
index b9f063dc6b5fd9c70631cfd473f80db748924d46..2b0b659eee9797520587ada0537abb2c75329d42 100644 (file)
@@ -184,5 +184,8 @@ static struct platform_driver crystalcove_pwm_driver = {
                .name = "crystal_cove_pwm",
        },
 };
+module_platform_driver(crystalcove_pwm_driver);
 
-builtin_platform_driver(crystalcove_pwm_driver);
+MODULE_ALIAS("platform:crystal_cove_pwm");
+MODULE_DESCRIPTION("Intel Crystalcove (CRC) PWM support");
+MODULE_LICENSE("GPL");
index baaac0c33aa0687a1a44fa7763b468417670a4e2..4fbd23e4ef693c40ba73bf84ed4851903c8b67bd 100644 (file)
  * @ec: Pointer to EC device
  * @chip: PWM controller chip
  * @use_pwm_type: Use PWM types instead of generic channels
+ * @channel: array with per-channel data
  */
 struct cros_ec_pwm_device {
        struct device *dev;
        struct cros_ec_device *ec;
        struct pwm_chip chip;
        bool use_pwm_type;
+       struct cros_ec_pwm *channel;
 };
 
 /**
@@ -43,26 +45,6 @@ static inline struct cros_ec_pwm_device *pwm_to_cros_ec_pwm(struct pwm_chip *chi
        return container_of(chip, struct cros_ec_pwm_device, chip);
 }
 
-static int cros_ec_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-       struct cros_ec_pwm *channel;
-
-       channel = kzalloc(sizeof(*channel), GFP_KERNEL);
-       if (!channel)
-               return -ENOMEM;
-
-       pwm_set_chip_data(pwm, channel);
-
-       return 0;
-}
-
-static void cros_ec_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-       struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
-
-       kfree(channel);
-}
-
 static int cros_ec_dt_type_to_pwm_type(u8 dt_index, u8 *pwm_type)
 {
        switch (dt_index) {
@@ -158,7 +140,7 @@ static int cros_ec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                             const struct pwm_state *state)
 {
        struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
-       struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
+       struct cros_ec_pwm *channel = &ec_pwm->channel[pwm->hwpwm];
        u16 duty_cycle;
        int ret;
 
@@ -188,7 +170,7 @@ static int cros_ec_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
                                 struct pwm_state *state)
 {
        struct cros_ec_pwm_device *ec_pwm = pwm_to_cros_ec_pwm(chip);
-       struct cros_ec_pwm *channel = pwm_get_chip_data(pwm);
+       struct cros_ec_pwm *channel = &ec_pwm->channel[pwm->hwpwm];
        int ret;
 
        ret = cros_ec_pwm_get_duty(ec_pwm, pwm->hwpwm);
@@ -237,11 +219,8 @@ cros_ec_pwm_xlate(struct pwm_chip *chip, const struct of_phandle_args *args)
 }
 
 static const struct pwm_ops cros_ec_pwm_ops = {
-       .request = cros_ec_pwm_request,
-       .free = cros_ec_pwm_free,
        .get_state      = cros_ec_pwm_get_state,
        .apply          = cros_ec_pwm_apply,
-       .owner          = THIS_MODULE,
 };
 
 /*
@@ -286,10 +265,8 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
        struct pwm_chip *chip;
        int ret;
 
-       if (!ec) {
-               dev_err(dev, "no parent EC device\n");
-               return -EINVAL;
-       }
+       if (!ec)
+               return dev_err_probe(dev, -EINVAL, "no parent EC device\n");
 
        ec_pwm = devm_kzalloc(dev, sizeof(*ec_pwm), GFP_KERNEL);
        if (!ec_pwm)
@@ -310,32 +287,23 @@ static int cros_ec_pwm_probe(struct platform_device *pdev)
                chip->npwm = CROS_EC_PWM_DT_COUNT;
        } else {
                ret = cros_ec_num_pwms(ec_pwm);
-               if (ret < 0) {
-                       dev_err(dev, "Couldn't find PWMs: %d\n", ret);
-                       return ret;
-               }
+               if (ret < 0)
+                       return dev_err_probe(dev, ret, "Couldn't find PWMs\n");
                chip->npwm = ret;
        }
 
-       dev_dbg(dev, "Probed %u PWMs\n", chip->npwm);
-
-       ret = pwmchip_add(chip);
-       if (ret < 0) {
-               dev_err(dev, "cannot register PWM: %d\n", ret);
-               return ret;
-       }
-
-       platform_set_drvdata(pdev, ec_pwm);
+       ec_pwm->channel = devm_kcalloc(dev, chip->npwm, sizeof(*ec_pwm->channel),
+                                       GFP_KERNEL);
+       if (!ec_pwm->channel)
+               return -ENOMEM;
 
-       return ret;
-}
+       dev_dbg(dev, "Probed %u PWMs\n", chip->npwm);
 
-static void cros_ec_pwm_remove(struct platform_device *dev)
-{
-       struct cros_ec_pwm_device *ec_pwm = platform_get_drvdata(dev);
-       struct pwm_chip *chip = &ec_pwm->chip;
+       ret = devm_pwmchip_add(dev, chip);
+       if (ret < 0)
+               return dev_err_probe(dev, ret, "cannot register PWM\n");
 
-       pwmchip_remove(chip);
+       return 0;
 }
 
 #ifdef CONFIG_OF
@@ -349,7 +317,6 @@ MODULE_DEVICE_TABLE(of, cros_ec_pwm_of_match);
 
 static struct platform_driver cros_ec_pwm_driver = {
        .probe = cros_ec_pwm_probe,
-       .remove_new = cros_ec_pwm_remove,
        .driver = {
                .name = "cros-ec-pwm",
                .of_match_table = of_match_ptr(cros_ec_pwm_of_match),
diff --git a/drivers/pwm/pwm-dwc-core.c b/drivers/pwm/pwm-dwc-core.c
new file mode 100644 (file)
index 0000000..ea63dd7
--- /dev/null
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DesignWare PWM Controller driver core
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ *
+ * Author: Felipe Balbi (Intel)
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ * Author: Raymond Tan <raymond.tan@intel.com>
+ */
+
+#define DEFAULT_SYMBOL_NAMESPACE dwc_pwm
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/pwm.h>
+
+#include "pwm-dwc.h"
+
+static void __dwc_pwm_set_enable(struct dwc_pwm *dwc, int pwm, int enabled)
+{
+       u32 reg;
+
+       reg = dwc_pwm_readl(dwc, DWC_TIM_CTRL(pwm));
+
+       if (enabled)
+               reg |= DWC_TIM_CTRL_EN;
+       else
+               reg &= ~DWC_TIM_CTRL_EN;
+
+       dwc_pwm_writel(dwc, reg, DWC_TIM_CTRL(pwm));
+}
+
+static int __dwc_pwm_configure_timer(struct dwc_pwm *dwc,
+                                    struct pwm_device *pwm,
+                                    const struct pwm_state *state)
+{
+       u64 tmp;
+       u32 ctrl;
+       u32 high;
+       u32 low;
+
+       /*
+        * Calculate the width of the low and high periods in terms of input
+        * clock periods and check that the results are within the HW limits,
+        * between 1 and 2^32 periods.
+        */
+       tmp = DIV_ROUND_CLOSEST_ULL(state->duty_cycle, dwc->clk_ns);
+       if (tmp < 1 || tmp > (1ULL << 32))
+               return -ERANGE;
+       low = tmp - 1;
+
+       tmp = DIV_ROUND_CLOSEST_ULL(state->period - state->duty_cycle,
+                                   dwc->clk_ns);
+       if (tmp < 1 || tmp > (1ULL << 32))
+               return -ERANGE;
+       high = tmp - 1;
+
+       /*
+        * Specification says timer usage flow is to disable timer, then
+        * program it followed by enable. It also says Load Count is loaded
+        * into timer after it is enabled - either after a disable or
+        * a reset. Based on measurements it happens also without disable
+        * whenever Load Count is updated. But follow the specification.
+        */
+       __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
+
+       /*
+        * Write Load Count and Load Count 2 registers. Former defines the
+        * width of low period and latter the width of high period in terms
+        * multiple of input clock periods:
+        * Width = ((Count + 1) * input clock period).
+        */
+       dwc_pwm_writel(dwc, low, DWC_TIM_LD_CNT(pwm->hwpwm));
+       dwc_pwm_writel(dwc, high, DWC_TIM_LD_CNT2(pwm->hwpwm));
+
+       /*
+        * Set user-defined mode, timer reloads from Load Count registers
+        * when it counts down to 0.
+        * Set PWM mode, it makes output to toggle and width of low and high
+        * periods are set by Load Count registers.
+        */
+       ctrl = DWC_TIM_CTRL_MODE_USER | DWC_TIM_CTRL_PWM;
+       dwc_pwm_writel(dwc, ctrl, DWC_TIM_CTRL(pwm->hwpwm));
+
+       /*
+        * Enable timer. Output starts from low period.
+        */
+       __dwc_pwm_set_enable(dwc, pwm->hwpwm, state->enabled);
+
+       return 0;
+}
+
+static int dwc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+                        const struct pwm_state *state)
+{
+       struct dwc_pwm *dwc = to_dwc_pwm(chip);
+
+       if (state->polarity != PWM_POLARITY_INVERSED)
+               return -EINVAL;
+
+       if (state->enabled) {
+               if (!pwm->state.enabled)
+                       pm_runtime_get_sync(chip->dev);
+               return __dwc_pwm_configure_timer(dwc, pwm, state);
+       } else {
+               if (pwm->state.enabled) {
+                       __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
+                       pm_runtime_put_sync(chip->dev);
+               }
+       }
+
+       return 0;
+}
+
+static int dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+                            struct pwm_state *state)
+{
+       struct dwc_pwm *dwc = to_dwc_pwm(chip);
+       u64 duty, period;
+       u32 ctrl, ld, ld2;
+
+       pm_runtime_get_sync(chip->dev);
+
+       ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(pwm->hwpwm));
+       ld = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(pwm->hwpwm));
+       ld2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(pwm->hwpwm));
+
+       state->enabled = !!(ctrl & DWC_TIM_CTRL_EN);
+
+       /*
+        * If we're not in PWM, technically the output is a 50-50
+        * based on the timer load-count only.
+        */
+       if (ctrl & DWC_TIM_CTRL_PWM) {
+               duty = (ld + 1) * dwc->clk_ns;
+               period = (ld2 + 1)  * dwc->clk_ns;
+               period += duty;
+       } else {
+               duty = (ld + 1) * dwc->clk_ns;
+               period = duty * 2;
+       }
+
+       state->polarity = PWM_POLARITY_INVERSED;
+       state->period = period;
+       state->duty_cycle = duty;
+
+       pm_runtime_put_sync(chip->dev);
+
+       return 0;
+}
+
+static const struct pwm_ops dwc_pwm_ops = {
+       .apply = dwc_pwm_apply,
+       .get_state = dwc_pwm_get_state,
+};
+
+struct dwc_pwm *dwc_pwm_alloc(struct device *dev)
+{
+       struct dwc_pwm *dwc;
+
+       dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
+       if (!dwc)
+               return NULL;
+
+       dwc->clk_ns = 10;
+       dwc->chip.dev = dev;
+       dwc->chip.ops = &dwc_pwm_ops;
+       dwc->chip.npwm = DWC_TIMERS_TOTAL;
+
+       dev_set_drvdata(dev, dwc);
+       return dwc;
+}
+EXPORT_SYMBOL_GPL(dwc_pwm_alloc);
+
+MODULE_AUTHOR("Felipe Balbi (Intel)");
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
+MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
+MODULE_DESCRIPTION("DesignWare PWM Controller");
+MODULE_LICENSE("GPL");
index 3bbb26c862c35c6f866d6129c2a488ff9979cab6..bd9cadb497d70e34b1b3eb974aa125bf6d44ab8d 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * DesignWare PWM Controller driver
+ * DesignWare PWM Controller driver (PCI part)
  *
  * Copyright (C) 2018-2020 Intel Corporation
  *
@@ -13,6 +13,8 @@
  *   periods are one or more input clock periods long.
  */
 
+#define DEFAULT_SYMBOL_NAMESPACE dwc_pwm
+
 #include <linux/bitops.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/pm_runtime.h>
 #include <linux/pwm.h>
 
-#define DWC_TIM_LD_CNT(n)      ((n) * 0x14)
-#define DWC_TIM_LD_CNT2(n)     (((n) * 4) + 0xb0)
-#define DWC_TIM_CUR_VAL(n)     (((n) * 0x14) + 0x04)
-#define DWC_TIM_CTRL(n)                (((n) * 0x14) + 0x08)
-#define DWC_TIM_EOI(n)         (((n) * 0x14) + 0x0c)
-#define DWC_TIM_INT_STS(n)     (((n) * 0x14) + 0x10)
-
-#define DWC_TIMERS_INT_STS     0xa0
-#define DWC_TIMERS_EOI         0xa4
-#define DWC_TIMERS_RAW_INT_STS 0xa8
-#define DWC_TIMERS_COMP_VERSION        0xac
-
-#define DWC_TIMERS_TOTAL       8
-#define DWC_CLK_PERIOD_NS      10
-
-/* Timer Control Register */
-#define DWC_TIM_CTRL_EN                BIT(0)
-#define DWC_TIM_CTRL_MODE      BIT(1)
-#define DWC_TIM_CTRL_MODE_FREE (0 << 1)
-#define DWC_TIM_CTRL_MODE_USER (1 << 1)
-#define DWC_TIM_CTRL_INT_MASK  BIT(2)
-#define DWC_TIM_CTRL_PWM       BIT(3)
-
-struct dwc_pwm_ctx {
-       u32 cnt;
-       u32 cnt2;
-       u32 ctrl;
-};
-
-struct dwc_pwm {
-       struct pwm_chip chip;
-       void __iomem *base;
-       struct dwc_pwm_ctx ctx[DWC_TIMERS_TOTAL];
-};
-#define to_dwc_pwm(p)  (container_of((p), struct dwc_pwm, chip))
-
-static inline u32 dwc_pwm_readl(struct dwc_pwm *dwc, u32 offset)
-{
-       return readl(dwc->base + offset);
-}
-
-static inline void dwc_pwm_writel(struct dwc_pwm *dwc, u32 value, u32 offset)
-{
-       writel(value, dwc->base + offset);
-}
-
-static void __dwc_pwm_set_enable(struct dwc_pwm *dwc, int pwm, int enabled)
-{
-       u32 reg;
-
-       reg = dwc_pwm_readl(dwc, DWC_TIM_CTRL(pwm));
-
-       if (enabled)
-               reg |= DWC_TIM_CTRL_EN;
-       else
-               reg &= ~DWC_TIM_CTRL_EN;
-
-       dwc_pwm_writel(dwc, reg, DWC_TIM_CTRL(pwm));
-}
-
-static int __dwc_pwm_configure_timer(struct dwc_pwm *dwc,
-                                    struct pwm_device *pwm,
-                                    const struct pwm_state *state)
-{
-       u64 tmp;
-       u32 ctrl;
-       u32 high;
-       u32 low;
-
-       /*
-        * Calculate width of low and high period in terms of input clock
-        * periods and check are the result within HW limits between 1 and
-        * 2^32 periods.
-        */
-       tmp = DIV_ROUND_CLOSEST_ULL(state->duty_cycle, DWC_CLK_PERIOD_NS);
-       if (tmp < 1 || tmp > (1ULL << 32))
-               return -ERANGE;
-       low = tmp - 1;
-
-       tmp = DIV_ROUND_CLOSEST_ULL(state->period - state->duty_cycle,
-                                   DWC_CLK_PERIOD_NS);
-       if (tmp < 1 || tmp > (1ULL << 32))
-               return -ERANGE;
-       high = tmp - 1;
-
-       /*
-        * Specification says timer usage flow is to disable timer, then
-        * program it followed by enable. It also says Load Count is loaded
-        * into timer after it is enabled - either after a disable or
-        * a reset. Based on measurements it happens also without disable
-        * whenever Load Count is updated. But follow the specification.
-        */
-       __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
-
-       /*
-        * Write Load Count and Load Count 2 registers. Former defines the
-        * width of low period and latter the width of high period in terms
-        * multiple of input clock periods:
-        * Width = ((Count + 1) * input clock period).
-        */
-       dwc_pwm_writel(dwc, low, DWC_TIM_LD_CNT(pwm->hwpwm));
-       dwc_pwm_writel(dwc, high, DWC_TIM_LD_CNT2(pwm->hwpwm));
-
-       /*
-        * Set user-defined mode, timer reloads from Load Count registers
-        * when it counts down to 0.
-        * Set PWM mode, it makes output to toggle and width of low and high
-        * periods are set by Load Count registers.
-        */
-       ctrl = DWC_TIM_CTRL_MODE_USER | DWC_TIM_CTRL_PWM;
-       dwc_pwm_writel(dwc, ctrl, DWC_TIM_CTRL(pwm->hwpwm));
-
-       /*
-        * Enable timer. Output starts from low period.
-        */
-       __dwc_pwm_set_enable(dwc, pwm->hwpwm, state->enabled);
-
-       return 0;
-}
-
-static int dwc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
-                        const struct pwm_state *state)
-{
-       struct dwc_pwm *dwc = to_dwc_pwm(chip);
-
-       if (state->polarity != PWM_POLARITY_INVERSED)
-               return -EINVAL;
-
-       if (state->enabled) {
-               if (!pwm->state.enabled)
-                       pm_runtime_get_sync(chip->dev);
-               return __dwc_pwm_configure_timer(dwc, pwm, state);
-       } else {
-               if (pwm->state.enabled) {
-                       __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
-                       pm_runtime_put_sync(chip->dev);
-               }
-       }
-
-       return 0;
-}
-
-static int dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
-                            struct pwm_state *state)
-{
-       struct dwc_pwm *dwc = to_dwc_pwm(chip);
-       u64 duty, period;
-
-       pm_runtime_get_sync(chip->dev);
-
-       state->enabled = !!(dwc_pwm_readl(dwc,
-                               DWC_TIM_CTRL(pwm->hwpwm)) & DWC_TIM_CTRL_EN);
-
-       duty = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(pwm->hwpwm));
-       duty += 1;
-       duty *= DWC_CLK_PERIOD_NS;
-       state->duty_cycle = duty;
-
-       period = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(pwm->hwpwm));
-       period += 1;
-       period *= DWC_CLK_PERIOD_NS;
-       period += duty;
-       state->period = period;
-
-       state->polarity = PWM_POLARITY_INVERSED;
-
-       pm_runtime_put_sync(chip->dev);
-
-       return 0;
-}
-
-static const struct pwm_ops dwc_pwm_ops = {
-       .apply = dwc_pwm_apply,
-       .get_state = dwc_pwm_get_state,
-       .owner = THIS_MODULE,
-};
-
-static struct dwc_pwm *dwc_pwm_alloc(struct device *dev)
-{
-       struct dwc_pwm *dwc;
-
-       dwc = devm_kzalloc(dev, sizeof(*dwc), GFP_KERNEL);
-       if (!dwc)
-               return NULL;
-
-       dwc->chip.dev = dev;
-       dwc->chip.ops = &dwc_pwm_ops;
-       dwc->chip.npwm = DWC_TIMERS_TOTAL;
-
-       dev_set_drvdata(dev, dwc);
-       return dwc;
-}
+#include "pwm-dwc.h"
 
 static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
 {
diff --git a/drivers/pwm/pwm-dwc.h b/drivers/pwm/pwm-dwc.h
new file mode 100644 (file)
index 0000000..6479524
--- /dev/null
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DesignWare PWM Controller driver
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ *
+ * Author: Felipe Balbi (Intel)
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ * Author: Raymond Tan <raymond.tan@intel.com>
+ */
+
+MODULE_IMPORT_NS(dwc_pwm);
+
+#define DWC_TIM_LD_CNT(n)      ((n) * 0x14)
+#define DWC_TIM_LD_CNT2(n)     (((n) * 4) + 0xb0)
+#define DWC_TIM_CUR_VAL(n)     (((n) * 0x14) + 0x04)
+#define DWC_TIM_CTRL(n)                (((n) * 0x14) + 0x08)
+#define DWC_TIM_EOI(n)         (((n) * 0x14) + 0x0c)
+#define DWC_TIM_INT_STS(n)     (((n) * 0x14) + 0x10)
+
+#define DWC_TIMERS_INT_STS     0xa0
+#define DWC_TIMERS_EOI         0xa4
+#define DWC_TIMERS_RAW_INT_STS 0xa8
+#define DWC_TIMERS_COMP_VERSION        0xac
+
+#define DWC_TIMERS_TOTAL       8
+
+/* Timer Control Register */
+#define DWC_TIM_CTRL_EN                BIT(0)
+#define DWC_TIM_CTRL_MODE      BIT(1)
+#define DWC_TIM_CTRL_MODE_FREE (0 << 1)
+#define DWC_TIM_CTRL_MODE_USER (1 << 1)
+#define DWC_TIM_CTRL_INT_MASK  BIT(2)
+#define DWC_TIM_CTRL_PWM       BIT(3)
+
+struct dwc_pwm_ctx {
+       u32 cnt;
+       u32 cnt2;
+       u32 ctrl;
+};
+
+struct dwc_pwm {
+       struct pwm_chip chip;
+       void __iomem *base;
+       unsigned int clk_ns;
+       struct dwc_pwm_ctx ctx[DWC_TIMERS_TOTAL];
+};
+#define to_dwc_pwm(p)  (container_of((p), struct dwc_pwm, chip))
+
+static inline u32 dwc_pwm_readl(struct dwc_pwm *dwc, u32 offset)
+{
+       return readl(dwc->base + offset);
+}
+
+static inline void dwc_pwm_writel(struct dwc_pwm *dwc, u32 value, u32 offset)
+{
+       writel(value, dwc->base + offset);
+}
+
+extern struct dwc_pwm *dwc_pwm_alloc(struct device *dev);
index c45a75e65c869ea48cb4ecde71937571e3309393..51e072572a87be12308d4059ac3f8f05dc21bcbf 100644 (file)
@@ -159,7 +159,6 @@ static const struct pwm_ops ep93xx_pwm_ops = {
        .request = ep93xx_pwm_request,
        .free = ep93xx_pwm_free,
        .apply = ep93xx_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int ep93xx_pwm_probe(struct platform_device *pdev)
index b7c6045c5d0898de1424a7c022f389467524fcec..d1b6d1aa477369e4db5ffbd93a9319c137a367a7 100644 (file)
@@ -350,7 +350,6 @@ static const struct pwm_ops fsl_pwm_ops = {
        .request = fsl_pwm_request,
        .free = fsl_pwm_free,
        .apply = fsl_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int fsl_pwm_init(struct fsl_pwm_chip *fpc)
index f7ba6fe9a349a3ebdb09f16bc50004624ab75f7d..c435776e2f78688b4739aa3aada6d2558eae5363 100644 (file)
@@ -185,7 +185,6 @@ static const struct pwm_ops hibvt_pwm_ops = {
        .get_state = hibvt_pwm_get_state,
        .apply = hibvt_pwm_apply,
 
-       .owner = THIS_MODULE,
 };
 
 static int hibvt_pwm_probe(struct platform_device *pdev)
index 326af85888e7b4bd27f852d4bc62eaea080191c1..116fa060e3029942f557fceb4d090794f87d3044 100644 (file)
@@ -208,7 +208,6 @@ static int img_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops img_pwm_ops = {
        .apply = img_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static const struct img_pwm_soc_data pistachio_pwm = {
index 98ab65c896850ef224febe09506e919f47338b3b..dc6aafeb9f7b44eb976cb89edbaf965fb648723a 100644 (file)
@@ -332,7 +332,6 @@ static const struct pwm_ops imx_tpm_pwm_ops = {
        .free = pwm_imx_tpm_free,
        .get_state = pwm_imx_tpm_get_state,
        .apply = pwm_imx_tpm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int pwm_imx_tpm_probe(struct platform_device *pdev)
@@ -351,18 +350,11 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
        if (IS_ERR(tpm->base))
                return PTR_ERR(tpm->base);
 
-       tpm->clk = devm_clk_get(&pdev->dev, NULL);
+       tpm->clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(tpm->clk))
                return dev_err_probe(&pdev->dev, PTR_ERR(tpm->clk),
                                     "failed to get PWM clock\n");
 
-       ret = clk_prepare_enable(tpm->clk);
-       if (ret) {
-               dev_err(&pdev->dev,
-                       "failed to prepare or enable clock: %d\n", ret);
-               return ret;
-       }
-
        tpm->chip.dev = &pdev->dev;
        tpm->chip.ops = &imx_tpm_pwm_ops;
 
@@ -372,22 +364,11 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
 
        mutex_init(&tpm->lock);
 
-       ret = pwmchip_add(&tpm->chip);
-       if (ret) {
-               dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
-               clk_disable_unprepare(tpm->clk);
-       }
-
-       return ret;
-}
-
-static void pwm_imx_tpm_remove(struct platform_device *pdev)
-{
-       struct imx_tpm_pwm_chip *tpm = platform_get_drvdata(pdev);
-
-       pwmchip_remove(&tpm->chip);
+       ret = devm_pwmchip_add(&pdev->dev, &tpm->chip);
+       if (ret)
+               return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
 
-       clk_disable_unprepare(tpm->clk);
+       return 0;
 }
 
 static int __maybe_unused pwm_imx_tpm_suspend(struct device *dev)
@@ -437,7 +418,6 @@ static struct platform_driver imx_tpm_pwm_driver = {
                .pm = &imx_tpm_pwm_pm,
        },
        .probe  = pwm_imx_tpm_probe,
-       .remove_new = pwm_imx_tpm_remove,
 };
 module_platform_driver(imx_tpm_pwm_driver);
 
index 0651983bed190c411df3bdf1b7d84b1c6fbb68bc..d175d895f22a338bc46b309a5805f563366ab886 100644 (file)
@@ -146,7 +146,6 @@ static int pwm_imx1_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops pwm_imx1_ops = {
        .apply = pwm_imx1_apply,
-       .owner = THIS_MODULE,
 };
 
 static const struct of_device_id pwm_imx1_dt_ids[] = {
index 29a3089c534cdab089e672f2d693a9ab0f70afa8..7d9bc43f12b0e96cf33b15e1a6f4247f8555d907 100644 (file)
@@ -296,7 +296,6 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops pwm_imx27_ops = {
        .apply = pwm_imx27_apply,
        .get_state = pwm_imx27_get_state,
-       .owner = THIS_MODULE,
 };
 
 static const struct of_device_id pwm_imx27_dt_ids[] = {
index 0cd7dd548e82f22ddcc7a16363abe646478d735c..54ecae7f937ed0517e58a0c09615240c6fe71e0a 100644 (file)
@@ -107,7 +107,6 @@ static int lgm_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops lgm_pwm_ops = {
        .get_state = lgm_pwm_get_state,
        .apply = lgm_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static void lgm_pwm_init(struct lgm_pwm_chip *pc)
index 47b3141135f38eccdcde2c66330a72b6000fdb0a..378ab036edfec43e82530e4d2e349e27e2c3b39b 100644 (file)
@@ -166,7 +166,6 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier,
 static const struct pwm_ops iqs620_pwm_ops = {
        .apply = iqs620_pwm_apply,
        .get_state = iqs620_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static void iqs620_pwm_notifier_unregister(void *context)
index ef1293f2a897eeeb5daefb06afc0789898b32be4..e9375de60ad6219729e8d0f36073fda1df49cbb8 100644 (file)
@@ -27,6 +27,7 @@ struct soc_info {
 struct jz4740_pwm_chip {
        struct pwm_chip chip;
        struct regmap *map;
+       struct clk *clk[];
 };
 
 static inline struct jz4740_pwm_chip *to_jz4740(struct pwm_chip *chip)
@@ -70,14 +71,15 @@ static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
                return err;
        }
 
-       pwm_set_chip_data(pwm, clk);
+       jz->clk[pwm->hwpwm] = clk;
 
        return 0;
 }
 
 static void jz4740_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-       struct clk *clk = pwm_get_chip_data(pwm);
+       struct jz4740_pwm_chip *jz = to_jz4740(chip);
+       struct clk *clk = jz->clk[pwm->hwpwm];
 
        clk_disable_unprepare(clk);
        clk_put(clk);
@@ -121,9 +123,9 @@ static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                            const struct pwm_state *state)
 {
-       struct jz4740_pwm_chip *jz4740 = to_jz4740(pwm->chip);
+       struct jz4740_pwm_chip *jz = to_jz4740(pwm->chip);
        unsigned long long tmp = 0xffffull * NSEC_PER_SEC;
-       struct clk *clk = pwm_get_chip_data(pwm);
+       struct clk *clk = jz->clk[pwm->hwpwm];
        unsigned long period, duty;
        long rate;
        int err;
@@ -173,16 +175,16 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
        }
 
        /* Reset counter to 0 */
-       regmap_write(jz4740->map, TCU_REG_TCNTc(pwm->hwpwm), 0);
+       regmap_write(jz->map, TCU_REG_TCNTc(pwm->hwpwm), 0);
 
        /* Set duty */
-       regmap_write(jz4740->map, TCU_REG_TDHRc(pwm->hwpwm), duty);
+       regmap_write(jz->map, TCU_REG_TDHRc(pwm->hwpwm), duty);
 
        /* Set period */
-       regmap_write(jz4740->map, TCU_REG_TDFRc(pwm->hwpwm), period);
+       regmap_write(jz->map, TCU_REG_TDFRc(pwm->hwpwm), period);
 
        /* Set abrupt shutdown */
-       regmap_set_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+       regmap_set_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
                        TCU_TCSR_PWM_SD);
 
        /*
@@ -199,10 +201,10 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
         * state instead of its inactive state.
         */
        if ((state->polarity == PWM_POLARITY_NORMAL) ^ state->enabled)
-               regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+               regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
                                   TCU_TCSR_PWM_INITL_HIGH, 0);
        else
-               regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+               regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
                                   TCU_TCSR_PWM_INITL_HIGH,
                                   TCU_TCSR_PWM_INITL_HIGH);
 
@@ -216,34 +218,34 @@ static const struct pwm_ops jz4740_pwm_ops = {
        .request = jz4740_pwm_request,
        .free = jz4740_pwm_free,
        .apply = jz4740_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int jz4740_pwm_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct jz4740_pwm_chip *jz4740;
+       struct jz4740_pwm_chip *jz;
        const struct soc_info *info;
 
        info = device_get_match_data(dev);
        if (!info)
                return -EINVAL;
 
-       jz4740 = devm_kzalloc(dev, sizeof(*jz4740), GFP_KERNEL);
-       if (!jz4740)
+       jz = devm_kzalloc(dev, struct_size(jz, clk, info->num_pwms),
+                             GFP_KERNEL);
+       if (!jz)
                return -ENOMEM;
 
-       jz4740->map = device_node_to_regmap(dev->parent->of_node);
-       if (IS_ERR(jz4740->map)) {
-               dev_err(dev, "regmap not found: %ld\n", PTR_ERR(jz4740->map));
-               return PTR_ERR(jz4740->map);
+       jz->map = device_node_to_regmap(dev->parent->of_node);
+       if (IS_ERR(jz->map)) {
+               dev_err(dev, "regmap not found: %ld\n", PTR_ERR(jz->map));
+               return PTR_ERR(jz->map);
        }
 
-       jz4740->chip.dev = dev;
-       jz4740->chip.ops = &jz4740_pwm_ops;
-       jz4740->chip.npwm = info->num_pwms;
+       jz->chip.dev = dev;
+       jz->chip.ops = &jz4740_pwm_ops;
+       jz->chip.npwm = info->num_pwms;
 
-       return devm_pwmchip_add(dev, &jz4740->chip);
+       return devm_pwmchip_add(dev, &jz->chip);
 }
 
 static const struct soc_info jz4740_soc_info = {
index ac02d8bb4a0b5eba2877bf71b800e0bd2332617b..ac824ecc3f641045676477bb32a68d62f07cc1c5 100644 (file)
@@ -178,7 +178,6 @@ static int keembay_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 }
 
 static const struct pwm_ops keembay_pwm_ops = {
-       .owner = THIS_MODULE,
        .apply = keembay_pwm_apply,
        .get_state = keembay_pwm_get_state,
 };
index 4b133a17f4be3bea2b070237abaed93d25080ef4..32350a357278df0c1857524480266a95ee24aadc 100644 (file)
@@ -23,6 +23,7 @@ struct lp3943_pwm {
        struct pwm_chip chip;
        struct lp3943 *lp3943;
        struct lp3943_platform_data *pdata;
+       struct lp3943_pwm_map pwm_map[LP3943_NUM_PWMS];
 };
 
 static inline struct lp3943_pwm *to_lp3943_pwm(struct pwm_chip *chip)
@@ -35,13 +36,9 @@ lp3943_pwm_request_map(struct lp3943_pwm *lp3943_pwm, int hwpwm)
 {
        struct lp3943_platform_data *pdata = lp3943_pwm->pdata;
        struct lp3943 *lp3943 = lp3943_pwm->lp3943;
-       struct lp3943_pwm_map *pwm_map;
+       struct lp3943_pwm_map *pwm_map = &lp3943_pwm->pwm_map[hwpwm];
        int i, offset;
 
-       pwm_map = kzalloc(sizeof(*pwm_map), GFP_KERNEL);
-       if (!pwm_map)
-               return ERR_PTR(-ENOMEM);
-
        pwm_map->output = pdata->pwms[hwpwm]->output;
        pwm_map->num_outputs = pdata->pwms[hwpwm]->num_outputs;
 
@@ -49,10 +46,8 @@ lp3943_pwm_request_map(struct lp3943_pwm *lp3943_pwm, int hwpwm)
                offset = pwm_map->output[i];
 
                /* Return an error if the pin is already assigned */
-               if (test_and_set_bit(offset, &lp3943->pin_used)) {
-                       kfree(pwm_map);
+               if (test_and_set_bit(offset, &lp3943->pin_used))
                        return ERR_PTR(-EBUSY);
-               }
        }
 
        return pwm_map;
@@ -67,7 +62,7 @@ static int lp3943_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
        if (IS_ERR(pwm_map))
                return PTR_ERR(pwm_map);
 
-       return pwm_set_chip_data(pwm, pwm_map);
+       return 0;
 }
 
 static void lp3943_pwm_free_map(struct lp3943_pwm *lp3943_pwm,
@@ -80,14 +75,12 @@ static void lp3943_pwm_free_map(struct lp3943_pwm *lp3943_pwm,
                offset = pwm_map->output[i];
                clear_bit(offset, &lp3943->pin_used);
        }
-
-       kfree(pwm_map);
 }
 
 static void lp3943_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
-       struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
+       struct lp3943_pwm_map *pwm_map = &lp3943_pwm->pwm_map[pwm->hwpwm];
 
        lp3943_pwm_free_map(lp3943_pwm, pwm_map);
 }
@@ -159,7 +152,7 @@ static int lp3943_pwm_set_mode(struct lp3943_pwm *lp3943_pwm,
 static int lp3943_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
-       struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
+       struct lp3943_pwm_map *pwm_map = &lp3943_pwm->pwm_map[pwm->hwpwm];
        u8 val;
 
        if (pwm->hwpwm == 0)
@@ -178,7 +171,7 @@ static int lp3943_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 static void lp3943_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct lp3943_pwm *lp3943_pwm = to_lp3943_pwm(chip);
-       struct lp3943_pwm_map *pwm_map = pwm_get_chip_data(pwm);
+       struct lp3943_pwm_map *pwm_map = &lp3943_pwm->pwm_map[pwm->hwpwm];
 
        /*
         * LP3943 outputs are open-drain, so the pin should be configured
@@ -216,7 +209,6 @@ static const struct pwm_ops lp3943_pwm_ops = {
        .request        = lp3943_pwm_request,
        .free           = lp3943_pwm_free,
        .apply          = lp3943_pwm_apply,
-       .owner          = THIS_MODULE,
 };
 
 static int lp3943_pwm_parse_dt(struct device *dev,
index 7a19a840bca5fca00d90af908b872ef8e3fdc0de..ef7d0da137ede3453684322dc3e2312d6ff0e59e 100644 (file)
@@ -341,7 +341,6 @@ static const struct pwm_ops lpc18xx_pwm_ops = {
        .apply = lpc18xx_pwm_apply,
        .request = lpc18xx_pwm_request,
        .free = lpc18xx_pwm_free,
-       .owner = THIS_MODULE,
 };
 
 static const struct of_device_id lpc18xx_pwm_of_match[] = {
index 806f0bb3ad6d8d1ba6b1c8094b15bcb979111899..78f664e41e6e3a174f16ed0ecc216f2e0b1ae087 100644 (file)
@@ -115,7 +115,6 @@ static int lpc32xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops lpc32xx_pwm_ops = {
        .apply = lpc32xx_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int lpc32xx_pwm_probe(struct platform_device *pdev)
index 23fe332b2394788a9c610eca28d9ed853370a5a3..a6ea3ce7e0196ad0e70323bb26dbdc4c7da2b48e 100644 (file)
@@ -243,7 +243,6 @@ static int pwm_lpss_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops pwm_lpss_ops = {
        .apply = pwm_lpss_apply,
        .get_state = pwm_lpss_get_state,
-       .owner = THIS_MODULE,
 };
 
 struct pwm_lpss_chip *devm_pwm_lpss_probe(struct device *dev, void __iomem *base,
index 6adb0ed019066a0f29bb70ff704958aeb020caac..373abfd25acb28509520e82ce6b3efc13896e9d9 100644 (file)
@@ -229,7 +229,6 @@ static int pwm_mediatek_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops pwm_mediatek_ops = {
        .apply = pwm_mediatek_apply,
-       .owner = THIS_MODULE,
 };
 
 static int pwm_mediatek_probe(struct platform_device *pdev)
index 25519cddc2a9dd5461671b3de449a0709a4e1bc7..5bea53243ed2fab3c170591f7b385fdb25890658 100644 (file)
@@ -335,7 +335,6 @@ static const struct pwm_ops meson_pwm_ops = {
        .free = meson_pwm_free,
        .apply = meson_pwm_apply,
        .get_state = meson_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static const char * const pwm_meson8b_parent_names[] = {
index e7525c98105ebc30db26648be60df27bd0b029f1..c0c53968f3e9d1a84c6f727b66bdb6396ec00f1b 100644 (file)
@@ -435,7 +435,6 @@ static int mchp_core_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm
 static const struct pwm_ops mchp_core_pwm_ops = {
        .apply = mchp_core_pwm_apply,
        .get_state = mchp_core_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static const struct of_device_id mchp_core_of_match[] = {
index a83bd6e18b07ff8b2e618f03bae754386b0f2d63..a72f7be36996452d4c984cd8cb30397233a2673d 100644 (file)
@@ -227,7 +227,6 @@ static int mtk_disp_pwm_get_state(struct pwm_chip *chip,
 static const struct pwm_ops mtk_disp_pwm_ops = {
        .apply = mtk_disp_pwm_apply,
        .get_state = mtk_disp_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static int mtk_disp_pwm_probe(struct platform_device *pdev)
@@ -247,34 +246,25 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
 
        mdp->clk_main = devm_clk_get(&pdev->dev, "main");
        if (IS_ERR(mdp->clk_main))
-               return PTR_ERR(mdp->clk_main);
+               return dev_err_probe(&pdev->dev, PTR_ERR(mdp->clk_main),
+                                    "Failed to get main clock\n");
 
        mdp->clk_mm = devm_clk_get(&pdev->dev, "mm");
        if (IS_ERR(mdp->clk_mm))
-               return PTR_ERR(mdp->clk_mm);
+               return dev_err_probe(&pdev->dev, PTR_ERR(mdp->clk_mm),
+                                    "Failed to get mm clock\n");
 
        mdp->chip.dev = &pdev->dev;
        mdp->chip.ops = &mtk_disp_pwm_ops;
        mdp->chip.npwm = 1;
 
-       ret = pwmchip_add(&mdp->chip);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "pwmchip_add() failed: %pe\n", ERR_PTR(ret));
-               return ret;
-       }
-
-       platform_set_drvdata(pdev, mdp);
+       ret = devm_pwmchip_add(&pdev->dev, &mdp->chip);
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
 
        return 0;
 }
 
-static void mtk_disp_pwm_remove(struct platform_device *pdev)
-{
-       struct mtk_disp_pwm *mdp = platform_get_drvdata(pdev);
-
-       pwmchip_remove(&mdp->chip);
-}
-
 static const struct mtk_pwm_data mt2701_pwm_data = {
        .enable_mask = BIT(16),
        .con0 = 0xa8,
@@ -320,7 +310,6 @@ static struct platform_driver mtk_disp_pwm_driver = {
                .of_match_table = mtk_disp_pwm_of_match,
        },
        .probe = mtk_disp_pwm_probe,
-       .remove_new = mtk_disp_pwm_remove,
 };
 module_platform_driver(mtk_disp_pwm_driver);
 
index 766dbc58dad8409c096528ed8337560aaf41dbca..1b5e787d78f1cdb163a650e12104ea900d4b3d21 100644 (file)
@@ -115,7 +115,6 @@ static int mxs_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops mxs_pwm_ops = {
        .apply = mxs_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int mxs_pwm_probe(struct platform_device *pdev)
index 7514ea384ec56585d33c93783fea68bc153528e0..78606039eda2eea757175f559d0ada50b7eadeec 100644 (file)
@@ -126,7 +126,6 @@ static int ntxec_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm_dev,
 }
 
 static const struct pwm_ops ntxec_pwm_ops = {
-       .owner = THIS_MODULE,
        .apply = ntxec_pwm_apply,
        /*
         * No .get_state callback, because the current state cannot be read
index 4889fbd8a4311afd33c9df8d6b5c1a991562176d..13161e08dd6eb38e86bb1863d131517f7e2f1706 100644 (file)
@@ -311,7 +311,6 @@ unlock_mutex:
 
 static const struct pwm_ops pwm_omap_dmtimer_ops = {
        .apply = pwm_omap_dmtimer_apply,
-       .owner = THIS_MODULE,
 };
 
 static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
@@ -466,7 +465,7 @@ MODULE_DEVICE_TABLE(of, pwm_omap_dmtimer_of_match);
 static struct platform_driver pwm_omap_dmtimer_driver = {
        .driver = {
                .name = "omap-dmtimer-pwm",
-               .of_match_table = of_match_ptr(pwm_omap_dmtimer_of_match),
+               .of_match_table = pwm_omap_dmtimer_of_match,
        },
        .probe = pwm_omap_dmtimer_probe,
        .remove_new = pwm_omap_dmtimer_remove,
index 3038a68412a75e1270cd44c5590fcefe765b94e6..e79b1de8c4d83f1b4ad06269a844f1b3cc932162 100644 (file)
@@ -505,7 +505,6 @@ static const struct pwm_ops pca9685_pwm_ops = {
        .get_state = pca9685_pwm_get_state,
        .request = pca9685_pwm_request,
        .free = pca9685_pwm_free,
-       .owner = THIS_MODULE,
 };
 
 static const struct regmap_config pca9685_regmap_i2c_config = {
index 1e475ed10180ea302b0c60d66949bb93ab13323d..76685f926c758603307987f9b13205cf2dc7286a 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/clk.h>
 #include <linux/io.h>
 #include <linux/pwm.h>
-#include <linux/of_device.h>
+#include <linux/of.h>
 
 #include <asm/div64.h>
 
@@ -135,7 +135,6 @@ static int pxa_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops pxa_pwm_ops = {
        .apply = pxa_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 #ifdef CONFIG_OF
index 2939b71a7ba7b5e340d418d511648d9a6da0354b..1ad814fdec6bd6483620dfae8dddfa2cd1756015 100644 (file)
@@ -135,7 +135,6 @@ static int raspberrypi_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops raspberrypi_pwm_ops = {
        .get_state = raspberrypi_pwm_get_state,
        .apply = raspberrypi_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int raspberrypi_pwm_probe(struct platform_device *pdev)
index 5b5f357c44de61f8d4aa3aef115f314c15bacf1e..13269f55fccff143769687c88437c1a02d4fcc95 100644 (file)
@@ -198,7 +198,6 @@ static const struct pwm_ops rcar_pwm_ops = {
        .request = rcar_pwm_request,
        .free = rcar_pwm_free,
        .apply = rcar_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int rcar_pwm_probe(struct platform_device *pdev)
index d7311614c846dda20dbf7696618d628bff7a3ef0..4239f2c3e8b2a3288bfe43d2a7e7a2cb98a9b2b5 100644 (file)
@@ -85,6 +85,7 @@ struct tpu_device {
 
        void __iomem *base;
        struct clk *clk;
+       struct tpu_pwm_device tpd[TPU_CHANNEL_MAX];
 };
 
 #define to_tpu_device(c)       container_of(c, struct tpu_device, chip)
@@ -215,9 +216,7 @@ static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
        if (pwm->hwpwm >= TPU_CHANNEL_MAX)
                return -EINVAL;
 
-       tpd = kzalloc(sizeof(*tpd), GFP_KERNEL);
-       if (tpd == NULL)
-               return -ENOMEM;
+       tpd = &tpu->tpd[pwm->hwpwm];
 
        tpd->tpu = tpu;
        tpd->channel = pwm->hwpwm;
@@ -228,24 +227,22 @@ static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
 
        tpd->timer_on = false;
 
-       pwm_set_chip_data(pwm, tpd);
-
        return 0;
 }
 
 static void tpu_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-       struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
+       struct tpu_device *tpu = to_tpu_device(chip);
+       struct tpu_pwm_device *tpd = &tpu->tpd[pwm->hwpwm];
 
        tpu_pwm_timer_stop(tpd);
-       kfree(tpd);
 }
 
 static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                          u64 duty_ns, u64 period_ns, bool enabled)
 {
-       struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
        struct tpu_device *tpu = to_tpu_device(chip);
+       struct tpu_pwm_device *tpd = &tpu->tpd[pwm->hwpwm];
        unsigned int prescaler;
        bool duty_only = false;
        u32 clk_rate;
@@ -353,7 +350,8 @@ static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 static int tpu_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
                                enum pwm_polarity polarity)
 {
-       struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
+       struct tpu_device *tpu = to_tpu_device(chip);
+       struct tpu_pwm_device *tpd = &tpu->tpd[pwm->hwpwm];
 
        tpd->polarity = polarity;
 
@@ -362,7 +360,8 @@ static int tpu_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-       struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
+       struct tpu_device *tpu = to_tpu_device(chip);
+       struct tpu_pwm_device *tpd = &tpu->tpd[pwm->hwpwm];
        int ret;
 
        ret = tpu_pwm_timer_start(tpd);
@@ -384,7 +383,8 @@ static int tpu_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 
 static void tpu_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-       struct tpu_pwm_device *tpd = pwm_get_chip_data(pwm);
+       struct tpu_device *tpu = to_tpu_device(chip);
+       struct tpu_pwm_device *tpd = &tpu->tpd[pwm->hwpwm];
 
        /* The timer must be running to modify the pin output configuration. */
        tpu_pwm_timer_start(tpd);
@@ -431,7 +431,6 @@ static const struct pwm_ops tpu_pwm_ops = {
        .request = tpu_pwm_request,
        .free = tpu_pwm_free,
        .apply = tpu_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 /* -----------------------------------------------------------------------------
index 03ee18fb82d5b40dc5699da1c5410dac830d5ac1..cce4381e188af0bfb7549db11de1d0819c893be6 100644 (file)
@@ -228,7 +228,6 @@ out:
 static const struct pwm_ops rockchip_pwm_ops = {
        .get_state = rockchip_pwm_get_state,
        .apply = rockchip_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static const struct rockchip_pwm_data pwm_data_v1 = {
index a56cecb0e46e3e207811d95104e3a3c74cad375e..bdda315b3bd30c135e4c0d10174fa52a144455fc 100644 (file)
@@ -438,7 +438,6 @@ static const struct pwm_ops rz_mtu3_pwm_ops = {
        .free = rz_mtu3_pwm_free,
        .get_state = rz_mtu3_pwm_get_state,
        .apply = rz_mtu3_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int rz_mtu3_pwm_pm_runtime_suspend(struct device *dev)
index e8828f57ab1502f0a3450e72e1fdd8de1f401d7b..69d9f4577b3479fd6038b7e71cc56cfff48fa7c2 100644 (file)
@@ -77,6 +77,7 @@ struct samsung_pwm_channel {
  * @base_clk:          base clock used to drive the timers
  * @tclk0:             external clock 0 (can be ERR_PTR if not present)
  * @tclk1:             external clock 1 (can be ERR_PTR if not present)
+ * @channel:           per channel driver data
  */
 struct samsung_pwm_chip {
        struct pwm_chip chip;
@@ -88,6 +89,7 @@ struct samsung_pwm_chip {
        struct clk *base_clk;
        struct clk *tclk0;
        struct clk *tclk1;
+       struct samsung_pwm_channel channel[SAMSUNG_PWM_NUM];
 };
 
 #ifndef CONFIG_CLKSRC_SAMSUNG_PWM
@@ -117,21 +119,21 @@ static inline unsigned int to_tcon_channel(unsigned int channel)
        return (channel == 0) ? 0 : (channel + 1);
 }
 
-static void __pwm_samsung_manual_update(struct samsung_pwm_chip *chip,
+static void __pwm_samsung_manual_update(struct samsung_pwm_chip *our_chip,
                                      struct pwm_device *pwm)
 {
        unsigned int tcon_chan = to_tcon_channel(pwm->hwpwm);
        u32 tcon;
 
-       tcon = readl(chip->base + REG_TCON);
+       tcon = readl(our_chip->base + REG_TCON);
        tcon |= TCON_MANUALUPDATE(tcon_chan);
-       writel(tcon, chip->base + REG_TCON);
+       writel(tcon, our_chip->base + REG_TCON);
 
        tcon &= ~TCON_MANUALUPDATE(tcon_chan);
-       writel(tcon, chip->base + REG_TCON);
+       writel(tcon, our_chip->base + REG_TCON);
 }
 
-static void pwm_samsung_set_divisor(struct samsung_pwm_chip *pwm,
+static void pwm_samsung_set_divisor(struct samsung_pwm_chip *our_chip,
                                    unsigned int channel, u8 divisor)
 {
        u8 shift = TCFG1_SHIFT(channel);
@@ -139,39 +141,39 @@ static void pwm_samsung_set_divisor(struct samsung_pwm_chip *pwm,
        u32 reg;
        u8 bits;
 
-       bits = (fls(divisor) - 1) - pwm->variant.div_base;
+       bits = (fls(divisor) - 1) - our_chip->variant.div_base;
 
        spin_lock_irqsave(&samsung_pwm_lock, flags);
 
-       reg = readl(pwm->base + REG_TCFG1);
+       reg = readl(our_chip->base + REG_TCFG1);
        reg &= ~(TCFG1_MUX_MASK << shift);
        reg |= bits << shift;
-       writel(reg, pwm->base + REG_TCFG1);
+       writel(reg, our_chip->base + REG_TCFG1);
 
        spin_unlock_irqrestore(&samsung_pwm_lock, flags);
 }
 
-static int pwm_samsung_is_tdiv(struct samsung_pwm_chip *chip, unsigned int chan)
+static int pwm_samsung_is_tdiv(struct samsung_pwm_chip *our_chip, unsigned int chan)
 {
-       struct samsung_pwm_variant *variant = &chip->variant;
+       struct samsung_pwm_variant *variant = &our_chip->variant;
        u32 reg;
 
-       reg = readl(chip->base + REG_TCFG1);
+       reg = readl(our_chip->base + REG_TCFG1);
        reg >>= TCFG1_SHIFT(chan);
        reg &= TCFG1_MUX_MASK;
 
        return (BIT(reg) & variant->tclk_mask) == 0;
 }
 
-static unsigned long pwm_samsung_get_tin_rate(struct samsung_pwm_chip *chip,
+static unsigned long pwm_samsung_get_tin_rate(struct samsung_pwm_chip *our_chip,
                                              unsigned int chan)
 {
        unsigned long rate;
        u32 reg;
 
-       rate = clk_get_rate(chip->base_clk);
+       rate = clk_get_rate(our_chip->base_clk);
 
-       reg = readl(chip->base + REG_TCFG0);
+       reg = readl(our_chip->base + REG_TCFG0);
        if (chan >= 2)
                reg >>= TCFG0_PRESCALER1_SHIFT;
        reg &= TCFG0_PRESCALER_MASK;
@@ -179,28 +181,28 @@ static unsigned long pwm_samsung_get_tin_rate(struct samsung_pwm_chip *chip,
        return rate / (reg + 1);
 }
 
-static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *chip,
+static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *our_chip,
                                          unsigned int chan, unsigned long freq)
 {
-       struct samsung_pwm_variant *variant = &chip->variant;
+       struct samsung_pwm_variant *variant = &our_chip->variant;
        unsigned long rate;
        struct clk *clk;
        u8 div;
 
-       if (!pwm_samsung_is_tdiv(chip, chan)) {
-               clk = (chan < 2) ? chip->tclk0 : chip->tclk1;
+       if (!pwm_samsung_is_tdiv(our_chip, chan)) {
+               clk = (chan < 2) ? our_chip->tclk0 : our_chip->tclk1;
                if (!IS_ERR(clk)) {
                        rate = clk_get_rate(clk);
                        if (rate)
                                return rate;
                }
 
-               dev_warn(chip->chip.dev,
+               dev_warn(our_chip->chip.dev,
                        "tclk of PWM %d is inoperational, using tdiv\n", chan);
        }
 
-       rate = pwm_samsung_get_tin_rate(chip, chan);
-       dev_dbg(chip->chip.dev, "tin parent at %lu\n", rate);
+       rate = pwm_samsung_get_tin_rate(our_chip, chan);
+       dev_dbg(our_chip->chip.dev, "tin parent at %lu\n", rate);
 
        /*
         * Compare minimum PWM frequency that can be achieved with possible
@@ -220,7 +222,7 @@ static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *chip,
                div = variant->div_base;
        }
 
-       pwm_samsung_set_divisor(chip, chan, BIT(div));
+       pwm_samsung_set_divisor(our_chip, chan, BIT(div));
 
        return rate >> div;
 }
@@ -228,7 +230,6 @@ static unsigned long pwm_samsung_calc_tin(struct samsung_pwm_chip *chip,
 static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
-       struct samsung_pwm_channel *our_chan;
 
        if (!(our_chip->variant.output_mask & BIT(pwm->hwpwm))) {
                dev_warn(chip->dev,
@@ -237,20 +238,11 @@ static int pwm_samsung_request(struct pwm_chip *chip, struct pwm_device *pwm)
                return -EINVAL;
        }
 
-       our_chan = kzalloc(sizeof(*our_chan), GFP_KERNEL);
-       if (!our_chan)
-               return -ENOMEM;
-
-       pwm_set_chip_data(pwm, our_chan);
+       memset(&our_chip->channel[pwm->hwpwm], 0, sizeof(our_chip->channel[pwm->hwpwm]));
 
        return 0;
 }
 
-static void pwm_samsung_free(struct pwm_chip *chip, struct pwm_device *pwm)
-{
-       kfree(pwm_get_chip_data(pwm));
-}
-
 static int pwm_samsung_enable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
@@ -302,14 +294,14 @@ static void pwm_samsung_disable(struct pwm_chip *chip, struct pwm_device *pwm)
        spin_unlock_irqrestore(&samsung_pwm_lock, flags);
 }
 
-static void pwm_samsung_manual_update(struct samsung_pwm_chip *chip,
+static void pwm_samsung_manual_update(struct samsung_pwm_chip *our_chip,
                                      struct pwm_device *pwm)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&samsung_pwm_lock, flags);
 
-       __pwm_samsung_manual_update(chip, pwm);
+       __pwm_samsung_manual_update(our_chip, pwm);
 
        spin_unlock_irqrestore(&samsung_pwm_lock, flags);
 }
@@ -318,7 +310,7 @@ static int __pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
                                int duty_ns, int period_ns, bool force_period)
 {
        struct samsung_pwm_chip *our_chip = to_samsung_pwm_chip(chip);
-       struct samsung_pwm_channel *chan = pwm_get_chip_data(pwm);
+       struct samsung_pwm_channel *chan = &our_chip->channel[pwm->hwpwm];
        u32 tin_ns = chan->tin_ns, tcnt, tcmp, oldtcmp;
 
        tcnt = readl(our_chip->base + REG_TCNTB(pwm->hwpwm));
@@ -393,7 +385,7 @@ static int pwm_samsung_config(struct pwm_chip *chip, struct pwm_device *pwm,
        return __pwm_samsung_config(chip, pwm, duty_ns, period_ns, false);
 }
 
-static void pwm_samsung_set_invert(struct samsung_pwm_chip *chip,
+static void pwm_samsung_set_invert(struct samsung_pwm_chip *our_chip,
                                   unsigned int channel, bool invert)
 {
        unsigned int tcon_chan = to_tcon_channel(channel);
@@ -402,17 +394,17 @@ static void pwm_samsung_set_invert(struct samsung_pwm_chip *chip,
 
        spin_lock_irqsave(&samsung_pwm_lock, flags);
 
-       tcon = readl(chip->base + REG_TCON);
+       tcon = readl(our_chip->base + REG_TCON);
 
        if (invert) {
-               chip->inverter_mask |= BIT(channel);
+               our_chip->inverter_mask |= BIT(channel);
                tcon |= TCON_INVERT(tcon_chan);
        } else {
-               chip->inverter_mask &= ~BIT(channel);
+               our_chip->inverter_mask &= ~BIT(channel);
                tcon &= ~TCON_INVERT(tcon_chan);
        }
 
-       writel(tcon, chip->base + REG_TCON);
+       writel(tcon, our_chip->base + REG_TCON);
 
        spin_unlock_irqrestore(&samsung_pwm_lock, flags);
 }
@@ -473,9 +465,7 @@ static int pwm_samsung_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops pwm_samsung_ops = {
        .request        = pwm_samsung_request,
-       .free           = pwm_samsung_free,
        .apply          = pwm_samsung_apply,
-       .owner          = THIS_MODULE,
 };
 
 #ifdef CONFIG_OF
@@ -517,9 +507,9 @@ static const struct of_device_id samsung_pwm_matches[] = {
 };
 MODULE_DEVICE_TABLE(of, samsung_pwm_matches);
 
-static int pwm_samsung_parse_dt(struct samsung_pwm_chip *chip)
+static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
 {
-       struct device_node *np = chip->chip.dev->of_node;
+       struct device_node *np = our_chip->chip.dev->of_node;
        const struct of_device_id *match;
        struct property *prop;
        const __be32 *cur;
@@ -529,22 +519,22 @@ static int pwm_samsung_parse_dt(struct samsung_pwm_chip *chip)
        if (!match)
                return -ENODEV;
 
-       memcpy(&chip->variant, match->data, sizeof(chip->variant));
+       memcpy(&our_chip->variant, match->data, sizeof(our_chip->variant));
 
        of_property_for_each_u32(np, "samsung,pwm-outputs", prop, cur, val) {
                if (val >= SAMSUNG_PWM_NUM) {
-                       dev_err(chip->chip.dev,
+                       dev_err(our_chip->chip.dev,
                                "%s: invalid channel index in samsung,pwm-outputs property\n",
                                                                __func__);
                        continue;
                }
-               chip->variant.output_mask |= BIT(val);
+               our_chip->variant.output_mask |= BIT(val);
        }
 
        return 0;
 }
 #else
-static int pwm_samsung_parse_dt(struct samsung_pwm_chip *chip)
+static int pwm_samsung_parse_dt(struct samsung_pwm_chip *our_chip)
 {
        return -ENODEV;
 }
@@ -553,21 +543,21 @@ static int pwm_samsung_parse_dt(struct samsung_pwm_chip *chip)
 static int pwm_samsung_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct samsung_pwm_chip *chip;
+       struct samsung_pwm_chip *our_chip;
        unsigned int chan;
        int ret;
 
-       chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
-       if (chip == NULL)
+       our_chip = devm_kzalloc(&pdev->dev, sizeof(*our_chip), GFP_KERNEL);
+       if (our_chip == NULL)
                return -ENOMEM;
 
-       chip->chip.dev = &pdev->dev;
-       chip->chip.ops = &pwm_samsung_ops;
-       chip->chip.npwm = SAMSUNG_PWM_NUM;
-       chip->inverter_mask = BIT(SAMSUNG_PWM_NUM) - 1;
+       our_chip->chip.dev = &pdev->dev;
+       our_chip->chip.ops = &pwm_samsung_ops;
+       our_chip->chip.npwm = SAMSUNG_PWM_NUM;
+       our_chip->inverter_mask = BIT(SAMSUNG_PWM_NUM) - 1;
 
        if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
-               ret = pwm_samsung_parse_dt(chip);
+               ret = pwm_samsung_parse_dt(our_chip);
                if (ret)
                        return ret;
        } else {
@@ -576,58 +566,58 @@ static int pwm_samsung_probe(struct platform_device *pdev)
                        return -EINVAL;
                }
 
-               memcpy(&chip->variant, pdev->dev.platform_data,
-                                                       sizeof(chip->variant));
+               memcpy(&our_chip->variant, pdev->dev.platform_data,
+                                                       sizeof(our_chip->variant));
        }
 
-       chip->base = devm_platform_ioremap_resource(pdev, 0);
-       if (IS_ERR(chip->base))
-               return PTR_ERR(chip->base);
+       our_chip->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(our_chip->base))
+               return PTR_ERR(our_chip->base);
 
-       chip->base_clk = devm_clk_get(&pdev->dev, "timers");
-       if (IS_ERR(chip->base_clk)) {
+       our_chip->base_clk = devm_clk_get(&pdev->dev, "timers");
+       if (IS_ERR(our_chip->base_clk)) {
                dev_err(dev, "failed to get timer base clk\n");
-               return PTR_ERR(chip->base_clk);
+               return PTR_ERR(our_chip->base_clk);
        }
 
-       ret = clk_prepare_enable(chip->base_clk);
+       ret = clk_prepare_enable(our_chip->base_clk);
        if (ret < 0) {
                dev_err(dev, "failed to enable base clock\n");
                return ret;
        }
 
        for (chan = 0; chan < SAMSUNG_PWM_NUM; ++chan)
-               if (chip->variant.output_mask & BIT(chan))
-                       pwm_samsung_set_invert(chip, chan, true);
+               if (our_chip->variant.output_mask & BIT(chan))
+                       pwm_samsung_set_invert(our_chip, chan, true);
 
        /* Following clocks are optional. */
-       chip->tclk0 = devm_clk_get(&pdev->dev, "pwm-tclk0");
-       chip->tclk1 = devm_clk_get(&pdev->dev, "pwm-tclk1");
+       our_chip->tclk0 = devm_clk_get(&pdev->dev, "pwm-tclk0");
+       our_chip->tclk1 = devm_clk_get(&pdev->dev, "pwm-tclk1");
 
-       platform_set_drvdata(pdev, chip);
+       platform_set_drvdata(pdev, our_chip);
 
-       ret = pwmchip_add(&chip->chip);
+       ret = pwmchip_add(&our_chip->chip);
        if (ret < 0) {
                dev_err(dev, "failed to register PWM chip\n");
-               clk_disable_unprepare(chip->base_clk);
+               clk_disable_unprepare(our_chip->base_clk);
                return ret;
        }
 
        dev_dbg(dev, "base_clk at %lu, tclk0 at %lu, tclk1 at %lu\n",
-               clk_get_rate(chip->base_clk),
-               !IS_ERR(chip->tclk0) ? clk_get_rate(chip->tclk0) : 0,
-               !IS_ERR(chip->tclk1) ? clk_get_rate(chip->tclk1) : 0);
+               clk_get_rate(our_chip->base_clk),
+               !IS_ERR(our_chip->tclk0) ? clk_get_rate(our_chip->tclk0) : 0,
+               !IS_ERR(our_chip->tclk1) ? clk_get_rate(our_chip->tclk1) : 0);
 
        return 0;
 }
 
 static void pwm_samsung_remove(struct platform_device *pdev)
 {
-       struct samsung_pwm_chip *chip = platform_get_drvdata(pdev);
+       struct samsung_pwm_chip *our_chip = platform_get_drvdata(pdev);
 
-       pwmchip_remove(&chip->chip);
+       pwmchip_remove(&our_chip->chip);
 
-       clk_disable_unprepare(chip->base_clk);
+       clk_disable_unprepare(our_chip->base_clk);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -639,9 +629,9 @@ static int pwm_samsung_resume(struct device *dev)
 
        for (i = 0; i < SAMSUNG_PWM_NUM; i++) {
                struct pwm_device *pwm = &chip->pwms[i];
-               struct samsung_pwm_channel *chan = pwm_get_chip_data(pwm);
+               struct samsung_pwm_channel *chan = &our_chip->channel[i];
 
-               if (!chan)
+               if (!test_bit(PWMF_REQUESTED, &pwm->flags))
                        continue;
 
                if (our_chip->variant.output_mask & BIT(i))
index eabddb7c782083cefbdffeedcc0f54184ccc814a..089e50bdbbf01759cd8e49755267dd5cdefbc27d 100644 (file)
@@ -203,7 +203,6 @@ static const struct pwm_ops pwm_sifive_ops = {
        .free = pwm_sifive_free,
        .get_state = pwm_sifive_get_state,
        .apply = pwm_sifive_apply,
-       .owner = THIS_MODULE,
 };
 
 static int pwm_sifive_clock_notifier(struct notifier_block *nb,
index 9e42e3a74ad6ec6e73de13a939fb4f517396fd15..88b01ff9e4602728df2f2d1f9de2872c685b5886 100644 (file)
@@ -200,7 +200,6 @@ static int sl28cpld_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops sl28cpld_pwm_ops = {
        .apply = sl28cpld_pwm_apply,
        .get_state = sl28cpld_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static int sl28cpld_pwm_probe(struct platform_device *pdev)
index 4e1cfd8d7c037ee449b28899dd39939449c853a1..ff991319feef80a8685d22510443ee083c81afa8 100644 (file)
@@ -189,7 +189,6 @@ static int spear_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops spear_pwm_ops = {
        .apply = spear_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int spear_pwm_probe(struct platform_device *pdev)
@@ -207,26 +206,21 @@ static int spear_pwm_probe(struct platform_device *pdev)
        if (IS_ERR(pc->mmio_base))
                return PTR_ERR(pc->mmio_base);
 
-       pc->clk = devm_clk_get(&pdev->dev, NULL);
+       pc->clk = devm_clk_get_prepared(&pdev->dev, NULL);
        if (IS_ERR(pc->clk))
-               return PTR_ERR(pc->clk);
-
-       platform_set_drvdata(pdev, pc);
+               return dev_err_probe(&pdev->dev, PTR_ERR(pc->clk),
+                                    "Failed to get clock\n");
 
        pc->chip.dev = &pdev->dev;
        pc->chip.ops = &spear_pwm_ops;
        pc->chip.npwm = NUM_PWM;
 
-       ret = clk_prepare(pc->clk);
-       if (ret)
-               return ret;
-
        if (of_device_is_compatible(np, "st,spear1340-pwm")) {
                ret = clk_enable(pc->clk);
-               if (ret) {
-                       clk_unprepare(pc->clk);
-                       return ret;
-               }
+               if (ret)
+                       return dev_err_probe(&pdev->dev, ret,
+                                            "Failed to enable clk\n");
+
                /*
                 * Following enables PWM chip, channels would still be
                 * enabled individually through their control register
@@ -238,23 +232,11 @@ static int spear_pwm_probe(struct platform_device *pdev)
                clk_disable(pc->clk);
        }
 
-       ret = pwmchip_add(&pc->chip);
-       if (ret < 0) {
-               clk_unprepare(pc->clk);
-               dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
-       }
+       ret = devm_pwmchip_add(&pdev->dev, &pc->chip);
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret, "pwmchip_add() failed\n");
 
-       return ret;
-}
-
-static void spear_pwm_remove(struct platform_device *pdev)
-{
-       struct spear_pwm_chip *pc = platform_get_drvdata(pdev);
-
-       pwmchip_remove(&pc->chip);
-
-       /* clk was prepared in probe, hence unprepare it here */
-       clk_unprepare(pc->clk);
+       return 0;
 }
 
 static const struct of_device_id spear_pwm_of_match[] = {
@@ -271,7 +253,6 @@ static struct platform_driver spear_pwm_driver = {
                .of_match_table = spear_pwm_of_match,
        },
        .probe = spear_pwm_probe,
-       .remove_new = spear_pwm_remove,
 };
 
 module_platform_driver(spear_pwm_driver);
index 1499c8c1fe3753a89a655d1638f5513641c3e193..77939e1610067fe1d358893ae866ed49fe84464c 100644 (file)
@@ -40,6 +40,11 @@ struct sprd_pwm_chip {
        struct sprd_pwm_chn chn[SPRD_PWM_CHN_NUM];
 };
 
+static inline struct sprd_pwm_chip* sprd_pwm_from_chip(struct pwm_chip *chip)
+{
+       return container_of(chip, struct sprd_pwm_chip, chip);
+}
+
 /*
  * The list of clocks required by PWM channels, and each channel has 2 clocks:
  * enable clock and pwm clock.
@@ -69,8 +74,7 @@ static void sprd_pwm_write(struct sprd_pwm_chip *spc, u32 hwid,
 static int sprd_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
                              struct pwm_state *state)
 {
-       struct sprd_pwm_chip *spc =
-               container_of(chip, struct sprd_pwm_chip, chip);
+       struct sprd_pwm_chip *spc = sprd_pwm_from_chip(chip);
        struct sprd_pwm_chn *chn = &spc->chn[pwm->hwpwm];
        u32 val, duty, prescale;
        u64 tmp;
@@ -162,8 +166,7 @@ static int sprd_pwm_config(struct sprd_pwm_chip *spc, struct pwm_device *pwm,
 static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                          const struct pwm_state *state)
 {
-       struct sprd_pwm_chip *spc =
-               container_of(chip, struct sprd_pwm_chip, chip);
+       struct sprd_pwm_chip *spc = sprd_pwm_from_chip(chip);
        struct sprd_pwm_chn *chn = &spc->chn[pwm->hwpwm];
        struct pwm_state *cstate = &pwm->state;
        int ret;
@@ -210,7 +213,6 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops sprd_pwm_ops = {
        .apply = sprd_pwm_apply,
        .get_state = sprd_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static int sprd_pwm_clk_init(struct sprd_pwm_chip *spc)
@@ -240,10 +242,8 @@ static int sprd_pwm_clk_init(struct sprd_pwm_chip *spc)
                chn->clk_rate = clk_get_rate(clk_pwm);
        }
 
-       if (!i) {
-               dev_err(spc->dev, "no available PWM channels\n");
-               return -ENODEV;
-       }
+       if (!i)
+               return dev_err_probe(spc->dev, -ENODEV, "no available PWM channels\n");
 
        spc->num_pwms = i;
 
@@ -264,7 +264,6 @@ static int sprd_pwm_probe(struct platform_device *pdev)
                return PTR_ERR(spc->base);
 
        spc->dev = &pdev->dev;
-       platform_set_drvdata(pdev, spc);
 
        ret = sprd_pwm_clk_init(spc);
        if (ret)
@@ -274,20 +273,13 @@ static int sprd_pwm_probe(struct platform_device *pdev)
        spc->chip.ops = &sprd_pwm_ops;
        spc->chip.npwm = spc->num_pwms;
 
-       ret = pwmchip_add(&spc->chip);
+       ret = devm_pwmchip_add(&pdev->dev, &spc->chip);
        if (ret)
                dev_err(&pdev->dev, "failed to add PWM chip\n");
 
        return ret;
 }
 
-static void sprd_pwm_remove(struct platform_device *pdev)
-{
-       struct sprd_pwm_chip *spc = platform_get_drvdata(pdev);
-
-       pwmchip_remove(&spc->chip);
-}
-
 static const struct of_device_id sprd_pwm_of_match[] = {
        { .compatible = "sprd,ums512-pwm", },
        { },
@@ -300,7 +292,6 @@ static struct platform_driver sprd_pwm_driver = {
                .of_match_table = sprd_pwm_of_match,
        },
        .probe = sprd_pwm_probe,
-       .remove_new = sprd_pwm_remove,
 };
 
 module_platform_driver(sprd_pwm_driver);
index b1d1373648a38fb39cfa9f263ba53c13564a6f3b..dc92cea31cd0782021c13fe7064e249b111a4efb 100644 (file)
@@ -79,6 +79,7 @@ struct sti_pwm_compat_data {
        unsigned int cpt_num_devs;
        unsigned int max_pwm_cnt;
        unsigned int max_prescale;
+       struct sti_cpt_ddata *ddata;
 };
 
 struct sti_pwm_chip {
@@ -314,7 +315,7 @@ static int sti_pwm_capture(struct pwm_chip *chip, struct pwm_device *pwm,
 {
        struct sti_pwm_chip *pc = to_sti_pwmchip(chip);
        struct sti_pwm_compat_data *cdata = pc->cdata;
-       struct sti_cpt_ddata *ddata = pwm_get_chip_data(pwm);
+       struct sti_cpt_ddata *ddata = &cdata->ddata[pwm->hwpwm];
        struct device *dev = pc->dev;
        unsigned int effective_ticks;
        unsigned long long high, low;
@@ -420,7 +421,6 @@ static const struct pwm_ops sti_pwm_ops = {
        .capture = sti_pwm_capture,
        .apply = sti_pwm_apply,
        .free = sti_pwm_free,
-       .owner = THIS_MODULE,
 };
 
 static irqreturn_t sti_pwm_interrupt(int irq, void *data)
@@ -440,7 +440,7 @@ static irqreturn_t sti_pwm_interrupt(int irq, void *data)
        while (cpt_int_stat) {
                devicenum = ffs(cpt_int_stat) - 1;
 
-               ddata = pwm_get_chip_data(&pc->chip.pwms[devicenum]);
+               ddata = &pc->cdata->ddata[devicenum];
 
                /*
                 * Capture input:
@@ -638,30 +638,28 @@ static int sti_pwm_probe(struct platform_device *pdev)
                        dev_err(dev, "failed to prepare clock\n");
                        return ret;
                }
+
+               cdata->ddata = devm_kzalloc(dev, cdata->cpt_num_devs * sizeof(*cdata->ddata), GFP_KERNEL);
+               if (!cdata->ddata)
+                       return -ENOMEM;
        }
 
        pc->chip.dev = dev;
        pc->chip.ops = &sti_pwm_ops;
        pc->chip.npwm = pc->cdata->pwm_num_devs;
 
-       ret = pwmchip_add(&pc->chip);
-       if (ret < 0) {
-               clk_unprepare(pc->pwm_clk);
-               clk_unprepare(pc->cpt_clk);
-               return ret;
-       }
-
        for (i = 0; i < cdata->cpt_num_devs; i++) {
-               struct sti_cpt_ddata *ddata;
-
-               ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
-               if (!ddata)
-                       return -ENOMEM;
+               struct sti_cpt_ddata *ddata = &cdata->ddata[i];
 
                init_waitqueue_head(&ddata->wait);
                mutex_init(&ddata->lock);
+       }
 
-               pwm_set_chip_data(&pc->chip.pwms[i], ddata);
+       ret = pwmchip_add(&pc->chip);
+       if (ret < 0) {
+               clk_unprepare(pc->pwm_clk);
+               clk_unprepare(pc->cpt_clk);
+               return ret;
        }
 
        platform_set_drvdata(pdev, pc);
index bb3a045a7334303941f277e07f70e14ece98180a..b67974cc18725189fd1bbc33e1449a68d035757a 100644 (file)
@@ -189,7 +189,6 @@ static int stm32_pwm_lp_get_state(struct pwm_chip *chip,
 }
 
 static const struct pwm_ops stm32_pwm_lp_ops = {
-       .owner = THIS_MODULE,
        .apply = stm32_pwm_lp_apply,
        .get_state = stm32_pwm_lp_get_state,
 };
index 3d6be7749e23142a7bdd9cda6c4640f3f6ff0246..3303a754ea020fb3300c1e9e2c46346a4709744f 100644 (file)
@@ -487,7 +487,6 @@ static int stm32_pwm_apply_locked(struct pwm_chip *chip, struct pwm_device *pwm,
 }
 
 static const struct pwm_ops stm32pwm_ops = {
-       .owner = THIS_MODULE,
        .apply = stm32_pwm_apply_locked,
        .capture = IS_ENABLED(CONFIG_DMA_ENGINE) ? stm32_pwm_capture : NULL,
 };
index e205405c4828b81561199b2d9704efa75e19f022..a46f5b4dd81622ce3651f6c35c5fc4ff0b91b83a 100644 (file)
@@ -287,7 +287,6 @@ static int stmpe_24xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops stmpe_24xx_pwm_ops = {
        .apply = stmpe_24xx_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int __init stmpe_pwm_probe(struct platform_device *pdev)
index c84fcf1a13dc2c4d2aad25a6edcca4958d4f9f66..1a439025540d45a0cbf979cb46f8cd92bb1a691a 100644 (file)
@@ -325,7 +325,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops sun4i_pwm_ops = {
        .apply = sun4i_pwm_apply,
        .get_state = sun4i_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static const struct sun4i_pwm_data sun4i_pwm_dual_nobypass = {
index 7705c7b86c3a3066544bcabf8787133d8a8d6231..773e2f80526e891d6ec8feceb69f05d090bba171 100644 (file)
@@ -163,7 +163,6 @@ static int sunplus_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops sunplus_pwm_ops = {
        .apply = sunplus_pwm_apply,
        .get_state = sunplus_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static void sunplus_pwm_clk_release(void *data)
index a169a34e07781683bf0d68de9ffa475ca12cb43d..39ea51e08c946d0ec24a7cc311fc930b3ca1b823 100644 (file)
@@ -268,7 +268,6 @@ static int tegra_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops tegra_pwm_ops = {
        .apply = tegra_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int tegra_pwm_probe(struct platform_device *pdev)
index 8c94b266c1b2a0ee4b44a820e800ca5fa496a6ff..11e3549cf103445cfd4f6f6de5eca55769ecd1e5 100644 (file)
@@ -205,7 +205,6 @@ static int ecap_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops ecap_pwm_ops = {
        .apply = ecap_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static const struct of_device_id ecap_of_match[] = {
index ecbfd7e954ecb8a1feb10c3c2cadb156e256ac4a..66ac2655845f783a20bf5efd7f974f061ee5ee25 100644 (file)
@@ -437,7 +437,6 @@ static int ehrpwm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops ehrpwm_pwm_ops = {
        .free = ehrpwm_pwm_free,
        .apply = ehrpwm_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static const struct of_device_id ehrpwm_of_match[] = {
index 8fb84b4418538023129e3288f280ed7940263912..625233f4703a972462c2d096bd1b7ca79c1fb5db 100644 (file)
@@ -189,7 +189,6 @@ static int twl4030_pwmled_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops twl4030_pwmled_ops = {
        .apply = twl4030_pwmled_apply,
-       .owner = THIS_MODULE,
 };
 
 static int twl6030_pwmled_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -342,7 +341,6 @@ static const struct pwm_ops twl6030_pwmled_ops = {
        .apply = twl6030_pwmled_apply,
        .request = twl6030_pwmled_request,
        .free = twl6030_pwmled_free,
-       .owner = THIS_MODULE,
 };
 
 static int twl_pwmled_probe(struct platform_device *pdev)
index 86567add79dbe5df1435c9f29f1575aa4da7e4b7..603d31f27470f6b80b6cc347b2fcc44d0d855bb6 100644 (file)
@@ -333,12 +333,10 @@ static const struct pwm_ops twl4030_pwm_ops = {
        .apply = twl4030_pwm_apply,
        .request = twl4030_pwm_request,
        .free = twl4030_pwm_free,
-       .owner = THIS_MODULE,
 };
 
 static const struct pwm_ops twl6030_pwm_ops = {
        .apply = twl6030_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int twl_pwm_probe(struct platform_device *pdev)
index 7f7591a2384c558e81d81b609cb665891a656779..8d736d5581221614b92a7037bba93827b8345577 100644 (file)
@@ -129,7 +129,6 @@ static int visconti_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 static const struct pwm_ops visconti_pwm_ops = {
        .apply = visconti_pwm_apply,
        .get_state = visconti_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static int visconti_pwm_probe(struct platform_device *pdev)
index 6d46db51daaccdf0cb124d34f172dfe9ff7be9d6..5568d5312d3caf4d148692fd4a383053c1576747 100644 (file)
@@ -221,7 +221,6 @@ static int vt8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
 
 static const struct pwm_ops vt8500_pwm_ops = {
        .apply = vt8500_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static const struct of_device_id vt8500_pwm_dt_ids[] = {
@@ -236,10 +235,8 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        int ret;
 
-       if (!np) {
-               dev_err(&pdev->dev, "invalid devicetree node\n");
-               return -EINVAL;
-       }
+       if (!np)
+               return dev_err_probe(&pdev->dev, -EINVAL, "invalid devicetree node\n");
 
        vt8500 = devm_kzalloc(&pdev->dev, sizeof(*vt8500), GFP_KERNEL);
        if (vt8500 == NULL)
@@ -249,45 +246,23 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
        vt8500->chip.ops = &vt8500_pwm_ops;
        vt8500->chip.npwm = VT8500_NR_PWMS;
 
-       vt8500->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(vt8500->clk)) {
-               dev_err(&pdev->dev, "clock source not specified\n");
-               return PTR_ERR(vt8500->clk);
-       }
+       vt8500->clk = devm_clk_get_prepared(&pdev->dev, NULL);
+       if (IS_ERR(vt8500->clk))
+               return dev_err_probe(&pdev->dev, PTR_ERR(vt8500->clk), "clock source not specified\n");
 
        vt8500->base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(vt8500->base))
                return PTR_ERR(vt8500->base);
 
-       ret = clk_prepare(vt8500->clk);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to prepare clock\n");
-               return ret;
-       }
-
-       ret = pwmchip_add(&vt8500->chip);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "failed to add PWM chip\n");
-               clk_unprepare(vt8500->clk);
-               return ret;
-       }
-
-       platform_set_drvdata(pdev, vt8500);
-       return ret;
-}
-
-static void vt8500_pwm_remove(struct platform_device *pdev)
-{
-       struct vt8500_chip *vt8500 = platform_get_drvdata(pdev);
-
-       pwmchip_remove(&vt8500->chip);
+       ret = devm_pwmchip_add(&pdev->dev, &vt8500->chip);
+       if (ret < 0)
+               return dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n");
 
-       clk_unprepare(vt8500->clk);
+       return 0;
 }
 
 static struct platform_driver vt8500_pwm_driver = {
        .probe          = vt8500_pwm_probe,
-       .remove_new     = vt8500_pwm_remove,
        .driver         = {
                .name   = "vt8500-pwm",
                .of_match_table = vt8500_pwm_dt_ids,
index 85153ee90809c757144b2422b77853adb160c79c..5f3c2a6fed11c34631ebd26d9131dd9b812a79bb 100644 (file)
@@ -198,7 +198,6 @@ static int xilinx_pwm_get_state(struct pwm_chip *chip,
 static const struct pwm_ops xilinx_pwm_ops = {
        .apply = xilinx_pwm_apply,
        .get_state = xilinx_pwm_get_state,
-       .owner = THIS_MODULE,
 };
 
 static const struct regmap_config xilinx_pwm_regmap_config = {
index ea6fa1100a00b37316bbb1882e10dae543ee049e..6d7736a031f7fc9db12281cbd8dc6e898edf1dc3 100644 (file)
@@ -47,6 +47,7 @@
 #define MT8192_SCP2SPM_IPC_CLR         0x4094
 #define MT8192_GIPC_IN_SET             0x4098
 #define MT8192_HOST_IPC_INT_BIT                BIT(0)
+#define MT8195_CORE1_HOST_IPC_INT_BIT  BIT(4)
 
 #define MT8192_CORE0_SW_RSTN_CLR       0x10000
 #define MT8192_CORE0_SW_RSTN_SET       0x10004
 #define MT8192_CORE0_WDT_IRQ           0x10030
 #define MT8192_CORE0_WDT_CFG           0x10034
 
+#define MT8195_SYS_STATUS              0x4004
+#define MT8195_CORE0_WDT               BIT(16)
+#define MT8195_CORE1_WDT               BIT(17)
+
 #define MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS                GENMASK(7, 4)
 
+#define MT8195_CPU1_SRAM_PD                    0x1084
+#define MT8195_SSHUB2APMCU_IPC_SET             0x4088
+#define MT8195_SSHUB2APMCU_IPC_CLR             0x408C
+#define MT8195_CORE1_SW_RSTN_CLR               0x20000
+#define MT8195_CORE1_SW_RSTN_SET               0x20004
+#define MT8195_CORE1_MEM_ATT_PREDEF            0x20008
+#define MT8195_CORE1_WDT_IRQ                   0x20030
+#define MT8195_CORE1_WDT_CFG                   0x20034
+
+#define MT8195_SEC_CTRL                                0x85000
+#define MT8195_CORE_OFFSET_ENABLE_D            BIT(13)
+#define MT8195_CORE_OFFSET_ENABLE_I            BIT(12)
+#define MT8195_L2TCM_OFFSET_RANGE_0_LOW                0x850b0
+#define MT8195_L2TCM_OFFSET_RANGE_0_HIGH       0x850b4
+#define MT8195_L2TCM_OFFSET                    0x850d0
+
 #define SCP_FW_VER_LEN                 32
 #define SCP_SHARE_BUFFER_SIZE          288
 
@@ -91,17 +112,24 @@ struct mtk_scp_of_data {
        size_t ipi_buf_offset;
 };
 
+struct mtk_scp_of_cluster {
+       void __iomem *reg_base;
+       void __iomem *l1tcm_base;
+       size_t l1tcm_size;
+       phys_addr_t l1tcm_phys;
+       struct list_head mtk_scp_list;
+       /* Prevent concurrent operations of this structure and L2TCM power control. */
+       struct mutex cluster_lock;
+       u32 l2tcm_refcnt;
+};
+
 struct mtk_scp {
        struct device *dev;
        struct rproc *rproc;
        struct clk *clk;
-       void __iomem *reg_base;
        void __iomem *sram_base;
        size_t sram_size;
        phys_addr_t sram_phys;
-       void __iomem *l1tcm_base;
-       size_t l1tcm_size;
-       phys_addr_t l1tcm_phys;
 
        const struct mtk_scp_of_data *data;
 
@@ -119,6 +147,9 @@ struct mtk_scp {
        size_t dram_size;
 
        struct rproc_subdev *rpmsg_subdev;
+
+       struct list_head elem;
+       struct mtk_scp_of_cluster *cluster;
 };
 
 /**
index dcc94ee2458d8ef5f415c0ede6fdc144b7834168..a35409eda0cf2b2380bf74f59a7bef642bab1838 100644 (file)
@@ -68,8 +68,14 @@ EXPORT_SYMBOL_GPL(scp_put);
 
 static void scp_wdt_handler(struct mtk_scp *scp, u32 scp_to_host)
 {
+       struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+       struct mtk_scp *scp_node;
+
        dev_err(scp->dev, "SCP watchdog timeout! 0x%x", scp_to_host);
-       rproc_report_crash(scp->rproc, RPROC_WATCHDOG);
+
+       /* report watchdog timeout to all cores */
+       list_for_each_entry(scp_node, &scp_cluster->mtk_scp_list, elem)
+               rproc_report_crash(scp_node->rproc, RPROC_WATCHDOG);
 }
 
 static void scp_init_ipi_handler(void *data, unsigned int len, void *priv)
@@ -106,7 +112,7 @@ static void scp_ipi_handler(struct mtk_scp *scp)
        scp_ipi_lock(scp, id);
        handler = ipi_desc[id].handler;
        if (!handler) {
-               dev_err(scp->dev, "No such ipi id = %d\n", id);
+               dev_err(scp->dev, "No handler for ipi id = %d\n", id);
                scp_ipi_unlock(scp, id);
                return;
        }
@@ -152,35 +158,45 @@ static void mt8183_scp_reset_assert(struct mtk_scp *scp)
 {
        u32 val;
 
-       val = readl(scp->reg_base + MT8183_SW_RSTN);
+       val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
        val &= ~MT8183_SW_RSTN_BIT;
-       writel(val, scp->reg_base + MT8183_SW_RSTN);
+       writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
 }
 
 static void mt8183_scp_reset_deassert(struct mtk_scp *scp)
 {
        u32 val;
 
-       val = readl(scp->reg_base + MT8183_SW_RSTN);
+       val = readl(scp->cluster->reg_base + MT8183_SW_RSTN);
        val |= MT8183_SW_RSTN_BIT;
-       writel(val, scp->reg_base + MT8183_SW_RSTN);
+       writel(val, scp->cluster->reg_base + MT8183_SW_RSTN);
 }
 
 static void mt8192_scp_reset_assert(struct mtk_scp *scp)
 {
-       writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+       writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 }
 
 static void mt8192_scp_reset_deassert(struct mtk_scp *scp)
 {
-       writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_CLR);
+       writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_CLR);
+}
+
+static void mt8195_scp_c1_reset_assert(struct mtk_scp *scp)
+{
+       writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_SET);
+}
+
+static void mt8195_scp_c1_reset_deassert(struct mtk_scp *scp)
+{
+       writel(1, scp->cluster->reg_base + MT8195_CORE1_SW_RSTN_CLR);
 }
 
 static void mt8183_scp_irq_handler(struct mtk_scp *scp)
 {
        u32 scp_to_host;
 
-       scp_to_host = readl(scp->reg_base + MT8183_SCP_TO_HOST);
+       scp_to_host = readl(scp->cluster->reg_base + MT8183_SCP_TO_HOST);
        if (scp_to_host & MT8183_SCP_IPC_INT_BIT)
                scp_ipi_handler(scp);
        else
@@ -188,14 +204,14 @@ static void mt8183_scp_irq_handler(struct mtk_scp *scp)
 
        /* SCP won't send another interrupt until we set SCP_TO_HOST to 0. */
        writel(MT8183_SCP_IPC_INT_BIT | MT8183_SCP_WDT_INT_BIT,
-              scp->reg_base + MT8183_SCP_TO_HOST);
+              scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 }
 
 static void mt8192_scp_irq_handler(struct mtk_scp *scp)
 {
        u32 scp_to_host;
 
-       scp_to_host = readl(scp->reg_base + MT8192_SCP2APMCU_IPC_SET);
+       scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);
 
        if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
                scp_ipi_handler(scp);
@@ -205,13 +221,48 @@ static void mt8192_scp_irq_handler(struct mtk_scp *scp)
                 * MT8192_SCP2APMCU_IPC.
                 */
                writel(MT8192_SCP_IPC_INT_BIT,
-                      scp->reg_base + MT8192_SCP2APMCU_IPC_CLR);
+                      scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
        } else {
                scp_wdt_handler(scp, scp_to_host);
-               writel(1, scp->reg_base + MT8192_CORE0_WDT_IRQ);
+               writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
        }
 }
 
+static void mt8195_scp_irq_handler(struct mtk_scp *scp)
+{
+       u32 scp_to_host;
+
+       scp_to_host = readl(scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_SET);
+
+       if (scp_to_host & MT8192_SCP_IPC_INT_BIT) {
+               scp_ipi_handler(scp);
+       } else {
+               u32 reason = readl(scp->cluster->reg_base + MT8195_SYS_STATUS);
+
+               if (reason & MT8195_CORE0_WDT)
+                       writel(1, scp->cluster->reg_base + MT8192_CORE0_WDT_IRQ);
+
+               if (reason & MT8195_CORE1_WDT)
+                       writel(1, scp->cluster->reg_base + MT8195_CORE1_WDT_IRQ);
+
+               scp_wdt_handler(scp, reason);
+       }
+
+       writel(scp_to_host, scp->cluster->reg_base + MT8192_SCP2APMCU_IPC_CLR);
+}
+
+static void mt8195_scp_c1_irq_handler(struct mtk_scp *scp)
+{
+       u32 scp_to_host;
+
+       scp_to_host = readl(scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_SET);
+
+       if (scp_to_host & MT8192_SCP_IPC_INT_BIT)
+               scp_ipi_handler(scp);
+
+       writel(scp_to_host, scp->cluster->reg_base + MT8195_SSHUB2APMCU_IPC_CLR);
+}
+
 static irqreturn_t scp_irq_handler(int irq, void *priv)
 {
        struct mtk_scp *scp = priv;
@@ -341,26 +392,26 @@ static int mt8195_scp_clk_get(struct mtk_scp *scp)
 static int mt8183_scp_before_load(struct mtk_scp *scp)
 {
        /* Clear SCP to host interrupt */
-       writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+       writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 
        /* Reset clocks before loading FW */
-       writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
-       writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+       writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
+       writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);
 
        /* Initialize TCM before loading FW. */
-       writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
-       writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+       writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
+       writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
 
        /* Turn on the power of SCP's SRAM before using it. */
-       writel(0x0, scp->reg_base + MT8183_SCP_SRAM_PDN);
+       writel(0x0, scp->cluster->reg_base + MT8183_SCP_SRAM_PDN);
 
        /*
         * Set I-cache and D-cache size before loading SCP FW.
         * SCP SRAM logical address may change when cache size setting differs.
         */
        writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
-              scp->reg_base + MT8183_SCP_CACHE_CON);
-       writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+              scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
+       writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);
 
        return 0;
 }
@@ -386,28 +437,28 @@ static void scp_sram_power_off(void __iomem *addr, u32 reserved_mask)
 static int mt8186_scp_before_load(struct mtk_scp *scp)
 {
        /* Clear SCP to host interrupt */
-       writel(MT8183_SCP_IPC_INT_BIT, scp->reg_base + MT8183_SCP_TO_HOST);
+       writel(MT8183_SCP_IPC_INT_BIT, scp->cluster->reg_base + MT8183_SCP_TO_HOST);
 
        /* Reset clocks before loading FW */
-       writel(0x0, scp->reg_base + MT8183_SCP_CLK_SW_SEL);
-       writel(0x0, scp->reg_base + MT8183_SCP_CLK_DIV_SEL);
+       writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_SW_SEL);
+       writel(0x0, scp->cluster->reg_base + MT8183_SCP_CLK_DIV_SEL);
 
        /* Turn on the power of SCP's SRAM before using it. Enable 1 block per time*/
-       scp_sram_power_on(scp->reg_base + MT8183_SCP_SRAM_PDN, 0);
+       scp_sram_power_on(scp->cluster->reg_base + MT8183_SCP_SRAM_PDN, 0);
 
        /* Initialize TCM before loading FW. */
-       writel(0x0, scp->reg_base + MT8183_SCP_L1_SRAM_PD);
-       writel(0x0, scp->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
-       writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
-       writel(0x0, scp->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
+       writel(0x0, scp->cluster->reg_base + MT8183_SCP_L1_SRAM_PD);
+       writel(0x0, scp->cluster->reg_base + MT8183_SCP_TCM_TAIL_SRAM_PD);
+       writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_P1);
+       writel(0x0, scp->cluster->reg_base + MT8186_SCP_L1_SRAM_PD_p2);
 
        /*
         * Set I-cache and D-cache size before loading SCP FW.
         * SCP SRAM logical address may change when cache size setting differs.
         */
        writel(MT8183_SCP_CACHE_CON_WAYEN | MT8183_SCP_CACHESIZE_8KB,
-              scp->reg_base + MT8183_SCP_CACHE_CON);
-       writel(MT8183_SCP_CACHESIZE_8KB, scp->reg_base + MT8183_SCP_DCACHE_CON);
+              scp->cluster->reg_base + MT8183_SCP_CACHE_CON);
+       writel(MT8183_SCP_CACHESIZE_8KB, scp->cluster->reg_base + MT8183_SCP_DCACHE_CON);
 
        return 0;
 }
@@ -415,40 +466,100 @@ static int mt8186_scp_before_load(struct mtk_scp *scp)
 static int mt8192_scp_before_load(struct mtk_scp *scp)
 {
        /* clear SPM interrupt, SCP2SPM_IPC_CLR */
-       writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
+       writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
 
-       writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+       writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 
        /* enable SRAM clock */
-       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
-       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
-       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
-       scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
-       scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+       scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+       scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+       scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+       scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+       scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 
        /* enable MPU for all memory regions */
-       writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+       writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+       return 0;
+}
+
+static int mt8195_scp_l2tcm_on(struct mtk_scp *scp)
+{
+       struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+       mutex_lock(&scp_cluster->cluster_lock);
+
+       if (scp_cluster->l2tcm_refcnt == 0) {
+               /* clear SPM interrupt, SCP2SPM_IPC_CLR */
+               writel(0xff, scp->cluster->reg_base + MT8192_SCP2SPM_IPC_CLR);
+
+               /* Power on L2TCM */
+               scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+               scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+               scp_sram_power_on(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+               scp_sram_power_on(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
+                                 MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+       }
+
+       scp_cluster->l2tcm_refcnt += 1;
+
+       mutex_unlock(&scp_cluster->cluster_lock);
 
        return 0;
 }
 
 static int mt8195_scp_before_load(struct mtk_scp *scp)
 {
-       /* clear SPM interrupt, SCP2SPM_IPC_CLR */
-       writel(0xff, scp->reg_base + MT8192_SCP2SPM_IPC_CLR);
+       writel(1, scp->cluster->reg_base + MT8192_CORE0_SW_RSTN_SET);
 
-       writel(1, scp->reg_base + MT8192_CORE0_SW_RSTN_SET);
+       mt8195_scp_l2tcm_on(scp);
 
-       /* enable SRAM clock */
-       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
-       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
-       scp_sram_power_on(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
-       scp_sram_power_on(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
-                         MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
-       scp_sram_power_on(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+       scp_sram_power_on(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 
        /* enable MPU for all memory regions */
-       writel(0xff, scp->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+       writel(0xff, scp->cluster->reg_base + MT8192_CORE0_MEM_ATT_PREDEF);
+
+       return 0;
+}
+
+static int mt8195_scp_c1_before_load(struct mtk_scp *scp)
+{
+       u32 sec_ctrl;
+       struct mtk_scp *scp_c0;
+       struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+       scp->data->scp_reset_assert(scp);
+
+       mt8195_scp_l2tcm_on(scp);
+
+       scp_sram_power_on(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
+
+       /* enable MPU for all memory regions */
+       writel(0xff, scp->cluster->reg_base + MT8195_CORE1_MEM_ATT_PREDEF);
+
+       /*
+        * The L2TCM_OFFSET_RANGE and L2TCM_OFFSET shift the destination address
+        * on SRAM when SCP core 1 accesses SRAM.
+        *
+        * This configuration solves booting the SCP core 0 and core 1 from
+        * different SRAM address because core 0 and core 1 both boot from
+        * the head of SRAM by default. This must be configured before booting SCP core 1.
+        *
+        * The value of L2TCM_OFFSET_RANGE is from the viewpoint of SCP core 1.
+        * When SCP core 1 issues address within the range (L2TCM_OFFSET_RANGE),
+        * the address will be added with a fixed offset (L2TCM_OFFSET) on the bus.
+        * The shift action is transparent to software.
+        */
+       writel(0, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_LOW);
+       writel(scp->sram_size, scp->cluster->reg_base + MT8195_L2TCM_OFFSET_RANGE_0_HIGH);
+
+       scp_c0 = list_first_entry(&scp_cluster->mtk_scp_list, struct mtk_scp, elem);
+       writel(scp->sram_phys - scp_c0->sram_phys, scp->cluster->reg_base + MT8195_L2TCM_OFFSET);
+
+       /* enable SRAM offset when fetching instruction and data */
+       sec_ctrl = readl(scp->cluster->reg_base + MT8195_SEC_CTRL);
+       sec_ctrl |= MT8195_CORE_OFFSET_ENABLE_I | MT8195_CORE_OFFSET_ENABLE_D;
+       writel(sec_ctrl, scp->cluster->reg_base + MT8195_SEC_CTRL);
 
        return 0;
 }
@@ -567,11 +678,11 @@ static void *mt8192_scp_da_to_va(struct mtk_scp *scp, u64 da, size_t len)
        }
 
        /* optional memory region */
-       if (scp->l1tcm_size &&
-           da >= scp->l1tcm_phys &&
-           (da + len) <= scp->l1tcm_phys + scp->l1tcm_size) {
-               offset = da - scp->l1tcm_phys;
-               return (void __force *)scp->l1tcm_base + offset;
+       if (scp->cluster->l1tcm_size &&
+           da >= scp->cluster->l1tcm_phys &&
+           (da + len) <= scp->cluster->l1tcm_phys + scp->cluster->l1tcm_size) {
+               offset = da - scp->cluster->l1tcm_phys;
+               return (void __force *)scp->cluster->l1tcm_base + offset;
        }
 
        /* optional memory region */
@@ -595,34 +706,62 @@ static void *scp_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iome
 static void mt8183_scp_stop(struct mtk_scp *scp)
 {
        /* Disable SCP watchdog */
-       writel(0, scp->reg_base + MT8183_WDT_CFG);
+       writel(0, scp->cluster->reg_base + MT8183_WDT_CFG);
 }
 
 static void mt8192_scp_stop(struct mtk_scp *scp)
 {
        /* Disable SRAM clock */
-       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
-       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
-       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
-       scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
-       scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+       scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+       scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+       scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+       scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN, 0);
+       scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
 
        /* Disable SCP watchdog */
-       writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
+       writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static void mt8195_scp_l2tcm_off(struct mtk_scp *scp)
+{
+       struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+
+       mutex_lock(&scp_cluster->cluster_lock);
+
+       if (scp_cluster->l2tcm_refcnt > 0)
+               scp_cluster->l2tcm_refcnt -= 1;
+
+       if (scp_cluster->l2tcm_refcnt == 0) {
+               /* Power off L2TCM */
+               scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
+               scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
+               scp_sram_power_off(scp->cluster->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
+               scp_sram_power_off(scp->cluster->reg_base + MT8192_L1TCM_SRAM_PDN,
+                                  MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
+       }
+
+       mutex_unlock(&scp_cluster->cluster_lock);
 }
 
 static void mt8195_scp_stop(struct mtk_scp *scp)
 {
-       /* Disable SRAM clock */
-       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_0, 0);
-       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_1, 0);
-       scp_sram_power_off(scp->reg_base + MT8192_L2TCM_SRAM_PD_2, 0);
-       scp_sram_power_off(scp->reg_base + MT8192_L1TCM_SRAM_PDN,
-                          MT8195_L1TCM_SRAM_PDN_RESERVED_RSI_BITS);
-       scp_sram_power_off(scp->reg_base + MT8192_CPU0_SRAM_PD, 0);
+       mt8195_scp_l2tcm_off(scp);
+
+       scp_sram_power_off(scp->cluster->reg_base + MT8192_CPU0_SRAM_PD, 0);
+
+       /* Disable SCP watchdog */
+       writel(0, scp->cluster->reg_base + MT8192_CORE0_WDT_CFG);
+}
+
+static void mt8195_scp_c1_stop(struct mtk_scp *scp)
+{
+       mt8195_scp_l2tcm_off(scp);
+
+       /* Power off CPU SRAM */
+       scp_sram_power_off(scp->cluster->reg_base + MT8195_CPU1_SRAM_PD, 0);
 
        /* Disable SCP watchdog */
-       writel(0, scp->reg_base + MT8192_CORE0_WDT_CFG);
+       writel(0, scp->cluster->reg_base + MT8195_CORE1_WDT_CFG);
 }
 
 static int scp_stop(struct rproc *rproc)
@@ -811,7 +950,9 @@ static void scp_remove_rpmsg_subdev(struct mtk_scp *scp)
        }
 }
 
-static int scp_probe(struct platform_device *pdev)
+static struct mtk_scp *scp_rproc_init(struct platform_device *pdev,
+                                     struct mtk_scp_of_cluster *scp_cluster,
+                                     const struct mtk_scp_of_data *of_data)
 {
        struct device *dev = &pdev->dev;
        struct device_node *np = dev->of_node;
@@ -823,52 +964,38 @@ static int scp_probe(struct platform_device *pdev)
 
        ret = rproc_of_parse_firmware(dev, 0, &fw_name);
        if (ret < 0 && ret != -EINVAL)
-               return ret;
+               return ERR_PTR(ret);
 
        rproc = devm_rproc_alloc(dev, np->name, &scp_ops, fw_name, sizeof(*scp));
-       if (!rproc)
-               return dev_err_probe(dev, -ENOMEM, "unable to allocate remoteproc\n");
+       if (!rproc) {
+               dev_err(dev, "unable to allocate remoteproc\n");
+               return ERR_PTR(-ENOMEM);
+       }
 
        scp = rproc->priv;
        scp->rproc = rproc;
        scp->dev = dev;
-       scp->data = of_device_get_match_data(dev);
+       scp->data = of_data;
+       scp->cluster = scp_cluster;
        platform_set_drvdata(pdev, scp);
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sram");
        scp->sram_base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(scp->sram_base))
-               return dev_err_probe(dev, PTR_ERR(scp->sram_base),
-                                    "Failed to parse and map sram memory\n");
+       if (IS_ERR(scp->sram_base)) {
+               dev_err(dev, "Failed to parse and map sram memory\n");
+               return ERR_CAST(scp->sram_base);
+       }
 
        scp->sram_size = resource_size(res);
        scp->sram_phys = res->start;
 
-       /* l1tcm is an optional memory region */
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
-       scp->l1tcm_base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(scp->l1tcm_base)) {
-               ret = PTR_ERR(scp->l1tcm_base);
-               if (ret != -EINVAL) {
-                       return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
-               }
-       } else {
-               scp->l1tcm_size = resource_size(res);
-               scp->l1tcm_phys = res->start;
-       }
-
-       scp->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
-       if (IS_ERR(scp->reg_base))
-               return dev_err_probe(dev, PTR_ERR(scp->reg_base),
-                                    "Failed to parse and map cfg memory\n");
-
        ret = scp->data->scp_clk_get(scp);
        if (ret)
-               return ret;
+               return ERR_PTR(ret);
 
        ret = scp_map_memory_region(scp);
        if (ret)
-               return ret;
+               return ERR_PTR(ret);
 
        mutex_init(&scp->send_lock);
        for (i = 0; i < SCP_IPI_MAX; i++)
@@ -895,11 +1022,7 @@ static int scp_probe(struct platform_device *pdev)
                goto remove_subdev;
        }
 
-       ret = rproc_add(rproc);
-       if (ret)
-               goto remove_subdev;
-
-       return 0;
+       return scp;
 
 remove_subdev:
        scp_remove_rpmsg_subdev(scp);
@@ -910,15 +1033,13 @@ release_dev_mem:
                mutex_destroy(&scp->ipi_desc[i].lock);
        mutex_destroy(&scp->send_lock);
 
-       return ret;
+       return ERR_PTR(ret);
 }
 
-static void scp_remove(struct platform_device *pdev)
+static void scp_free(struct mtk_scp *scp)
 {
-       struct mtk_scp *scp = platform_get_drvdata(pdev);
        int i;
 
-       rproc_del(scp->rproc);
        scp_remove_rpmsg_subdev(scp);
        scp_ipi_unregister(scp, SCP_IPI_INIT);
        scp_unmap_memory_region(scp);
@@ -927,6 +1048,186 @@ static void scp_remove(struct platform_device *pdev)
        mutex_destroy(&scp->send_lock);
 }
 
+static int scp_add_single_core(struct platform_device *pdev,
+                              struct mtk_scp_of_cluster *scp_cluster)
+{
+       struct device *dev = &pdev->dev;
+       struct list_head *scp_list = &scp_cluster->mtk_scp_list;
+       struct mtk_scp *scp;
+       int ret;
+
+       scp = scp_rproc_init(pdev, scp_cluster, of_device_get_match_data(dev));
+       if (IS_ERR(scp))
+               return PTR_ERR(scp);
+
+       ret = rproc_add(scp->rproc);
+       if (ret) {
+               dev_err(dev, "Failed to add rproc\n");
+               scp_free(scp);
+               return ret;
+       }
+
+       list_add_tail(&scp->elem, scp_list);
+
+       return 0;
+}
+
+static int scp_add_multi_core(struct platform_device *pdev,
+                             struct mtk_scp_of_cluster *scp_cluster)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev_of_node(dev);
+       struct platform_device *cpdev;
+       struct device_node *child;
+       struct list_head *scp_list = &scp_cluster->mtk_scp_list;
+       const struct mtk_scp_of_data **cluster_of_data;
+       struct mtk_scp *scp, *temp;
+       int core_id = 0;
+       int ret;
+
+       cluster_of_data = (const struct mtk_scp_of_data **)of_device_get_match_data(dev);
+
+       for_each_available_child_of_node(np, child) {
+               if (!cluster_of_data[core_id]) {
+                       ret = -EINVAL;
+                       dev_err(dev, "Not support core %d\n", core_id);
+                       of_node_put(child);
+                       goto init_fail;
+               }
+
+               cpdev = of_find_device_by_node(child);
+               if (!cpdev) {
+                       ret = -ENODEV;
+                       dev_err(dev, "Not found platform device for core %d\n", core_id);
+                       of_node_put(child);
+                       goto init_fail;
+               }
+
+               scp = scp_rproc_init(cpdev, scp_cluster, cluster_of_data[core_id]);
+               put_device(&cpdev->dev);
+               if (IS_ERR(scp)) {
+                       ret = PTR_ERR(scp);
+                       dev_err(dev, "Failed to initialize core %d rproc\n", core_id);
+                       of_node_put(child);
+                       goto init_fail;
+               }
+
+               ret = rproc_add(scp->rproc);
+               if (ret) {
+                       dev_err(dev, "Failed to add rproc of core %d\n", core_id);
+                       of_node_put(child);
+                       scp_free(scp);
+                       goto init_fail;
+               }
+
+               list_add_tail(&scp->elem, scp_list);
+               core_id++;
+       }
+
+       /*
+        * Here we are setting the platform device for @pdev to the last @scp that was
+        * created, which is needed because (1) scp_rproc_init() is calling
+        * platform_set_drvdata() on the child platform devices and (2) we need a handle to
+        * the cluster list in scp_remove().
+        */
+       platform_set_drvdata(pdev, scp);
+
+       return 0;
+
+init_fail:
+       list_for_each_entry_safe_reverse(scp, temp, scp_list, elem) {
+               list_del(&scp->elem);
+               rproc_del(scp->rproc);
+               scp_free(scp);
+       }
+
+       return ret;
+}
+
+static bool scp_is_single_core(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev_of_node(dev);
+       struct device_node *child;
+       int num_cores = 0;
+
+       for_each_child_of_node(np, child)
+               if (of_device_is_compatible(child, "mediatek,scp-core"))
+                       num_cores++;
+
+       return num_cores < 2;
+}
+
+static int scp_cluster_init(struct platform_device *pdev, struct mtk_scp_of_cluster *scp_cluster)
+{
+       int ret;
+
+       if (scp_is_single_core(pdev))
+               ret = scp_add_single_core(pdev, scp_cluster);
+       else
+               ret = scp_add_multi_core(pdev, scp_cluster);
+
+       return ret;
+}
+
+static int scp_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct mtk_scp_of_cluster *scp_cluster;
+       struct resource *res;
+       int ret;
+
+       scp_cluster = devm_kzalloc(dev, sizeof(*scp_cluster), GFP_KERNEL);
+       if (!scp_cluster)
+               return -ENOMEM;
+
+       scp_cluster->reg_base = devm_platform_ioremap_resource_byname(pdev, "cfg");
+       if (IS_ERR(scp_cluster->reg_base))
+               return dev_err_probe(dev, PTR_ERR(scp_cluster->reg_base),
+                                    "Failed to parse and map cfg memory\n");
+
+       /* l1tcm is an optional memory region */
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "l1tcm");
+       scp_cluster->l1tcm_base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(scp_cluster->l1tcm_base)) {
+               ret = PTR_ERR(scp_cluster->l1tcm_base);
+               if (ret != -EINVAL)
+                       return dev_err_probe(dev, ret, "Failed to map l1tcm memory\n");
+
+               scp_cluster->l1tcm_base = NULL;
+       } else {
+               scp_cluster->l1tcm_size = resource_size(res);
+               scp_cluster->l1tcm_phys = res->start;
+       }
+
+       INIT_LIST_HEAD(&scp_cluster->mtk_scp_list);
+       mutex_init(&scp_cluster->cluster_lock);
+
+       ret = devm_of_platform_populate(dev);
+       if (ret)
+               return dev_err_probe(dev, ret, "Failed to populate platform devices\n");
+
+       ret = scp_cluster_init(pdev, scp_cluster);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void scp_remove(struct platform_device *pdev)
+{
+       struct mtk_scp *scp = platform_get_drvdata(pdev);
+       struct mtk_scp_of_cluster *scp_cluster = scp->cluster;
+       struct mtk_scp *temp;
+
+       list_for_each_entry_safe_reverse(scp, temp, &scp_cluster->mtk_scp_list, elem) {
+               list_del(&scp->elem);
+               rproc_del(scp->rproc);
+               scp_free(scp);
+       }
+       mutex_destroy(&scp_cluster->cluster_lock);
+}
+
 static const struct mtk_scp_of_data mt8183_of_data = {
        .scp_clk_get = mt8183_scp_clk_get,
        .scp_before_load = mt8183_scp_before_load,
@@ -980,7 +1281,7 @@ static const struct mtk_scp_of_data mt8192_of_data = {
 static const struct mtk_scp_of_data mt8195_of_data = {
        .scp_clk_get = mt8195_scp_clk_get,
        .scp_before_load = mt8195_scp_before_load,
-       .scp_irq_handler = mt8192_scp_irq_handler,
+       .scp_irq_handler = mt8195_scp_irq_handler,
        .scp_reset_assert = mt8192_scp_reset_assert,
        .scp_reset_deassert = mt8192_scp_reset_deassert,
        .scp_stop = mt8195_scp_stop,
@@ -989,12 +1290,31 @@ static const struct mtk_scp_of_data mt8195_of_data = {
        .host_to_scp_int_bit = MT8192_HOST_IPC_INT_BIT,
 };
 
+static const struct mtk_scp_of_data mt8195_of_data_c1 = {
+       .scp_clk_get = mt8195_scp_clk_get,
+       .scp_before_load = mt8195_scp_c1_before_load,
+       .scp_irq_handler = mt8195_scp_c1_irq_handler,
+       .scp_reset_assert = mt8195_scp_c1_reset_assert,
+       .scp_reset_deassert = mt8195_scp_c1_reset_deassert,
+       .scp_stop = mt8195_scp_c1_stop,
+       .scp_da_to_va = mt8192_scp_da_to_va,
+       .host_to_scp_reg = MT8192_GIPC_IN_SET,
+       .host_to_scp_int_bit = MT8195_CORE1_HOST_IPC_INT_BIT,
+};
+
+static const struct mtk_scp_of_data *mt8195_of_data_cores[] = {
+       &mt8195_of_data,
+       &mt8195_of_data_c1,
+       NULL
+};
+
 static const struct of_device_id mtk_scp_of_match[] = {
        { .compatible = "mediatek,mt8183-scp", .data = &mt8183_of_data },
        { .compatible = "mediatek,mt8186-scp", .data = &mt8186_of_data },
        { .compatible = "mediatek,mt8188-scp", .data = &mt8188_of_data },
        { .compatible = "mediatek,mt8192-scp", .data = &mt8192_of_data },
        { .compatible = "mediatek,mt8195-scp", .data = &mt8195_of_data },
+       { .compatible = "mediatek,mt8195-scp-dual", .data = &mt8195_of_data_cores },
        {},
 };
 MODULE_DEVICE_TABLE(of, mtk_scp_of_match);
index 9c7c17b9d181f1f9f59903ce2de9e5649dd07b8c..cd0b60106ec20681aa82f841be9c48295d6c0c4c 100644 (file)
@@ -177,7 +177,7 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
        mutex_lock(&scp->send_lock);
 
         /* Wait until SCP receives the last command */
-       ret = readl_poll_timeout_atomic(scp->reg_base + scp->data->host_to_scp_reg,
+       ret = readl_poll_timeout_atomic(scp->cluster->reg_base + scp->data->host_to_scp_reg,
                                        val, !val, 0, SCP_TIMEOUT_US);
        if (ret) {
                dev_err(scp->dev, "%s: IPI timeout!\n", __func__);
@@ -192,7 +192,7 @@ int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
        scp->ipi_id_ack[id] = false;
        /* send the command to SCP */
        writel(scp->data->host_to_scp_int_bit,
-              scp->reg_base + scp->data->host_to_scp_reg);
+              scp->cluster->reg_base + scp->data->host_to_scp_reg);
 
        if (wait) {
                /* wait for SCP's ACK */
index 22fe7b5f5236da08fb9cb2ba6991c4a07f4f904a..394b2c1cb5e218bad7a11c0dfbc0c25de4bebf99 100644 (file)
@@ -2322,7 +2322,6 @@ static const struct rproc_hexagon_res msm8996_mss = {
        },
        .proxy_clk_names = (char*[]){
                        "xo",
-                       "pnoc",
                        "qdss",
                        NULL
        },
index b5447dd2dd35e461620726a88d6ee4e9bc42502f..913a5d2068e8cef52db9cb381405ce829e4362cc 100644 (file)
@@ -813,6 +813,21 @@ static const struct adsp_data sm6350_adsp_resource = {
        .ssctl_id = 0x14,
 };
 
+static const struct adsp_data sm6375_mpss_resource = {
+       .crash_reason_smem = 421,
+       .firmware_name = "modem.mdt",
+       .pas_id = 4,
+       .minidump_id = 3,
+       .auto_boot = false,
+       .proxy_pd_names = (char*[]){
+               "cx",
+               NULL
+       },
+       .ssr_name = "mpss",
+       .sysmon_name = "modem",
+       .ssctl_id = 0x12,
+};
+
 static const struct adsp_data sm8150_adsp_resource = {
                .crash_reason_smem = 423,
                .firmware_name = "adsp.mdt",
@@ -1161,6 +1176,7 @@ static const struct of_device_id adsp_of_match[] = {
        { .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
        { .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
        { .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
+       { .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource},
        { .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
        { .compatible = "qcom,sc7280-mpss-pas", .data = &mpss_resource_init},
        { .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
@@ -1180,6 +1196,9 @@ static const struct of_device_id adsp_of_match[] = {
        { .compatible = "qcom,sm6350-adsp-pas", .data = &sm6350_adsp_resource},
        { .compatible = "qcom,sm6350-cdsp-pas", .data = &sm6350_cdsp_resource},
        { .compatible = "qcom,sm6350-mpss-pas", .data = &mpss_resource_init},
+       { .compatible = "qcom,sm6375-adsp-pas", .data = &sm6350_adsp_resource},
+       { .compatible = "qcom,sm6375-cdsp-pas", .data = &sm8150_cdsp_resource},
+       { .compatible = "qcom,sm6375-mpss-pas", .data = &sm6375_mpss_resource},
        { .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
        { .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
        { .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
index e3ce01d98b4c7d0fd710bfab1e8ca62839a656a8..cb163766c56d526333dabbdc5a42b09a1fe65546 100644 (file)
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
 #include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 #include <linux/remoteproc.h>
 #include <linux/reset.h>
@@ -341,7 +340,6 @@ static int st_rproc_parse_dt(struct platform_device *pdev)
 static int st_rproc_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       const struct of_device_id *match;
        struct st_rproc *ddata;
        struct device_node *np = dev->of_node;
        struct rproc *rproc;
@@ -349,19 +347,17 @@ static int st_rproc_probe(struct platform_device *pdev)
        int enabled;
        int ret, i;
 
-       match = of_match_device(st_rproc_match, dev);
-       if (!match || !match->data) {
-               dev_err(dev, "No device match found\n");
-               return -ENODEV;
-       }
-
        rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
        if (!rproc)
                return -ENOMEM;
 
        rproc->has_iommu = false;
        ddata = rproc->priv;
-       ddata->config = (struct st_rproc_config *)match->data;
+       ddata->config = (struct st_rproc_config *)device_get_match_data(dev);
+       if (!ddata->config) {
+               ret = -ENODEV;
+               goto free_rproc;
+       }
 
        platform_set_drvdata(pdev, rproc);
 
index 9d9b13530f78aa1193c68550cbecd9d7237e33d6..4f469f0bcf8b29d5cecbfa3acab4e83c0dd5c695 100644 (file)
@@ -712,9 +712,9 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev,
        unsigned int tzen;
        int err, irq;
 
-       irq = platform_get_irq(pdev, 0);
+       irq = platform_get_irq_optional(pdev, 0);
        if (irq == -EPROBE_DEFER)
-               return dev_err_probe(dev, irq, "failed to get interrupt\n");
+               return irq;
 
        if (irq > 0) {
                err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0,
index feca6de68da28ee930998fa4c9b39da1c90c057e..4395edea9a6467cf823484680f7e686ab3362a4f 100644 (file)
@@ -39,12 +39,14 @@ enum zynqmp_r5_cluster_mode {
  * struct mem_bank_data - Memory Bank description
  *
  * @addr: Start address of memory bank
+ * @da: device address
  * @size: Size of Memory bank
  * @pm_domain_id: Power-domains id of memory bank for firmware to turn on/off
  * @bank_name: name of the bank for remoteproc framework
  */
 struct mem_bank_data {
        phys_addr_t addr;
+       u32 da;
        size_t size;
        u32 pm_domain_id;
        char *bank_name;
@@ -75,11 +77,19 @@ struct mbox_info {
  * Hardcoded TCM bank values. This will be removed once TCM bindings are
  * accepted for system-dt specifications and upstreamed in linux kernel
  */
-static const struct mem_bank_data zynqmp_tcm_banks[] = {
-       {0xffe00000UL, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
-       {0xffe20000UL, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
-       {0xffe90000UL, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
-       {0xffeb0000UL, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
+static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
+       {0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
+       {0xffe20000UL, 0x20000, 0x10000UL, PD_R5_0_BTCM, "btcm0"},
+       {0xffe90000UL, 0x0, 0x10000UL, PD_R5_1_ATCM, "atcm1"},
+       {0xffeb0000UL, 0x20000, 0x10000UL, PD_R5_1_BTCM, "btcm1"},
+};
+
+/* In lockstep mode cluster combines each 64KB TCM and makes 128KB TCM */
+static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
+       {0xffe00000UL, 0x0, 0x20000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 128KB each */
+       {0xffe20000UL, 0x20000, 0x20000UL, PD_R5_0_BTCM, "btcm0"},
+       {0, 0, 0, PD_R5_1_ATCM, ""},
+       {0, 0, 0, PD_R5_1_BTCM, ""},
 };
 
 /**
@@ -526,30 +536,6 @@ static int tcm_mem_map(struct rproc *rproc,
        /* clear TCMs */
        memset_io(va, 0, mem->len);
 
-       /*
-        * The R5s expect their TCM banks to be at address 0x0 and 0x2000,
-        * while on the Linux side they are at 0xffexxxxx.
-        *
-        * Zero out the high 12 bits of the address. This will give
-        * expected values for TCM Banks 0A and 0B (0x0 and 0x20000).
-        */
-       mem->da &= 0x000fffff;
-
-       /*
-        * TCM Banks 1A and 1B still have to be translated.
-        *
-        * Below handle these two banks' absolute addresses (0xffe90000 and
-        * 0xffeb0000) and convert to the expected relative addresses
-        * (0x0 and 0x20000).
-        */
-       if (mem->da == 0x90000 || mem->da == 0xB0000)
-               mem->da -= 0x90000;
-
-       /* if translated TCM bank address is not valid report error */
-       if (mem->da != 0x0 && mem->da != 0x20000) {
-               dev_err(&rproc->dev, "invalid TCM address: %x\n", mem->da);
-               return -EINVAL;
-       }
        return 0;
 }
 
@@ -571,6 +557,7 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
        u32 pm_domain_id;
        size_t bank_size;
        char *bank_name;
+       u32 da;
 
        r5_core = rproc->priv;
        dev = r5_core->dev;
@@ -583,6 +570,7 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
         */
        for (i = 0; i < num_banks; i++) {
                bank_addr = r5_core->tcm_banks[i]->addr;
+               da = r5_core->tcm_banks[i]->da;
                bank_name = r5_core->tcm_banks[i]->bank_name;
                bank_size = r5_core->tcm_banks[i]->size;
                pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
@@ -595,11 +583,11 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
                        goto release_tcm_split;
                }
 
-               dev_dbg(dev, "TCM carveout split mode %s addr=%llx, size=0x%lx",
-                       bank_name, bank_addr, bank_size);
+               dev_dbg(dev, "TCM carveout split mode %s addr=%llx, da=0x%x, size=0x%lx",
+                       bank_name, bank_addr, da, bank_size);
 
                rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
-                                                bank_size, bank_addr,
+                                                bank_size, da,
                                                 tcm_mem_map, tcm_mem_unmap,
                                                 bank_name);
                if (!rproc_mem) {
@@ -640,6 +628,7 @@ static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
        struct device *dev;
        u32 pm_domain_id;
        char *bank_name;
+       u32 da;
 
        r5_core = rproc->priv;
        dev = r5_core->dev;
@@ -650,14 +639,11 @@ static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
        /*
         * In lockstep mode, TCM is contiguous memory block
         * However, each TCM block still needs to be enabled individually.
-        * So, Enable each TCM block individually, but add their size
-        * to create contiguous memory region.
+        * So, Enable each TCM block individually.
+        * Although ATCM and BTCM is contiguous memory block, add two separate
+        * carveouts for both.
         */
-       bank_addr = r5_core->tcm_banks[0]->addr;
-       bank_name = r5_core->tcm_banks[0]->bank_name;
-
        for (i = 0; i < num_banks; i++) {
-               bank_size += r5_core->tcm_banks[i]->size;
                pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
 
                /* Turn on each TCM bank individually */
@@ -668,23 +654,32 @@ static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
                        dev_err(dev, "failed to turn on TCM 0x%x", pm_domain_id);
                        goto release_tcm_lockstep;
                }
-       }
 
-       dev_dbg(dev, "TCM add carveout lockstep mode %s addr=0x%llx, size=0x%lx",
-               bank_name, bank_addr, bank_size);
-
-       /* Register TCM address range, TCM map and unmap functions */
-       rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
-                                        bank_size, bank_addr,
-                                        tcm_mem_map, tcm_mem_unmap,
-                                        bank_name);
-       if (!rproc_mem) {
-               ret = -ENOMEM;
-               goto release_tcm_lockstep;
-       }
+               bank_size = r5_core->tcm_banks[i]->size;
+               if (bank_size == 0)
+                       continue;
 
-       /* If registration is success, add carveouts */
-       rproc_add_carveout(rproc, rproc_mem);
+               bank_addr = r5_core->tcm_banks[i]->addr;
+               da = r5_core->tcm_banks[i]->da;
+               bank_name = r5_core->tcm_banks[i]->bank_name;
+
+               /* Register TCM address range, TCM map and unmap functions */
+               rproc_mem = rproc_mem_entry_init(dev, NULL, bank_addr,
+                                                bank_size, da,
+                                                tcm_mem_map, tcm_mem_unmap,
+                                                bank_name);
+               if (!rproc_mem) {
+                       ret = -ENOMEM;
+                       zynqmp_pm_release_node(pm_domain_id);
+                       goto release_tcm_lockstep;
+               }
+
+               /* If registration is success, add carveouts */
+               rproc_add_carveout(rproc, rproc_mem);
+
+               dev_dbg(dev, "TCM carveout lockstep mode %s addr=0x%llx, da=0x%x, size=0x%lx",
+                       bank_name, bank_addr, da, bank_size);
+       }
 
        return 0;
 
@@ -895,12 +890,19 @@ free_rproc:
  */
 static int zynqmp_r5_get_tcm_node(struct zynqmp_r5_cluster *cluster)
 {
+       const struct mem_bank_data *zynqmp_tcm_banks;
        struct device *dev = cluster->dev;
        struct zynqmp_r5_core *r5_core;
        int tcm_bank_count, tcm_node;
        int i, j;
 
-       tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks);
+       if (cluster->mode == SPLIT_MODE) {
+               zynqmp_tcm_banks = zynqmp_tcm_banks_split;
+               tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks_split);
+       } else {
+               zynqmp_tcm_banks = zynqmp_tcm_banks_lockstep;
+               tcm_bank_count = ARRAY_SIZE(zynqmp_tcm_banks_lockstep);
+       }
 
        /* count per core tcm banks */
        tcm_bank_count = tcm_bank_count / cluster->core_count;
index 32b550c91d9f1c7f1918774181654bcf3404407a..8abc7d022ff71ab7ea6f54eff5c712b89382cf38 100644 (file)
@@ -545,7 +545,7 @@ static int rpmsg_dev_probe(struct device *dev)
                goto out;
 
        if (rpdrv->callback) {
-               strncpy(chinfo.name, rpdev->id.name, RPMSG_NAME_SIZE);
+               strscpy(chinfo.name, rpdev->id.name, sizeof(chinfo.name));
                chinfo.src = rpdev->src;
                chinfo.dst = RPMSG_ADDR_ANY;
 
index c70ad03ff2e90f7ef350d9ac15238d5eee0e8580..bde8c8d433e0aa68b1c9686f9d6d82df961fd30c 100644 (file)
@@ -50,7 +50,7 @@ static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len,
        /* don't trust the remote processor for null terminating the name */
        msg->name[RPMSG_NAME_SIZE - 1] = '\0';
 
-       strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
+       strscpy_pad(chinfo.name, msg->name, sizeof(chinfo.name));
        chinfo.src = RPMSG_ADDR_ANY;
        chinfo.dst = rpmsg32_to_cpu(rpdev, msg->addr);
 
index 905ac7910c98f3f3dc1b1ac5d1b086e6e800fcb1..dc87965f8164167008a686f45f6084cce2c3ab53 100644 (file)
@@ -329,7 +329,7 @@ static int virtio_rpmsg_announce_create(struct rpmsg_device *rpdev)
            virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
                struct rpmsg_ns_msg nsm;
 
-               strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
+               strscpy_pad(nsm.name, rpdev->id.name, sizeof(nsm.name));
                nsm.addr = cpu_to_rpmsg32(rpdev, rpdev->ept->addr);
                nsm.flags = cpu_to_rpmsg32(rpdev, RPMSG_NS_CREATE);
 
@@ -353,7 +353,7 @@ static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev)
            virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
                struct rpmsg_ns_msg nsm;
 
-               strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
+               strscpy_pad(nsm.name, rpdev->id.name, sizeof(nsm.name));
                nsm.addr = cpu_to_rpmsg32(rpdev, rpdev->ept->addr);
                nsm.flags = cpu_to_rpmsg32(rpdev, RPMSG_NS_DESTROY);
 
@@ -424,7 +424,7 @@ static struct rpmsg_device *__rpmsg_create_channel(struct virtproc_info *vrp,
         */
        rpdev->announce = rpdev->src != RPMSG_ADDR_ANY;
 
-       strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);
+       strscpy(rpdev->id.name, chinfo->name, sizeof(rpdev->id.name));
 
        rpdev->dev.parent = &vrp->vdev->dev;
        rpdev->dev.release = virtio_rpmsg_release_device;
index d7502433c78aa318a1a8baf1b835d1d4e1e639f3..3814e0845e7729d9d584680e16abb4ec21f0dda7 100644 (file)
@@ -1351,7 +1351,7 @@ config RTC_DRV_DIGICOLOR
 
 config RTC_DRV_IMXDI
        tristate "Freescale IMX DryIce Real Time Clock"
-       depends on ARCH_MXC
+       depends on ARCH_MXC || COMPILE_TEST
        depends on OF
        help
           Support for Freescale IMX DryIce RTC
@@ -1984,4 +1984,15 @@ config RTC_DRV_POLARFIRE_SOC
          This driver can also be built as a module, if so, the module
          will be called "rtc-mpfs".
 
+config RTC_DRV_SSD202D
+       tristate "SigmaStar SSD202D RTC"
+       depends on ARCH_MSTARV7 || COMPILE_TEST
+       default ARCH_MSTARV7
+       help
+         If you say yes here you get support for the SigmaStar SSD202D On-Chip
+         Real Time Clock.
+
+         This driver can also be built as a module, if so, the module
+         will be called "rtc-ssd20xd".
+
 endif # RTC_CLASS
index fd209883ee2efd118c992f010fde9935c186c816..7b03c3abfd786eb03356566fc839aff542da7442 100644 (file)
@@ -103,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_MESON) += rtc-meson.o
 obj-$(CONFIG_RTC_DRV_MOXART)   += rtc-moxart.o
 obj-$(CONFIG_RTC_DRV_MPC5121)  += rtc-mpc5121.o
 obj-$(CONFIG_RTC_DRV_MSC313)   += rtc-msc313.o
+obj-$(CONFIG_RTC_DRV_SSD202D)  += rtc-ssd202d.o
 obj-$(CONFIG_RTC_DRV_MSM6242)  += rtc-msm6242.o
 obj-$(CONFIG_RTC_DRV_MT2712)   += rtc-mt2712.o
 obj-$(CONFIG_RTC_DRV_MT6397)   += rtc-mt6397.o
index add4f71d7b3b98db7fa0e5e993add4ba0448ecdf..c16fe711a0d94f8abfe301f0b5d74901d182bed0 100644 (file)
@@ -558,7 +558,7 @@ err_clk:
 /*
  * Disable and remove the RTC driver
  */
-static int __exit at91_rtc_remove(struct platform_device *pdev)
+static void __exit at91_rtc_remove(struct platform_device *pdev)
 {
        /* Disable all interrupts */
        at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
@@ -566,8 +566,6 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
                                        AT91_RTC_CALEV);
 
        clk_disable_unprepare(sclk);
-
-       return 0;
 }
 
 static void at91_rtc_shutdown(struct platform_device *pdev)
@@ -635,8 +633,14 @@ static int at91_rtc_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(at91_rtc_pm_ops, at91_rtc_suspend, at91_rtc_resume);
 
-static struct platform_driver at91_rtc_driver = {
-       .remove         = __exit_p(at91_rtc_remove),
+/*
+ * at91_rtc_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver at91_rtc_driver __refdata = {
+       .remove_new     = __exit_p(at91_rtc_remove),
        .shutdown       = at91_rtc_shutdown,
        .driver         = {
                .name   = "at91_rtc",
index 3cdc015692ca639738c61943853c88b3782f0e83..1a65a4e0dc00350bcf86812d46f811fbc912956f 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright © 2014-2017 Broadcom
+ * Copyright © 2014-2023 Broadcom
  */
 
 #define pr_fmt(fmt)    KBUILD_MODNAME ": " fmt
@@ -34,6 +34,7 @@ struct brcmstb_waketmr {
        u32 rate;
        unsigned long rtc_alarm;
        bool alarm_en;
+       bool alarm_expired;
 };
 
 #define BRCMSTB_WKTMR_EVENT            0x00
@@ -64,6 +65,11 @@ static inline void brcmstb_waketmr_clear_alarm(struct brcmstb_waketmr *timer)
        writel_relaxed(reg - 1, timer->base + BRCMSTB_WKTMR_ALARM);
        writel_relaxed(WKTMR_ALARM_EVENT, timer->base + BRCMSTB_WKTMR_EVENT);
        (void)readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT);
+       if (timer->alarm_expired) {
+               timer->alarm_expired = false;
+               /* maintain call balance */
+               enable_irq(timer->alarm_irq);
+       }
 }
 
 static void brcmstb_waketmr_set_alarm(struct brcmstb_waketmr *timer,
@@ -105,10 +111,17 @@ static irqreturn_t brcmstb_alarm_irq(int irq, void *data)
                return IRQ_HANDLED;
 
        if (timer->alarm_en) {
-               if (!device_may_wakeup(timer->dev))
+               if (device_may_wakeup(timer->dev)) {
+                       disable_irq_nosync(irq);
+                       timer->alarm_expired = true;
+               } else {
                        writel_relaxed(WKTMR_ALARM_EVENT,
                                       timer->base + BRCMSTB_WKTMR_EVENT);
+               }
                rtc_update_irq(timer->rtc, 1, RTC_IRQF | RTC_AF);
+       } else {
+               writel_relaxed(WKTMR_ALARM_EVENT,
+                              timer->base + BRCMSTB_WKTMR_EVENT);
        }
 
        return IRQ_HANDLED;
@@ -221,8 +234,14 @@ static int brcmstb_waketmr_alarm_enable(struct device *dev,
                    !brcmstb_waketmr_is_pending(timer))
                        return -EINVAL;
                timer->alarm_en = true;
-               if (timer->alarm_irq)
+               if (timer->alarm_irq) {
+                       if (timer->alarm_expired) {
+                               timer->alarm_expired = false;
+                               /* maintain call balance */
+                               enable_irq(timer->alarm_irq);
+                       }
                        enable_irq(timer->alarm_irq);
+               }
        } else if (!enabled && timer->alarm_en) {
                if (timer->alarm_irq)
                        disable_irq(timer->alarm_irq);
@@ -352,6 +371,17 @@ static int brcmstb_waketmr_suspend(struct device *dev)
        return brcmstb_waketmr_prepare_suspend(timer);
 }
 
+static int brcmstb_waketmr_suspend_noirq(struct device *dev)
+{
+       struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
+
+       /* Catch any alarms occurring prior to noirq */
+       if (timer->alarm_expired && device_may_wakeup(dev))
+               return -EBUSY;
+
+       return 0;
+}
+
 static int brcmstb_waketmr_resume(struct device *dev)
 {
        struct brcmstb_waketmr *timer = dev_get_drvdata(dev);
@@ -368,10 +398,17 @@ static int brcmstb_waketmr_resume(struct device *dev)
 
        return ret;
 }
+#else
+#define brcmstb_waketmr_suspend                NULL
+#define brcmstb_waketmr_suspend_noirq  NULL
+#define brcmstb_waketmr_resume         NULL
 #endif /* CONFIG_PM_SLEEP */
 
-static SIMPLE_DEV_PM_OPS(brcmstb_waketmr_pm_ops,
-                        brcmstb_waketmr_suspend, brcmstb_waketmr_resume);
+static const struct dev_pm_ops brcmstb_waketmr_pm_ops = {
+       .suspend        = brcmstb_waketmr_suspend,
+       .suspend_noirq  = brcmstb_waketmr_suspend_noirq,
+       .resume         = brcmstb_waketmr_resume,
+};
 
 static const __maybe_unused struct of_device_id brcmstb_waketmr_of_match[] = {
        { .compatible = "brcm,brcmstb-waketimer" },
index dc6b0f4a54e2ea92ac865dbdbde89ec26a6c7e0c..fa8bf82df9488e7d1c23c058b4a3032dde74bc6e 100644 (file)
@@ -227,7 +227,7 @@ static int efi_procfs(struct device *dev, struct seq_file *seq)
                           enabled == 1 ? "yes" : "no",
                           pending == 1 ? "yes" : "no");
 
-               if (eft.timezone == EFI_UNSPECIFIED_TIMEZONE)
+               if (alm.timezone == EFI_UNSPECIFIED_TIMEZONE)
                        seq_puts(seq, "Timezone\t: unspecified\n");
                else
                        /* XXX fixme: convert to string? */
index acae7f16808f76f0493c330d893d61d8bbb95270..1fdd20d01560ef4fae2b996cf366b4f812329225 100644 (file)
@@ -7,6 +7,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/mod_devicetable.h>
 #include <linux/rtc.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
@@ -148,9 +149,16 @@ static int ep93xx_rtc_probe(struct platform_device *pdev)
        return devm_rtc_register_device(ep93xx_rtc->rtc);
 }
 
+static const struct of_device_id ep93xx_rtc_of_ids[] = {
+       { .compatible = "cirrus,ep9301-rtc" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ep93xx_rtc_of_ids);
+
 static struct platform_driver ep93xx_rtc_driver = {
        .driver         = {
                .name   = "ep93xx-rtc",
+               .of_match_table = ep93xx_rtc_of_ids,
        },
        .probe          = ep93xx_rtc_probe,
 };
index 4b712e5ab08a022e3ae963c5f52d14882c77c939..284011c419db9722634d51f50eeb5979b6665ec1 100644 (file)
@@ -830,7 +830,7 @@ err:
        return rc;
 }
 
-static int __exit dryice_rtc_remove(struct platform_device *pdev)
+static void __exit dryice_rtc_remove(struct platform_device *pdev)
 {
        struct imxdi_dev *imxdi = platform_get_drvdata(pdev);
 
@@ -840,8 +840,6 @@ static int __exit dryice_rtc_remove(struct platform_device *pdev)
        writel(0, imxdi->ioaddr + DIER);
 
        clk_disable_unprepare(imxdi->clk);
-
-       return 0;
 }
 
 static const struct of_device_id dryice_dt_ids[] = {
@@ -851,12 +849,18 @@ static const struct of_device_id dryice_dt_ids[] = {
 
 MODULE_DEVICE_TABLE(of, dryice_dt_ids);
 
-static struct platform_driver dryice_rtc_driver = {
+/*
+ * dryice_rtc_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver dryice_rtc_driver __refdata = {
        .driver = {
                   .name = "imxdi_rtc",
                   .of_match_table = dryice_dt_ids,
                   },
-       .remove = __exit_p(dryice_rtc_remove),
+       .remove_new = __exit_p(dryice_rtc_remove),
 };
 
 module_platform_driver_probe(dryice_rtc_driver, dryice_rtc_probe);
index 6c526e2ec56d8ae5313c03ea88ec4ad2bd0b87bf..db31da56bfa74d0851f51a08d7b9175dba2468a4 100644 (file)
@@ -282,7 +282,7 @@ out:
        return ret;
 }
 
-static int __exit mv_rtc_remove(struct platform_device *pdev)
+static void __exit mv_rtc_remove(struct platform_device *pdev)
 {
        struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
 
@@ -291,8 +291,6 @@ static int __exit mv_rtc_remove(struct platform_device *pdev)
 
        if (!IS_ERR(pdata->clk))
                clk_disable_unprepare(pdata->clk);
-
-       return 0;
 }
 
 #ifdef CONFIG_OF
@@ -303,8 +301,14 @@ static const struct of_device_id rtc_mv_of_match_table[] = {
 MODULE_DEVICE_TABLE(of, rtc_mv_of_match_table);
 #endif
 
-static struct platform_driver mv_rtc_driver = {
-       .remove         = __exit_p(mv_rtc_remove),
+/*
+ * mv_rtc_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver mv_rtc_driver __refdata = {
+       .remove_new     = __exit_p(mv_rtc_remove),
        .driver         = {
                .name   = "rtc-mv",
                .of_match_table = of_match_ptr(rtc_mv_of_match_table),
index 5b10ab06cd2e2fa7df95808768fc4606b674b08c..c6155c48a4ac4d627c1ac9bca480ea816f04f9e1 100644 (file)
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinconf.h>
 #include <linux/pinctrl/pinconf-generic.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/property.h>
 #include <linux/rtc.h>
 #include <linux/rtc/rtc-omap.h>
 
@@ -729,16 +729,14 @@ static int omap_rtc_probe(struct platform_device *pdev)
        struct omap_rtc *rtc;
        u8 reg, mask, new_ctrl;
        const struct platform_device_id *id_entry;
-       const struct of_device_id *of_id;
        int ret;
 
        rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
        if (!rtc)
                return -ENOMEM;
 
-       of_id = of_match_device(omap_rtc_of_match, &pdev->dev);
-       if (of_id) {
-               rtc->type = of_id->data;
+       rtc->type = device_get_match_data(&pdev->dev);
+       if (rtc->type) {
                rtc->is_pmic_controller = rtc->type->has_pmic_mode &&
                        of_device_is_system_power_controller(pdev->dev.of_node);
        } else {
index 8c7a98a5452c6a2020d326499696fb4f84c9a97e..d6651611a0c6a62446fca1644333626072601ca9 100644 (file)
@@ -166,13 +166,7 @@ static int __init pcap_rtc_probe(struct platform_device *pdev)
        return devm_rtc_register_device(pcap_rtc->rtc);
 }
 
-static int __exit pcap_rtc_remove(struct platform_device *pdev)
-{
-       return 0;
-}
-
 static struct platform_driver pcap_rtc_driver = {
-       .remove = __exit_p(pcap_rtc_remove),
        .driver = {
                .name  = "pcap-rtc",
        },
index 06194674d71c57510cf1747e8fae45c32e14f84c..540042b9eec8f53fd4bdbd3db0f58ddae1a6df78 100644 (file)
@@ -438,7 +438,7 @@ static int pcf85363_probe(struct i2c_client *client)
        if (client->irq > 0 || wakeup_source) {
                regmap_write(pcf85363->regmap, CTRL_FLAGS, 0);
                regmap_update_bits(pcf85363->regmap, CTRL_PIN_IO,
-                                  PIN_IO_INTA_OUT, PIN_IO_INTAPM);
+                                  PIN_IO_INTAPM, PIN_IO_INTA_OUT);
        }
 
        if (client->irq > 0) {
index e400c78252e8276f427def6ee34a5bf8fd9dccb1..cdb39fc4cab52c7349f010632f0a88e2713e9c7f 100644 (file)
@@ -365,12 +365,11 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __exit pxa_rtc_remove(struct platform_device *pdev)
+static void __exit pxa_rtc_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
 
        pxa_rtc_release(dev);
-       return 0;
 }
 
 #ifdef CONFIG_OF
@@ -403,8 +402,14 @@ static int pxa_rtc_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(pxa_rtc_pm_ops, pxa_rtc_suspend, pxa_rtc_resume);
 
-static struct platform_driver pxa_rtc_driver = {
-       .remove         = __exit_p(pxa_rtc_remove),
+/*
+ * pxa_rtc_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver pxa_rtc_driver __refdata = {
+       .remove_new     = __exit_p(pxa_rtc_remove),
        .driver         = {
                .name   = "pxa-rtc",
                .of_match_table = of_match_ptr(pxa_rtc_dt_ids),
index 5dbaeb7af648b7db5ef0470d0a5a7874653e35c9..ef913cf8593f19e142987b7df176a359c8a01fd5 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/mod_devicetable.h>
 #include <linux/delay.h>
+#include <linux/property.h>
 #include <linux/regmap.h>
 #include <linux/platform_device.h>
 #include <linux/rtc.h>
@@ -55,12 +56,23 @@ struct rtc7301_priv {
        u8 bank;
 };
 
-static const struct regmap_config rtc7301_regmap_config = {
+/*
+ * When the device is memory-mapped, some platforms pack the registers into
+ * 32-bit access using the lower 8 bits at each 4-byte stride, while others
+ * expose them as simply consecutive bytes.
+ */
+static const struct regmap_config rtc7301_regmap_32_config = {
        .reg_bits = 32,
        .val_bits = 8,
        .reg_stride = 4,
 };
 
+static const struct regmap_config rtc7301_regmap_8_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .reg_stride = 1,
+};
+
 static u8 rtc7301_read(struct rtc7301_priv *priv, unsigned int reg)
 {
        int reg_stride = regmap_get_reg_stride(priv->regmap);
@@ -356,7 +368,9 @@ static int __init rtc7301_rtc_probe(struct platform_device *dev)
        void __iomem *regs;
        struct rtc7301_priv *priv;
        struct rtc_device *rtc;
+       static const struct regmap_config *mapconf;
        int ret;
+       u32 val;
 
        priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
@@ -366,8 +380,25 @@ static int __init rtc7301_rtc_probe(struct platform_device *dev)
        if (IS_ERR(regs))
                return PTR_ERR(regs);
 
+       ret = device_property_read_u32(&dev->dev, "reg-io-width", &val);
+       if (ret)
+               /* Default to 32bit accesses */
+               val = 4;
+
+       switch (val) {
+       case 1:
+               mapconf = &rtc7301_regmap_8_config;
+               break;
+       case 4:
+               mapconf = &rtc7301_regmap_32_config;
+               break;
+       default:
+               dev_err(&dev->dev, "invalid reg-io-width %d\n", val);
+               return -EINVAL;
+       }
+
        priv->regmap = devm_regmap_init_mmio(&dev->dev, regs,
-                                            &rtc7301_regmap_config);
+                                            mapconf);
        if (IS_ERR(priv->regmap))
                return PTR_ERR(priv->regmap);
 
index cd146b5741431f0c72448af9d1ad8f170800c7c2..27a191fa3704c6a3129fda18decfc50498ab693b 100644 (file)
@@ -469,7 +469,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
 {
        struct sh_rtc *rtc;
        struct resource *res;
-       char clk_name[6];
+       char clk_name[14];
        int clk_id, ret;
 
        rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
@@ -620,7 +620,7 @@ err_unmap:
        return ret;
 }
 
-static int __exit sh_rtc_remove(struct platform_device *pdev)
+static void __exit sh_rtc_remove(struct platform_device *pdev)
 {
        struct sh_rtc *rtc = platform_get_drvdata(pdev);
 
@@ -628,8 +628,6 @@ static int __exit sh_rtc_remove(struct platform_device *pdev)
        sh_rtc_setcie(&pdev->dev, 0);
 
        clk_disable(rtc->clk);
-
-       return 0;
 }
 
 static void sh_rtc_set_irq_wake(struct device *dev, int enabled)
@@ -668,13 +666,19 @@ static const struct of_device_id sh_rtc_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, sh_rtc_of_match);
 
-static struct platform_driver sh_rtc_platform_driver = {
+/*
+ * sh_rtc_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. So mark the driver struct with __refdata to prevent modpost
+ * triggering a section mismatch warning.
+ */
+static struct platform_driver sh_rtc_platform_driver __refdata = {
        .driver         = {
                .name   = DRV_NAME,
                .pm     = &sh_rtc_pm_ops,
                .of_match_table = sh_rtc_of_match,
        },
-       .remove         = __exit_p(sh_rtc_remove),
+       .remove_new     = __exit_p(sh_rtc_remove),
 };
 
 module_platform_driver_probe(sh_rtc_platform_driver, sh_rtc_probe);
diff --git a/drivers/rtc/rtc-ssd202d.c b/drivers/rtc/rtc-ssd202d.c
new file mode 100644 (file)
index 0000000..ed64932
--- /dev/null
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Real time clocks driver for MStar/SigmaStar SSD202D SoCs.
+ *
+ * (C) 2021 Daniel Palmer
+ * (C) 2023 Romain Perier
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/regmap.h>
+#include <linux/pm.h>
+
+#define REG_CTRL       0x0
+#define REG_CTRL1      0x4
+#define REG_ISO_CTRL   0xc
+#define REG_WRDATA_L   0x10
+#define REG_WRDATA_H   0x14
+#define REG_ISOACK     0x20
+#define REG_RDDATA_L   0x24
+#define REG_RDDATA_H   0x28
+#define REG_RDCNT_L    0x30
+#define REG_RDCNT_H    0x34
+#define REG_CNT_TRIG   0x38
+#define REG_PWRCTRL    0x3c
+#define REG_RTC_TEST   0x54
+
+#define CNT_RD_TRIG_BIT BIT(0)
+#define CNT_RD_BIT BIT(0)
+#define BASE_WR_BIT BIT(1)
+#define BASE_RD_BIT BIT(2)
+#define CNT_RST_BIT BIT(3)
+#define ISO_CTRL_ACK_MASK BIT(3)
+#define ISO_CTRL_ACK_SHIFT 3
+#define SW0_WR_BIT BIT(5)
+#define SW1_WR_BIT BIT(6)
+#define SW0_RD_BIT BIT(7)
+#define SW1_RD_BIT BIT(8)
+
+#define ISO_CTRL_MASK GENMASK(2, 0)
+
+struct ssd202d_rtc {
+       struct rtc_device *rtc_dev;
+       void __iomem *base;
+};
+
+static u8 read_iso_en(void __iomem *base)
+{
+       return readb(base + REG_RTC_TEST) & 0x1;
+}
+
+static u8 read_iso_ctrl_ack(void __iomem *base)
+{
+       return (readb(base + REG_ISOACK) & ISO_CTRL_ACK_MASK) >> ISO_CTRL_ACK_SHIFT;
+}
+
+static int ssd202d_rtc_isoctrl(struct ssd202d_rtc *priv)
+{
+       static const unsigned int sequence[] = { 0x0, 0x1, 0x3, 0x7, 0x5, 0x1, 0x0 };
+       unsigned int val;
+       struct device *dev = &priv->rtc_dev->dev;
+       int i, ret;
+
+       /*
+        * This gates iso_en by writing a special sequence of bytes to iso_ctrl
+        * and ensuring that it has been correctly applied by reading iso_ctrl_ack
+        */
+       for (i = 0; i < ARRAY_SIZE(sequence); i++) {
+               writeb(sequence[i] & ISO_CTRL_MASK, priv->base +  REG_ISO_CTRL);
+
+               ret = read_poll_timeout(read_iso_ctrl_ack, val, val == (i % 2), 100,
+                                       20 * 100, true, priv->base);
+               if (ret) {
+                       dev_dbg(dev, "Timeout waiting for ack byte %i (%x) of sequence\n", i,
+                               sequence[i]);
+                       return ret;
+               }
+       }
+
+       /*
+        * At this point iso_en should be raised for 1ms
+        */
+       ret = read_poll_timeout(read_iso_en, val, val, 100, 22 * 100, true, priv->base);
+       if (ret)
+               dev_dbg(dev, "Timeout waiting for iso_en\n");
+       mdelay(2);
+       return 0;
+}
+
+static void ssd202d_rtc_read_reg(struct ssd202d_rtc *priv, unsigned int reg,
+                                unsigned int field, unsigned int *base)
+{
+       unsigned int l, h;
+       u16 val;
+
+       /* Ask for the content of an RTC value into RDDATA by gating iso_en,
+        * then iso_en is gated and the content of RDDATA can be read
+        */
+       val = readw(priv->base + reg);
+       writew(val | field, priv->base + reg);
+       ssd202d_rtc_isoctrl(priv);
+       writew(val & ~field, priv->base + reg);
+
+       l = readw(priv->base + REG_RDDATA_L);
+       h = readw(priv->base + REG_RDDATA_H);
+
+       *base = (h << 16) | l;
+}
+
+static void ssd202d_rtc_write_reg(struct ssd202d_rtc *priv, unsigned int reg,
+                                 unsigned int field, u32 base)
+{
+       u16 val;
+
+       /* Set the content of an RTC value from WRDATA by gating iso_en */
+       val = readw(priv->base + reg);
+       writew(val | field, priv->base + reg);
+       writew(base, priv->base + REG_WRDATA_L);
+       writew(base >> 16, priv->base + REG_WRDATA_H);
+       ssd202d_rtc_isoctrl(priv);
+       writew(val & ~field, priv->base + reg);
+}
+
+static int ssd202d_rtc_read_counter(struct ssd202d_rtc *priv, unsigned int *counter)
+{
+       unsigned int l, h;
+       u16 val;
+
+       val = readw(priv->base + REG_CTRL1);
+       writew(val | CNT_RD_BIT, priv->base + REG_CTRL1);
+       ssd202d_rtc_isoctrl(priv);
+       writew(val & ~CNT_RD_BIT, priv->base + REG_CTRL1);
+
+       val = readw(priv->base + REG_CTRL1);
+       writew(val | CNT_RD_TRIG_BIT, priv->base + REG_CNT_TRIG);
+       writew(val & ~CNT_RD_TRIG_BIT, priv->base + REG_CNT_TRIG);
+
+       l = readw(priv->base + REG_RDCNT_L);
+       h = readw(priv->base + REG_RDCNT_H);
+
+       *counter = (h << 16) | l;
+
+       return 0;
+}
+
+static int ssd202d_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+       struct ssd202d_rtc *priv = dev_get_drvdata(dev);
+       unsigned int sw0, base, counter;
+       u32 seconds;
+       int ret;
+
+       /* Check that RTC is enabled by SW */
+       ssd202d_rtc_read_reg(priv, REG_CTRL, SW0_RD_BIT, &sw0);
+       if (sw0 != 1)
+               return -EINVAL;
+
+       /* Get RTC base value from RDDATA */
+       ssd202d_rtc_read_reg(priv, REG_CTRL, BASE_RD_BIT, &base);
+       /* Get RTC counter value from RDDATA */
+       ret = ssd202d_rtc_read_counter(priv, &counter);
+       if (ret)
+               return ret;
+
+       seconds = base + counter;
+
+       rtc_time64_to_tm(seconds, tm);
+
+       return 0;
+}
+
+static int ssd202d_rtc_reset_counter(struct ssd202d_rtc *priv)
+{
+       u16 val;
+
+       val = readw(priv->base + REG_CTRL);
+       writew(val | CNT_RST_BIT, priv->base + REG_CTRL);
+       ssd202d_rtc_isoctrl(priv);
+       writew(val & ~CNT_RST_BIT, priv->base + REG_CTRL);
+       ssd202d_rtc_isoctrl(priv);
+
+       return 0;
+}
+
+static int ssd202d_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+       struct ssd202d_rtc *priv = dev_get_drvdata(dev);
+       unsigned long seconds = rtc_tm_to_time64(tm);
+
+       ssd202d_rtc_write_reg(priv, REG_CTRL, BASE_WR_BIT, seconds);
+       ssd202d_rtc_reset_counter(priv);
+       ssd202d_rtc_write_reg(priv, REG_CTRL, SW0_WR_BIT, 1);
+
+       return 0;
+}
+
+static const struct rtc_class_ops ssd202d_rtc_ops = {
+       .read_time = ssd202d_rtc_read_time,
+       .set_time = ssd202d_rtc_set_time,
+};
+
+static int ssd202d_rtc_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ssd202d_rtc *priv;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(struct ssd202d_rtc), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->base = devm_platform_ioremap_resource(pdev, 0);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
+
+       priv->rtc_dev = devm_rtc_allocate_device(dev);
+       if (IS_ERR(priv->rtc_dev))
+               return PTR_ERR(priv->rtc_dev);
+
+       priv->rtc_dev->ops = &ssd202d_rtc_ops;
+       priv->rtc_dev->range_max = U32_MAX;
+
+       platform_set_drvdata(pdev, priv);
+
+       return devm_rtc_register_device(priv->rtc_dev);
+}
+
+static const struct of_device_id ssd202d_rtc_of_match_table[] = {
+       { .compatible = "mstar,ssd202d-rtc" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, ssd202d_rtc_of_match_table);
+
+static struct platform_driver ssd202d_rtc_driver = {
+       .probe = ssd202d_rtc_probe,
+       .driver = {
+               .name = "ssd202d-rtc",
+               .of_match_table = ssd202d_rtc_of_match_table,
+       },
+};
+module_platform_driver(ssd202d_rtc_driver);
+
+MODULE_AUTHOR("Daniel Palmer <daniel@thingy.jp>");
+MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>");
+MODULE_DESCRIPTION("MStar SSD202D RTC Driver");
+MODULE_LICENSE("GPL");
index d09e08b71cfba0792cc35f6138b5b4344c79f1f3..5dd33155d5d5038d16c53b37538d369a9890612e 100644 (file)
@@ -352,7 +352,7 @@ EXPORT_SYMBOL(ap_test_config_ctrl_domain);
 /*
  * ap_queue_info(): Check and get AP queue info.
  * Returns: 1 if APQN exists and info is filled,
- *         0 if APQN seems to exit but there is no info
+ *         0 if APQN seems to exist but there is no info
  *           available (eg. caused by an asynch pending error)
  *        -1 invalid APQN, TAPQ error or AP queue status which
  *           indicates there is no APQN.
@@ -373,36 +373,33 @@ static int ap_queue_info(ap_qid_t qid, int *q_type, unsigned int *q_fac,
        /* call TAPQ on this APQN */
        status = ap_test_queue(qid, ap_apft_available(), &tapq_info);
 
-       /* handle pending async error with return 'no info available' */
-       if (status.async)
-               return 0;
-
        switch (status.response_code) {
        case AP_RESPONSE_NORMAL:
        case AP_RESPONSE_RESET_IN_PROGRESS:
        case AP_RESPONSE_DECONFIGURED:
        case AP_RESPONSE_CHECKSTOPPED:
        case AP_RESPONSE_BUSY:
-               /*
-                * According to the architecture in all these cases the
-                * info should be filled. All bits 0 is not possible as
-                * there is at least one of the mode bits set.
-                */
-               if (WARN_ON_ONCE(!tapq_info.value))
-                       return 0;
-               *q_type = tapq_info.at;
-               *q_fac = tapq_info.fac;
-               *q_depth = tapq_info.qd;
-               *q_ml = tapq_info.ml;
-               *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
-               *q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
-               return 1;
+               /* For all these RCs the tapq info should be available */
+               break;
        default:
-               /*
-                * A response code which indicates, there is no info available.
-                */
-               return -1;
+               /* On a pending async error the info should be available */
+               if (!status.async)
+                       return -1;
+               break;
        }
+
+       /* There should be at least one of the mode bits set */
+       if (WARN_ON_ONCE(!tapq_info.value))
+               return 0;
+
+       *q_type = tapq_info.at;
+       *q_fac = tapq_info.fac;
+       *q_depth = tapq_info.qd;
+       *q_ml = tapq_info.ml;
+       *q_decfg = status.response_code == AP_RESPONSE_DECONFIGURED;
+       *q_cstop = status.response_code == AP_RESPONSE_CHECKSTOPPED;
+
+       return 1;
 }
 
 void ap_wait(enum ap_sm_wait wait)
@@ -1022,6 +1019,10 @@ EXPORT_SYMBOL(ap_driver_unregister);
 
 void ap_bus_force_rescan(void)
 {
+       /* Only trigger AP bus scans after the initial scan is done */
+       if (atomic64_read(&ap_scan_bus_count) <= 0)
+               return;
+
        /* processing a asynchronous bus rescan */
        del_timer(&ap_config_timer);
        queue_work(system_long_wq, &ap_scan_work);
index 359a35f894d5189bc8ec2142a532a9d0b72684c7..b0771ca0849b99dec76ac84c07b85e28d5809312 100644 (file)
@@ -206,7 +206,6 @@ struct ap_queue {
        bool config;                    /* configured state */
        bool chkstop;                   /* checkstop state */
        ap_qid_t qid;                   /* AP queue id. */
-       bool interrupt;                 /* indicate if interrupts are enabled */
        bool se_bound;                  /* SE bound state */
        unsigned int assoc_idx;         /* SE association index */
        int queue_count;                /* # messages currently on AP queue. */
index 993240370ecf7c478639849d12716bb388b5815f..3934a0cc13e7629735f5e8d200b98f4a5451e5f5 100644 (file)
@@ -200,13 +200,13 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
                        return AP_SM_WAIT_AGAIN;
                }
                aq->sm_state = AP_SM_STATE_IDLE;
-               return AP_SM_WAIT_NONE;
+               break;
        case AP_RESPONSE_NO_PENDING_REPLY:
                if (aq->queue_count > 0)
-                       return aq->interrupt ?
+                       return status.irq_enabled ?
                                AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
                aq->sm_state = AP_SM_STATE_IDLE;
-               return AP_SM_WAIT_NONE;
+               break;
        default:
                aq->dev_state = AP_DEV_STATE_ERROR;
                aq->last_err_rc = status.response_code;
@@ -215,6 +215,16 @@ static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
                            AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
                return AP_SM_WAIT_NONE;
        }
+       /* Check and maybe enable irq support (again) on this queue */
+       if (!status.irq_enabled && status.queue_empty) {
+               void *lsi_ptr = ap_airq_ptr();
+
+               if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0) {
+                       aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
+                       return AP_SM_WAIT_AGAIN;
+               }
+       }
+       return AP_SM_WAIT_NONE;
 }
 
 /**
@@ -254,7 +264,7 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
                fallthrough;
        case AP_RESPONSE_Q_FULL:
                aq->sm_state = AP_SM_STATE_QUEUE_FULL;
-               return aq->interrupt ?
+               return status.irq_enabled ?
                        AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
        case AP_RESPONSE_RESET_IN_PROGRESS:
                aq->sm_state = AP_SM_STATE_RESET_WAIT;
@@ -307,7 +317,6 @@ static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
        case AP_RESPONSE_NORMAL:
        case AP_RESPONSE_RESET_IN_PROGRESS:
                aq->sm_state = AP_SM_STATE_RESET_WAIT;
-               aq->interrupt = false;
                aq->rapq_fbit = 0;
                aq->se_bound = false;
                return AP_SM_WAIT_LOW_TIMEOUT;
@@ -383,7 +392,6 @@ static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
 
        if (status.irq_enabled == 1) {
                /* Irqs are now enabled */
-               aq->interrupt = true;
                aq->sm_state = (aq->queue_count > 0) ?
                        AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
        }
@@ -626,16 +634,21 @@ static ssize_t interrupt_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
 {
        struct ap_queue *aq = to_ap_queue(dev);
+       struct ap_queue_status status;
        int rc = 0;
 
        spin_lock_bh(&aq->lock);
-       if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
+       if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT) {
                rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
-       else if (aq->interrupt)
-               rc = sysfs_emit(buf, "Interrupts enabled.\n");
-       else
-               rc = sysfs_emit(buf, "Interrupts disabled.\n");
+       } else {
+               status = ap_tapq(aq->qid, NULL);
+               if (status.irq_enabled)
+                       rc = sysfs_emit(buf, "Interrupts enabled.\n");
+               else
+                       rc = sysfs_emit(buf, "Interrupts disabled.\n");
+       }
        spin_unlock_bh(&aq->lock);
+
        return rc;
 }
 
@@ -1032,7 +1045,6 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
        if (ap_sb_available() && is_prot_virt_guest())
                aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
        aq->qid = qid;
-       aq->interrupt = false;
        spin_lock_init(&aq->lock);
        INIT_LIST_HEAD(&aq->pendingq);
        INIT_LIST_HEAD(&aq->requestq);
index c815722d0ac87ea1eebdb50b52db00b7fb26980c..050462d952224455860f4274b8d3e12ba602cba6 100644 (file)
@@ -52,7 +52,7 @@ static ssize_t online_show(struct device *dev,
 {
        struct zcrypt_card *zc = dev_get_drvdata(dev);
        struct ap_card *ac = to_ap_card(dev);
-       int online = ac->config && zc->online ? 1 : 0;
+       int online = ac->config && !ac->chkstop && zc->online ? 1 : 0;
 
        return sysfs_emit(buf, "%d\n", online);
 }
@@ -70,7 +70,7 @@ static ssize_t online_store(struct device *dev,
        if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
                return -EINVAL;
 
-       if (online && !ac->config)
+       if (online && (!ac->config || ac->chkstop))
                return -ENODEV;
 
        zc->online = online;
index 112a80e8e6c2c394ae88f300a5fde6364b5390bd..67d8e0ae0eec5e867b6c7261634df5af993fb7b9 100644 (file)
@@ -42,7 +42,7 @@ static ssize_t online_show(struct device *dev,
 {
        struct zcrypt_queue *zq = dev_get_drvdata(dev);
        struct ap_queue *aq = to_ap_queue(dev);
-       int online = aq->config && zq->online ? 1 : 0;
+       int online = aq->config && !aq->chkstop && zq->online ? 1 : 0;
 
        return sysfs_emit(buf, "%d\n", online);
 }
@@ -59,7 +59,8 @@ static ssize_t online_store(struct device *dev,
        if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1)
                return -EINVAL;
 
-       if (online && (!aq->config || !aq->card->config))
+       if (online && (!aq->config || !aq->card->config ||
+                      aq->chkstop || aq->card->chkstop))
                return -ENODEV;
        if (online && !zc->online)
                return -EINVAL;
index 6af2511e070cc2405230a0223cad36355d012c5d..cf8506d0f185c0bd6b921cbc6ed507c1be38088b 100644 (file)
@@ -3675,7 +3675,7 @@ static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
 {
        /*
-        * check if weed have to switch to non-packing mode or if
+        * check if we have to switch to non-packing mode or if
         * we have to get a pci flag out on the queue
         */
        if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
index 35dbfacecf1cd52dda2b596bbd7f5d2694efe35d..70c9dd6b6a31e78f6d3bed61daa3304f17e813a0 100644 (file)
@@ -1169,6 +1169,7 @@ config SPI_XTENSA_XTFPGA
 config SPI_ZYNQ_QSPI
        tristate "Xilinx Zynq QSPI controller"
        depends on ARCH_ZYNQ || COMPILE_TEST
+       depends on SPI_MEM
        help
          This enables support for the Zynq Quad SPI controller
          in master mode.
index 791df0e691054762f139a49df38bb66ea1b14fcf..8ead7acb99f3481aa941b5e8879ac05444e8b46f 100644 (file)
@@ -3317,33 +3317,52 @@ void spi_unregister_controller(struct spi_controller *ctlr)
 }
 EXPORT_SYMBOL_GPL(spi_unregister_controller);
 
+static inline int __spi_check_suspended(const struct spi_controller *ctlr)
+{
+       return ctlr->flags & SPI_CONTROLLER_SUSPENDED ? -ESHUTDOWN : 0;
+}
+
+static inline void __spi_mark_suspended(struct spi_controller *ctlr)
+{
+       mutex_lock(&ctlr->bus_lock_mutex);
+       ctlr->flags |= SPI_CONTROLLER_SUSPENDED;
+       mutex_unlock(&ctlr->bus_lock_mutex);
+}
+
+static inline void __spi_mark_resumed(struct spi_controller *ctlr)
+{
+       mutex_lock(&ctlr->bus_lock_mutex);
+       ctlr->flags &= ~SPI_CONTROLLER_SUSPENDED;
+       mutex_unlock(&ctlr->bus_lock_mutex);
+}
+
 int spi_controller_suspend(struct spi_controller *ctlr)
 {
-       int ret;
+       int ret = 0;
 
        /* Basically no-ops for non-queued controllers */
-       if (!ctlr->queued)
-               return 0;
-
-       ret = spi_stop_queue(ctlr);
-       if (ret)
-               dev_err(&ctlr->dev, "queue stop failed\n");
+       if (ctlr->queued) {
+               ret = spi_stop_queue(ctlr);
+               if (ret)
+                       dev_err(&ctlr->dev, "queue stop failed\n");
+       }
 
+       __spi_mark_suspended(ctlr);
        return ret;
 }
 EXPORT_SYMBOL_GPL(spi_controller_suspend);
 
 int spi_controller_resume(struct spi_controller *ctlr)
 {
-       int ret;
-
-       if (!ctlr->queued)
-               return 0;
+       int ret = 0;
 
-       ret = spi_start_queue(ctlr);
-       if (ret)
-               dev_err(&ctlr->dev, "queue restart failed\n");
+       __spi_mark_resumed(ctlr);
 
+       if (ctlr->queued) {
+               ret = spi_start_queue(ctlr);
+               if (ret)
+                       dev_err(&ctlr->dev, "queue restart failed\n");
+       }
        return ret;
 }
 EXPORT_SYMBOL_GPL(spi_controller_resume);
@@ -4147,8 +4166,7 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
        ctlr->cur_msg = msg;
        ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
        if (ret)
-               goto out;
-
+               dev_err(&ctlr->dev, "noqueue transfer failed\n");
        ctlr->cur_msg = NULL;
        ctlr->fallback = false;
 
@@ -4164,7 +4182,6 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
                spi_idle_runtime_pm(ctlr);
        }
 
-out:
        mutex_unlock(&ctlr->io_mutex);
 }
 
@@ -4187,6 +4204,11 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
        int status;
        struct spi_controller *ctlr = spi->controller;
 
+       if (__spi_check_suspended(ctlr)) {
+               dev_warn_once(&spi->dev, "Attempted to sync while suspend\n");
+               return -ESHUTDOWN;
+       }
+
        status = __spi_validate(spi, message);
        if (status != 0)
                return status;
index 57cc1960d0598f5442724a528020cb34487bd00b..a3cb68cfa0f9b7059f8911deb924ab8970f07357 100644 (file)
@@ -258,7 +258,6 @@ static const struct pwm_ops gb_pwm_ops = {
        .request = gb_pwm_request,
        .free = gb_pwm_free,
        .apply = gb_pwm_apply,
-       .owner = THIS_MODULE,
 };
 
 static int gb_pwm_probe(struct gbphy_device *gbphy_dev,
index 75c985da75b5c601c3eab05fc622978bb2671f5d..1265fc58a232865da24f665aa506b3b4ac104e13 100644 (file)
@@ -26,18 +26,6 @@ config VIDEO_ATOMISP
          To compile this driver as a module, choose M here: the
          module will be called atomisp
 
-config VIDEO_ATOMISP_ISP2401
-       bool "Use Intel Atom ISP on Cherrytail/Anniedale (ISP2401)"
-       depends on VIDEO_ATOMISP
-       help
-         Enable support for Atom ISP2401-based boards.
-
-         Select this option for  Anniedale (Merrifield+ / Moorefield)
-         and Cherrytrail SoCs.
-
-         Disabling it enables support for Atom ISP2400-based boards
-         (Merrifield and Baytrail SoCs).
-
 if VIDEO_ATOMISP
 source "drivers/staging/media/atomisp/i2c/Kconfig"
 endif
index 38b37012410980c16643bcc5a11ca9cfa8f89e9b..fcd3e51ae9ce92094e3a3575357453f583f48590 100644 (file)
@@ -150,13 +150,8 @@ atomisp-objs += \
        pci/hive_isp_css_common/host/timed_ctrl.o \
        pci/hive_isp_css_common/host/vmem.o \
        pci/hive_isp_css_shared/host/tag.o \
-       pci/system_local.o
-
-# These will be needed when clean merge CHT support nicely into the driver
-# Keep them here handy for when we get to that point
-#
-
-obj-cht = \
+       pci/system_local.o \
+       pci/runtime/isys/src/ibuf_ctrl_rmgr.o \
        pci/css_2401_system/host/csi_rx.o \
        pci/css_2401_system/host/ibuf_ctrl.o \
        pci/css_2401_system/host/isys_dma.o \
@@ -306,11 +301,4 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
 #DEFINES += -DPUNIT_CAMERA_BUSY
 #DEFINES += -DUSE_KMEM_CACHE
 
-ifeq ($(CONFIG_VIDEO_ATOMISP_ISP2401),y)
-atomisp-objs += \
-       $(obj-cht) \
-       pci/runtime/isys/src/ibuf_ctrl_rmgr.o
-DEFINES += -DISP2401
-endif
-
 ccflags-y += $(INCLUDES) $(DEFINES) -fno-common
index ecf8ba67b7afafd2814b2f3b901c07163de5d6df..d99cc898cd99128e5c5bdade55422192a7148f81 100644 (file)
@@ -60,9 +60,6 @@ TODO
 
 2. Items which SHOULD also be fixed eventually:
 
-* Remove VIDEO_ATOMISP_ISP2401, making the driver to auto-detect the
-  register address differences between ISP2400 and ISP2401
-
 * The driver is intended to drive the PCI exposed versions of the device.
   It will not detect those devices enumerated via ACPI as a field of the
   i915 GPU driver (only a problem on BYT).
index 2d4165cda2f1fb05782dbd522c4567f41822a53f..f62d183b787f5938500db2a6a282a44a2b4ca805 100644 (file)
@@ -57,18 +57,6 @@ config VIDEO_ATOMISP_GC0310
          This is a Video4Linux2 sensor-level driver for the Galaxycore
          GC0310 0.3MP sensor.
 
-config VIDEO_ATOMISP_OV5693
-       tristate "Omnivision ov5693 sensor support"
-       depends on ACPI
-       depends on I2C && VIDEO_DEV
-       help
-         This is a Video4Linux2 sensor-level driver for the Micron
-         ov5693 5 Mpixel camera.
-
-         ov5693 is video camera sensor.
-
-         It currently only works with the atomisp driver.
-
 #
 # Kconfig for flash drivers
 #
index fc55af5f34226ef5b7e683af050ca88111a837e7..e946cc91e5ff29eb14092c87ba7b6417b016d18d 100644 (file)
@@ -3,7 +3,6 @@
 # Makefile for sensor drivers
 #
 
-obj-$(CONFIG_VIDEO_ATOMISP_OV5693)     += ov5693/
 obj-$(CONFIG_VIDEO_ATOMISP_MT9M114)    += atomisp-mt9m114.o
 obj-$(CONFIG_VIDEO_ATOMISP_GC2235)     += atomisp-gc2235.o
 obj-$(CONFIG_VIDEO_ATOMISP_OV2722)     += atomisp-ov2722.o
index 9a11793f34f74b624193d5a570c278a2fe50b14d..58cddf11c9ab6a39e704ba1ae1b0c47efc8da87b 100644 (file)
@@ -83,7 +83,6 @@ struct gc0310_device {
        struct mutex input_lock;
        bool is_streaming;
 
-       struct fwnode_handle *ep_fwnode;
        struct gpio_desc *reset;
        struct gpio_desc *powerdown;
 
@@ -442,11 +441,6 @@ static int gc0310_s_stream(struct v4l2_subdev *sd, int enable)
        dev_dbg(&client->dev, "%s S enable=%d\n", __func__, enable);
        mutex_lock(&dev->input_lock);
 
-       if (dev->is_streaming == enable) {
-               dev_warn(&client->dev, "stream already %s\n", enable ? "started" : "stopped");
-               goto error_unlock;
-       }
-
        if (enable) {
                ret = pm_runtime_get_sync(&client->dev);
                if (ret < 0)
@@ -498,7 +492,6 @@ static int gc0310_s_stream(struct v4l2_subdev *sd, int enable)
 error_power_down:
        pm_runtime_put(&client->dev);
        dev->is_streaming = false;
-error_unlock:
        mutex_unlock(&dev->input_lock);
        return ret;
 }
@@ -599,37 +592,37 @@ static void gc0310_remove(struct i2c_client *client)
        media_entity_cleanup(&dev->sd.entity);
        v4l2_ctrl_handler_free(&dev->ctrls.handler);
        mutex_destroy(&dev->input_lock);
-       fwnode_handle_put(dev->ep_fwnode);
        pm_runtime_disable(&client->dev);
 }
 
 static int gc0310_probe(struct i2c_client *client)
 {
+       struct fwnode_handle *ep_fwnode;
        struct gc0310_device *dev;
        int ret;
 
-       dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
-       if (!dev)
-               return -ENOMEM;
-
        /*
         * Sometimes the fwnode graph is initialized by the bridge driver.
         * Bridge drivers doing this may also add GPIO mappings, wait for this.
         */
-       dev->ep_fwnode = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
-       if (!dev->ep_fwnode)
+       ep_fwnode = fwnode_graph_get_next_endpoint(dev_fwnode(&client->dev), NULL);
+       if (!ep_fwnode)
                return dev_err_probe(&client->dev, -EPROBE_DEFER, "waiting for fwnode graph endpoint\n");
 
+       fwnode_handle_put(ep_fwnode);
+
+       dev = devm_kzalloc(&client->dev, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
        dev->reset = devm_gpiod_get(&client->dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(dev->reset)) {
-               fwnode_handle_put(dev->ep_fwnode);
                return dev_err_probe(&client->dev, PTR_ERR(dev->reset),
                                     "getting reset GPIO\n");
        }
 
        dev->powerdown = devm_gpiod_get(&client->dev, "powerdown", GPIOD_OUT_HIGH);
        if (IS_ERR(dev->powerdown)) {
-               fwnode_handle_put(dev->ep_fwnode);
                return dev_err_probe(&client->dev, PTR_ERR(dev->powerdown),
                                     "getting powerdown GPIO\n");
        }
@@ -652,7 +645,6 @@ static int gc0310_probe(struct i2c_client *client)
        dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
        dev->pad.flags = MEDIA_PAD_FL_SOURCE;
        dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-       dev->sd.fwnode = dev->ep_fwnode;
 
        ret = gc0310_init_controls(dev);
        if (ret) {
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile
deleted file mode 100644 (file)
index 3275f2b..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_VIDEO_ATOMISP_OV5693) += atomisp-ov5693.o
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h b/drivers/staging/media/atomisp/i2c/ov5693/ad5823.h
deleted file mode 100644 (file)
index f1362cd..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for AD5823 VCM.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#ifndef __AD5823_H__
-#define __AD5823_H__
-
-#include <linux/types.h>
-
-#define AD5823_VCM_ADDR        0x0c
-
-#define AD5823_REG_RESET               0x01
-#define AD5823_REG_MODE                        0x02
-#define AD5823_REG_VCM_MOVE_TIME       0x03
-#define AD5823_REG_VCM_CODE_MSB                0x04
-#define AD5823_REG_VCM_CODE_LSB                0x05
-#define AD5823_REG_VCM_THRESHOLD_MSB   0x06
-#define AD5823_REG_VCM_THRESHOLD_LSB   0x07
-
-#define AD5823_REG_LENGTH              0x1
-
-#define AD5823_RING_CTRL_ENABLE                0x04
-#define AD5823_RING_CTRL_DISABLE       0x00
-
-#define AD5823_RESONANCE_PERIOD                100000
-#define AD5823_RESONANCE_COEF          512
-#define AD5823_HIGH_FREQ_RANGE         0x80
-
-#define VCM_CODE_MSB_MASK              0xfc
-#define AD5823_INIT_FOCUS_POS           350
-
-enum ad5823_tok_type {
-       AD5823_8BIT  = 0x1,
-       AD5823_16BIT = 0x2,
-};
-
-enum ad5823_vcm_mode {
-       AD5823_ARC_RES0 = 0x0,  /* Actuator response control RES1 */
-       AD5823_ARC_RES1 = 0x1,  /* Actuator response control RES0.5 */
-       AD5823_ARC_RES2 = 0x2,  /* Actuator response control RES2 */
-       AD5823_ESRC = 0x3,      /* Enhanced slew rate control */
-       AD5823_DIRECT = 0x4,    /* Direct control */
-};
-
-#define AD5823_INVALID_CONFIG  0xffffffff
-#define AD5823_MAX_FOCUS_POS   1023
-#define DELAY_PER_STEP_NS      1000000
-#define DELAY_MAX_PER_STEP_NS  (1000000 * 1023)
-#endif
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
deleted file mode 100644 (file)
index 460a4e3..0000000
+++ /dev/null
@@ -1,1763 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Support for OmniVision OV5693 1080p HD camera sensor.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kmod.h>
-#include <linux/device.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/i2c.h>
-#include <linux/moduleparam.h>
-#include <media/v4l2-device.h>
-#include <linux/io.h>
-#include <linux/acpi.h>
-#include "../../include/linux/atomisp_gmin_platform.h"
-
-#include "ov5693.h"
-#include "ad5823.h"
-
-#define __cci_delay(t) \
-       do { \
-               if ((t) < 10) { \
-                       usleep_range((t) * 1000, ((t) + 1) * 1000); \
-               } else { \
-                       msleep((t)); \
-               } \
-       } while (0)
-
-/* Value 30ms reached through experimentation on byt ecs.
- * The DS specifies a much lower value but when using a smaller value
- * the I2C bus sometimes locks up permanently when starting the camera.
- * This issue could not be reproduced on cht, so we can reduce the
- * delay value to a lower value when insmod.
- */
-static uint up_delay = 30;
-module_param(up_delay, uint, 0644);
-MODULE_PARM_DESC(up_delay,
-                "Delay prior to the first CCI transaction for ov5693");
-
-static int vcm_ad_i2c_wr8(struct i2c_client *client, u8 reg, u8 val)
-{
-       int err;
-       struct i2c_msg msg;
-       u8 buf[2];
-
-       buf[0] = reg;
-       buf[1] = val;
-
-       msg.addr = VCM_ADDR;
-       msg.flags = 0;
-       msg.len = 2;
-       msg.buf = &buf[0];
-
-       err = i2c_transfer(client->adapter, &msg, 1);
-       if (err != 1) {
-               dev_err(&client->dev, "%s: vcm i2c fail, err code = %d\n",
-                       __func__, err);
-               return -EIO;
-       }
-       return 0;
-}
-
-static int ad5823_i2c_write(struct i2c_client *client, u8 reg, u8 val)
-{
-       struct i2c_msg msg;
-       u8 buf[2];
-
-       buf[0] = reg;
-       buf[1] = val;
-       msg.addr = AD5823_VCM_ADDR;
-       msg.flags = 0;
-       msg.len = 0x02;
-       msg.buf = &buf[0];
-
-       if (i2c_transfer(client->adapter, &msg, 1) != 1)
-               return -EIO;
-       return 0;
-}
-
-static int ad5823_i2c_read(struct i2c_client *client, u8 reg, u8 *val)
-{
-       struct i2c_msg msg[2];
-       u8 buf[2];
-
-       buf[0] = reg;
-       buf[1] = 0;
-
-       msg[0].addr = AD5823_VCM_ADDR;
-       msg[0].flags = 0;
-       msg[0].len = 0x01;
-       msg[0].buf = &buf[0];
-
-       msg[1].addr = 0x0c;
-       msg[1].flags = I2C_M_RD;
-       msg[1].len = 0x01;
-       msg[1].buf = &buf[1];
-       *val = 0;
-       if (i2c_transfer(client->adapter, msg, 2) != 2)
-               return -EIO;
-       *val = buf[1];
-       return 0;
-}
-
-static const u32 ov5693_embedded_effective_size = 28;
-
-/* i2c read/write stuff */
-static int ov5693_read_reg(struct i2c_client *client,
-                          u16 data_length, u16 reg, u16 *val)
-{
-       int err;
-       struct i2c_msg msg[2];
-       unsigned char data[6];
-
-       if (!client->adapter) {
-               dev_err(&client->dev, "%s error, no client->adapter\n",
-                       __func__);
-               return -ENODEV;
-       }
-
-       if (data_length != OV5693_8BIT && data_length != OV5693_16BIT
-           && data_length != OV5693_32BIT) {
-               dev_err(&client->dev, "%s error, invalid data length\n",
-                       __func__);
-               return -EINVAL;
-       }
-
-       memset(msg, 0, sizeof(msg));
-
-       msg[0].addr = client->addr;
-       msg[0].flags = 0;
-       msg[0].len = I2C_MSG_LENGTH;
-       msg[0].buf = data;
-
-       /* high byte goes out first */
-       data[0] = (u8)(reg >> 8);
-       data[1] = (u8)(reg & 0xff);
-
-       msg[1].addr = client->addr;
-       msg[1].len = data_length;
-       msg[1].flags = I2C_M_RD;
-       msg[1].buf = data;
-
-       err = i2c_transfer(client->adapter, msg, 2);
-       if (err != 2) {
-               if (err >= 0)
-                       err = -EIO;
-               dev_err(&client->dev,
-                       "read from offset 0x%x error %d", reg, err);
-               return err;
-       }
-
-       *val = 0;
-       /* high byte comes first */
-       if (data_length == OV5693_8BIT)
-               *val = (u8)data[0];
-       else if (data_length == OV5693_16BIT)
-               *val = be16_to_cpu(*(__be16 *)&data[0]);
-       else
-               *val = be32_to_cpu(*(__be32 *)&data[0]);
-
-       return 0;
-}
-
-static int ov5693_i2c_write(struct i2c_client *client, u16 len, u8 *data)
-{
-       struct i2c_msg msg;
-       const int num_msg = 1;
-       int ret;
-
-       msg.addr = client->addr;
-       msg.flags = 0;
-       msg.len = len;
-       msg.buf = data;
-       ret = i2c_transfer(client->adapter, &msg, 1);
-
-       return ret == num_msg ? 0 : -EIO;
-}
-
-static int vcm_dw_i2c_write(struct i2c_client *client, u16 data)
-{
-       struct i2c_msg msg;
-       const int num_msg = 1;
-       int ret;
-       __be16 val;
-
-       val = cpu_to_be16(data);
-       msg.addr = VCM_ADDR;
-       msg.flags = 0;
-       msg.len = OV5693_16BIT;
-       msg.buf = (void *)&val;
-
-       ret = i2c_transfer(client->adapter, &msg, 1);
-
-       return ret == num_msg ? 0 : -EIO;
-}
-
-/*
- * Theory: per datasheet, the two VCMs both allow for a 2-byte read.
- * The DW9714 doesn't actually specify what this does (it has a
- * two-byte write-only protocol, but specifies the read sequence as
- * legal), but it returns the same data (zeroes) always, after an
- * undocumented initial NAK.  The AD5823 has a one-byte address
- * register to which all writes go, and subsequent reads will cycle
- * through the 8 bytes of registers.  Notably, the default values (the
- * device is always power-cycled affirmatively, so we can rely on
- * these) in AD5823 are not pairwise repetitions of the same 16 bit
- * word.  So all we have to do is sequentially read two bytes at a
- * time and see if we detect a difference in any of the first four
- * pairs.
- */
-static int vcm_detect(struct i2c_client *client)
-{
-       int i, ret;
-       struct i2c_msg msg;
-       u16 data0 = 0, data;
-
-       for (i = 0; i < 4; i++) {
-               msg.addr = VCM_ADDR;
-               msg.flags = I2C_M_RD;
-               msg.len = sizeof(data);
-               msg.buf = (u8 *)&data;
-               ret = i2c_transfer(client->adapter, &msg, 1);
-
-               /*
-                * DW9714 always fails the first read and returns
-                * zeroes for subsequent ones
-                */
-               if (i == 0 && ret == -EREMOTEIO) {
-                       data0 = 0;
-                       continue;
-               }
-
-               if (i == 0)
-                       data0 = data;
-
-               if (data != data0)
-                       return VCM_AD5823;
-       }
-       return ret == 1 ? VCM_DW9714 : ret;
-}
-
-static int ov5693_write_reg(struct i2c_client *client, u16 data_length,
-                           u16 reg, u16 val)
-{
-       int ret;
-       unsigned char data[4] = {0};
-       __be16 *wreg = (void *)data;
-       const u16 len = data_length + sizeof(u16); /* 16-bit address + data */
-
-       if (data_length != OV5693_8BIT && data_length != OV5693_16BIT) {
-               dev_err(&client->dev,
-                       "%s error, invalid data_length\n", __func__);
-               return -EINVAL;
-       }
-
-       /* high byte goes out first */
-       *wreg = cpu_to_be16(reg);
-
-       if (data_length == OV5693_8BIT) {
-               data[2] = (u8)(val);
-       } else {
-               /* OV5693_16BIT */
-               __be16 *wdata = (void *)&data[2];
-
-               *wdata = cpu_to_be16(val);
-       }
-
-       ret = ov5693_i2c_write(client, len, data);
-       if (ret)
-               dev_err(&client->dev,
-                       "write error: wrote 0x%x to offset 0x%x error %d",
-                       val, reg, ret);
-
-       return ret;
-}
-
-/*
- * ov5693_write_reg_array - Initializes a list of OV5693 registers
- * @client: i2c driver client structure
- * @reglist: list of registers to be written
- *
- * This function initializes a list of registers. When consecutive addresses
- * are found in a row on the list, this function creates a buffer and sends
- * consecutive data in a single i2c_transfer().
- *
- * __ov5693_flush_reg_array, __ov5693_buf_reg_array() and
- * __ov5693_write_reg_is_consecutive() are internal functions to
- * ov5693_write_reg_array_fast() and should be not used anywhere else.
- *
- */
-
-static int __ov5693_flush_reg_array(struct i2c_client *client,
-                                   struct ov5693_write_ctrl *ctrl)
-{
-       u16 size;
-       __be16 *reg = (void *)&ctrl->buffer.addr;
-
-       if (ctrl->index == 0)
-               return 0;
-
-       size = sizeof(u16) + ctrl->index; /* 16-bit address + data */
-
-       *reg = cpu_to_be16(ctrl->buffer.addr);
-       ctrl->index = 0;
-
-       return ov5693_i2c_write(client, size, (u8 *)reg);
-}
-
-static int __ov5693_buf_reg_array(struct i2c_client *client,
-                                 struct ov5693_write_ctrl *ctrl,
-                                 const struct ov5693_reg *next)
-{
-       int size;
-       __be16 *data16;
-
-       switch (next->type) {
-       case OV5693_8BIT:
-               size = 1;
-               ctrl->buffer.data[ctrl->index] = (u8)next->val;
-               break;
-       case OV5693_16BIT:
-               size = 2;
-
-               data16 = (void *)&ctrl->buffer.data[ctrl->index];
-               *data16 = cpu_to_be16((u16)next->val);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       /* When first item is added, we need to store its starting address */
-       if (ctrl->index == 0)
-               ctrl->buffer.addr = next->reg;
-
-       ctrl->index += size;
-
-       /*
-        * Buffer cannot guarantee free space for u32? Better flush it to avoid
-        * possible lack of memory for next item.
-        */
-       if (ctrl->index + sizeof(u16) >= OV5693_MAX_WRITE_BUF_SIZE)
-               return __ov5693_flush_reg_array(client, ctrl);
-
-       return 0;
-}
-
-static int __ov5693_write_reg_is_consecutive(struct i2c_client *client,
-       struct ov5693_write_ctrl *ctrl,
-       const struct ov5693_reg *next)
-{
-       if (ctrl->index == 0)
-               return 1;
-
-       return ctrl->buffer.addr + ctrl->index == next->reg;
-}
-
-static int ov5693_write_reg_array(struct i2c_client *client,
-                                 const struct ov5693_reg *reglist)
-{
-       const struct ov5693_reg *next = reglist;
-       struct ov5693_write_ctrl ctrl;
-       int err;
-
-       ctrl.index = 0;
-       for (; next->type != OV5693_TOK_TERM; next++) {
-               switch (next->type & OV5693_TOK_MASK) {
-               case OV5693_TOK_DELAY:
-                       err = __ov5693_flush_reg_array(client, &ctrl);
-                       if (err)
-                               return err;
-                       msleep(next->val);
-                       break;
-               default:
-                       /*
-                        * If next address is not consecutive, data needs to be
-                        * flushed before proceed.
-                        */
-                       if (!__ov5693_write_reg_is_consecutive(client, &ctrl,
-                                                              next)) {
-                               err = __ov5693_flush_reg_array(client, &ctrl);
-                               if (err)
-                                       return err;
-                       }
-                       err = __ov5693_buf_reg_array(client, &ctrl, next);
-                       if (err) {
-                               dev_err(&client->dev,
-                                       "%s: write error, aborted\n",
-                                       __func__);
-                               return err;
-                       }
-                       break;
-               }
-       }
-
-       return __ov5693_flush_reg_array(client, &ctrl);
-}
-
-static long __ov5693_set_exposure(struct v4l2_subdev *sd, int coarse_itg,
-                                 int gain, int digitgain)
-
-{
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       u16 vts, hts;
-       int ret, exp_val;
-
-       hts = ov5693_res[dev->fmt_idx].pixels_per_line;
-       vts = ov5693_res[dev->fmt_idx].lines_per_frame;
-       /*
-        * If coarse_itg is larger than 1<<15, can not write to reg directly.
-        * The way is to write coarse_itg/2 to the reg, meanwhile write 2*hts
-        * to the reg.
-        */
-       if (coarse_itg > (1 << 15)) {
-               hts = hts * 2;
-               coarse_itg = (int)coarse_itg / 2;
-       }
-       /* group hold */
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_GROUP_ACCESS, 0x00);
-       if (ret) {
-               dev_err(&client->dev, "%s: write %x error, aborted\n",
-                       __func__, OV5693_GROUP_ACCESS);
-               return ret;
-       }
-
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_TIMING_HTS_H, (hts >> 8) & 0xFF);
-       if (ret) {
-               dev_err(&client->dev, "%s: write %x error, aborted\n",
-                       __func__, OV5693_TIMING_HTS_H);
-               return ret;
-       }
-
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_TIMING_HTS_L, hts & 0xFF);
-       if (ret) {
-               dev_err(&client->dev, "%s: write %x error, aborted\n",
-                       __func__, OV5693_TIMING_HTS_L);
-               return ret;
-       }
-       /* Increase the VTS to match exposure + MARGIN */
-       if (coarse_itg > vts - OV5693_INTEGRATION_TIME_MARGIN)
-               vts = (u16)coarse_itg + OV5693_INTEGRATION_TIME_MARGIN;
-
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_TIMING_VTS_H, (vts >> 8) & 0xFF);
-       if (ret) {
-               dev_err(&client->dev, "%s: write %x error, aborted\n",
-                       __func__, OV5693_TIMING_VTS_H);
-               return ret;
-       }
-
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_TIMING_VTS_L, vts & 0xFF);
-       if (ret) {
-               dev_err(&client->dev, "%s: write %x error, aborted\n",
-                       __func__, OV5693_TIMING_VTS_L);
-               return ret;
-       }
-
-       /* set exposure */
-
-       /* Lower four bit should be 0*/
-       exp_val = coarse_itg << 4;
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_EXPOSURE_L, exp_val & 0xFF);
-       if (ret) {
-               dev_err(&client->dev, "%s: write %x error, aborted\n",
-                       __func__, OV5693_EXPOSURE_L);
-               return ret;
-       }
-
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_EXPOSURE_M, (exp_val >> 8) & 0xFF);
-       if (ret) {
-               dev_err(&client->dev, "%s: write %x error, aborted\n",
-                       __func__, OV5693_EXPOSURE_M);
-               return ret;
-       }
-
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_EXPOSURE_H, (exp_val >> 16) & 0x0F);
-       if (ret) {
-               dev_err(&client->dev, "%s: write %x error, aborted\n",
-                       __func__, OV5693_EXPOSURE_H);
-               return ret;
-       }
-
-       /* Analog gain */
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_AGC_L, gain & 0xff);
-       if (ret) {
-               dev_err(&client->dev, "%s: write %x error, aborted\n",
-                       __func__, OV5693_AGC_L);
-               return ret;
-       }
-
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_AGC_H, (gain >> 8) & 0xff);
-       if (ret) {
-               dev_err(&client->dev, "%s: write %x error, aborted\n",
-                       __func__, OV5693_AGC_H);
-               return ret;
-       }
-
-       /* Digital gain */
-       if (digitgain) {
-               ret = ov5693_write_reg(client, OV5693_16BIT,
-                                      OV5693_MWB_RED_GAIN_H, digitgain);
-               if (ret) {
-                       dev_err(&client->dev, "%s: write %x error, aborted\n",
-                               __func__, OV5693_MWB_RED_GAIN_H);
-                       return ret;
-               }
-
-               ret = ov5693_write_reg(client, OV5693_16BIT,
-                                      OV5693_MWB_GREEN_GAIN_H, digitgain);
-               if (ret) {
-                       dev_err(&client->dev, "%s: write %x error, aborted\n",
-                               __func__, OV5693_MWB_RED_GAIN_H);
-                       return ret;
-               }
-
-               ret = ov5693_write_reg(client, OV5693_16BIT,
-                                      OV5693_MWB_BLUE_GAIN_H, digitgain);
-               if (ret) {
-                       dev_err(&client->dev, "%s: write %x error, aborted\n",
-                               __func__, OV5693_MWB_RED_GAIN_H);
-                       return ret;
-               }
-       }
-
-       /* End group */
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_GROUP_ACCESS, 0x10);
-       if (ret)
-               return ret;
-
-       /* Delay launch group */
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_GROUP_ACCESS, 0xa0);
-       if (ret)
-               return ret;
-       return ret;
-}
-
-static int ov5693_set_exposure(struct v4l2_subdev *sd, int exposure,
-                              int gain, int digitgain)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       int ret;
-
-       mutex_lock(&dev->input_lock);
-       ret = __ov5693_set_exposure(sd, exposure, gain, digitgain);
-       mutex_unlock(&dev->input_lock);
-
-       return ret;
-}
-
-static long ov5693_s_exposure(struct v4l2_subdev *sd,
-                             struct atomisp_exposure *exposure)
-{
-       u16 coarse_itg = exposure->integration_time[0];
-       u16 analog_gain = exposure->gain[0];
-       u16 digital_gain = exposure->gain[1];
-
-       /* we should not accept the invalid value below */
-       if (analog_gain == 0) {
-               struct i2c_client *client = v4l2_get_subdevdata(sd);
-
-               v4l2_err(client, "%s: invalid value\n", __func__);
-               return -EINVAL;
-       }
-       return ov5693_set_exposure(sd, coarse_itg, analog_gain, digital_gain);
-}
-
-static int ov5693_read_otp_reg_array(struct i2c_client *client, u16 size,
-                                    u16 addr, u8 *buf)
-{
-       u16 index;
-       int ret;
-       u16 *pVal = NULL;
-
-       for (index = 0; index <= size; index++) {
-               pVal = (u16 *)(buf + index);
-               ret =
-                   ov5693_read_reg(client, OV5693_8BIT, addr + index,
-                                   pVal);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int __ov5693_otp_read(struct v4l2_subdev *sd, u8 *buf)
-{
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       int ret;
-       int i;
-       u8 *b = buf;
-
-       dev->otp_size = 0;
-       for (i = 1; i < OV5693_OTP_BANK_MAX; i++) {
-               /*set bank NO and OTP read mode. */
-               ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_BANK_REG,
-                                      (i | 0xc0));     //[7:6] 2'b11 [5:0] bank no
-               if (ret) {
-                       dev_err(&client->dev, "failed to prepare OTP page\n");
-                       return ret;
-               }
-               //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_BANK_REG,(i|0xc0));
-
-               /*enable read */
-               ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_OTP_READ_REG,
-                                      OV5693_OTP_MODE_READ);   // enable :1
-               if (ret) {
-                       dev_err(&client->dev,
-                               "failed to set OTP reading mode page");
-                       return ret;
-               }
-               //pr_debug("write 0x%x->0x%x\n",OV5693_OTP_READ_REG,OV5693_OTP_MODE_READ);
-
-               /* Reading the OTP data array */
-               ret = ov5693_read_otp_reg_array(client, OV5693_OTP_BANK_SIZE,
-                                               OV5693_OTP_START_ADDR,
-                                               b);
-               if (ret) {
-                       dev_err(&client->dev, "failed to read OTP data\n");
-                       return ret;
-               }
-
-               //pr_debug("BANK[%2d] %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", i, *b, *(b+1), *(b+2), *(b+3), *(b+4), *(b+5), *(b+6), *(b+7), *(b+8), *(b+9), *(b+10), *(b+11), *(b+12), *(b+13), *(b+14), *(b+15));
-
-               //Intel OTP map, try to read 320byts first.
-               if (i == 21) {
-                       if ((*b) == 0) {
-                               dev->otp_size = 320;
-                               break;
-                       } else {
-                               b = buf;
-                               continue;
-                       }
-               } else if (i ==
-                          24) {                //if the first 320bytes data doesn't not exist, try to read the next 32bytes data.
-                       if ((*b) == 0) {
-                               dev->otp_size = 32;
-                               break;
-                       } else {
-                               b = buf;
-                               continue;
-                       }
-               } else if (i ==
-                          27) {                //if the prvious 32bytes data doesn't exist, try to read the next 32bytes data again.
-                       if ((*b) == 0) {
-                               dev->otp_size = 32;
-                               break;
-                       } else {
-                               dev->otp_size = 0;      // no OTP data.
-                               break;
-                       }
-               }
-
-               b = b + OV5693_OTP_BANK_SIZE;
-       }
-       return 0;
-}
-
-/*
- * Read otp data and store it into a kmalloced buffer.
- * The caller must kfree the buffer when no more needed.
- * @size: set to the size of the returned otp data.
- */
-static void *ov5693_otp_read(struct v4l2_subdev *sd)
-{
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       u8 *buf;
-       int ret;
-
-       buf = devm_kzalloc(&client->dev, (OV5693_OTP_DATA_SIZE + 16), GFP_KERNEL);
-       if (!buf)
-               return ERR_PTR(-ENOMEM);
-
-       //otp valid after mipi on and sw stream on
-       ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x00);
-
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_SW_STREAM, OV5693_START_STREAMING);
-
-       ret = __ov5693_otp_read(sd, buf);
-
-       //mipi off and sw stream off after otp read
-       ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_FRAME_OFF_NUM, 0x0f);
-
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_SW_STREAM, OV5693_STOP_STREAMING);
-
-       /* Driver has failed to find valid data */
-       if (ret) {
-               dev_err(&client->dev, "sensor found no valid OTP data\n");
-               return ERR_PTR(ret);
-       }
-
-       return buf;
-}
-
-static long ov5693_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
-{
-       switch (cmd) {
-       case ATOMISP_IOC_S_EXPOSURE:
-               return ov5693_s_exposure(sd, arg);
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-/*
- * This returns the exposure time being used. This should only be used
- * for filling in EXIF data, not for actual image processing.
- */
-static int ov5693_q_exposure(struct v4l2_subdev *sd, s32 *value)
-{
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       u16 reg_v, reg_v2;
-       int ret;
-
-       /* get exposure */
-       ret = ov5693_read_reg(client, OV5693_8BIT,
-                             OV5693_EXPOSURE_L,
-                             &reg_v);
-       if (ret)
-               goto err;
-
-       ret = ov5693_read_reg(client, OV5693_8BIT,
-                             OV5693_EXPOSURE_M,
-                             &reg_v2);
-       if (ret)
-               goto err;
-
-       reg_v += reg_v2 << 8;
-       ret = ov5693_read_reg(client, OV5693_8BIT,
-                             OV5693_EXPOSURE_H,
-                             &reg_v2);
-       if (ret)
-               goto err;
-
-       *value = reg_v + (((u32)reg_v2 << 16));
-err:
-       return ret;
-}
-
-static int ad5823_t_focus_vcm(struct v4l2_subdev *sd, u16 val)
-{
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       int ret;
-       u8 vcm_code;
-
-       ret = ad5823_i2c_read(client, AD5823_REG_VCM_CODE_MSB, &vcm_code);
-       if (ret)
-               return ret;
-
-       /* set reg VCM_CODE_MSB Bit[1:0] */
-       vcm_code = (vcm_code & VCM_CODE_MSB_MASK) |
-                  ((val >> 8) & ~VCM_CODE_MSB_MASK);
-       ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB, vcm_code);
-       if (ret)
-               return ret;
-
-       /* set reg VCM_CODE_LSB Bit[7:0] */
-       ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_LSB, (val & 0xff));
-       if (ret)
-               return ret;
-
-       /* set required vcm move time */
-       vcm_code = AD5823_RESONANCE_PERIOD / AD5823_RESONANCE_COEF
-                  - AD5823_HIGH_FREQ_RANGE;
-       ret = ad5823_i2c_write(client, AD5823_REG_VCM_MOVE_TIME, vcm_code);
-
-       return ret;
-}
-
-static int ad5823_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
-       value = min(value, AD5823_MAX_FOCUS_POS);
-       return ad5823_t_focus_vcm(sd, value);
-}
-
-static int ov5693_t_focus_abs(struct v4l2_subdev *sd, s32 value)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       int ret = 0;
-
-       dev_dbg(&client->dev, "%s: FOCUS_POS: 0x%x\n", __func__, value);
-       value = clamp(value, 0, OV5693_VCM_MAX_FOCUS_POS);
-       if (dev->vcm == VCM_DW9714) {
-               if (dev->vcm_update) {
-                       ret = vcm_dw_i2c_write(client, VCM_PROTECTION_OFF);
-                       if (ret)
-                               return ret;
-                       ret = vcm_dw_i2c_write(client, DIRECT_VCM);
-                       if (ret)
-                               return ret;
-                       ret = vcm_dw_i2c_write(client, VCM_PROTECTION_ON);
-                       if (ret)
-                               return ret;
-                       dev->vcm_update = false;
-               }
-               ret = vcm_dw_i2c_write(client,
-                                      vcm_val(value, VCM_DEFAULT_S));
-       } else if (dev->vcm == VCM_AD5823) {
-               ad5823_t_focus_abs(sd, value);
-       }
-       if (ret == 0) {
-               dev->number_of_steps = value - dev->focus;
-               dev->focus = value;
-               dev->timestamp_t_focus_abs = ktime_get();
-       } else
-               dev_err(&client->dev,
-                       "%s: i2c failed. ret %d\n", __func__, ret);
-
-       return ret;
-}
-
-static int ov5693_t_focus_rel(struct v4l2_subdev *sd, s32 value)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-
-       return ov5693_t_focus_abs(sd, dev->focus + value);
-}
-
-#define DELAY_PER_STEP_NS      1000000
-#define DELAY_MAX_PER_STEP_NS  (1000000 * 1023)
-static int ov5693_q_focus_status(struct v4l2_subdev *sd, s32 *value)
-{
-       u32 status = 0;
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       ktime_t temptime;
-       ktime_t timedelay = ns_to_ktime(min_t(u32,
-                                             abs(dev->number_of_steps) * DELAY_PER_STEP_NS,
-                                             DELAY_MAX_PER_STEP_NS));
-
-       temptime = ktime_sub(ktime_get(), (dev->timestamp_t_focus_abs));
-       if (ktime_compare(temptime, timedelay) <= 0) {
-               status |= ATOMISP_FOCUS_STATUS_MOVING;
-               status |= ATOMISP_FOCUS_HP_IN_PROGRESS;
-       } else {
-               status |= ATOMISP_FOCUS_STATUS_ACCEPTS_NEW_MOVE;
-               status |= ATOMISP_FOCUS_HP_COMPLETE;
-       }
-
-       *value = status;
-
-       return 0;
-}
-
-static int ov5693_q_focus_abs(struct v4l2_subdev *sd, s32 *value)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       s32 val;
-
-       ov5693_q_focus_status(sd, &val);
-
-       if (val & ATOMISP_FOCUS_STATUS_MOVING)
-               *value  = dev->focus - dev->number_of_steps;
-       else
-               *value  = dev->focus;
-
-       return 0;
-}
-
-static int ov5693_t_vcm_slew(struct v4l2_subdev *sd, s32 value)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-
-       dev->number_of_steps = value;
-       dev->vcm_update = true;
-       return 0;
-}
-
-static int ov5693_t_vcm_timing(struct v4l2_subdev *sd, s32 value)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-
-       dev->number_of_steps = value;
-       dev->vcm_update = true;
-       return 0;
-}
-
-static int ov5693_s_ctrl(struct v4l2_ctrl *ctrl)
-{
-       struct ov5693_device *dev =
-           container_of(ctrl->handler, struct ov5693_device, ctrl_handler);
-       struct i2c_client *client = v4l2_get_subdevdata(&dev->sd);
-       int ret = 0;
-
-       switch (ctrl->id) {
-       case V4L2_CID_FOCUS_ABSOLUTE:
-               dev_dbg(&client->dev, "%s: CID_FOCUS_ABSOLUTE:%d.\n",
-                       __func__, ctrl->val);
-               ret = ov5693_t_focus_abs(&dev->sd, ctrl->val);
-               break;
-       case V4L2_CID_FOCUS_RELATIVE:
-               dev_dbg(&client->dev, "%s: CID_FOCUS_RELATIVE:%d.\n",
-                       __func__, ctrl->val);
-               ret = ov5693_t_focus_rel(&dev->sd, ctrl->val);
-               break;
-       case V4L2_CID_VCM_SLEW:
-               ret = ov5693_t_vcm_slew(&dev->sd, ctrl->val);
-               break;
-       case V4L2_CID_VCM_TIMING:
-               ret = ov5693_t_vcm_timing(&dev->sd, ctrl->val);
-               break;
-       default:
-               ret = -EINVAL;
-       }
-       return ret;
-}
-
-static int ov5693_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
-{
-       struct ov5693_device *dev =
-           container_of(ctrl->handler, struct ov5693_device, ctrl_handler);
-       int ret = 0;
-
-       switch (ctrl->id) {
-       case V4L2_CID_EXPOSURE_ABSOLUTE:
-               ret = ov5693_q_exposure(&dev->sd, &ctrl->val);
-               break;
-       case V4L2_CID_FOCUS_ABSOLUTE:
-               ret = ov5693_q_focus_abs(&dev->sd, &ctrl->val);
-               break;
-       case V4L2_CID_FOCUS_STATUS:
-               ret = ov5693_q_focus_status(&dev->sd, &ctrl->val);
-               break;
-       default:
-               ret = -EINVAL;
-       }
-
-       return ret;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
-       .s_ctrl = ov5693_s_ctrl,
-       .g_volatile_ctrl = ov5693_g_volatile_ctrl
-};
-
-static const struct v4l2_ctrl_config ov5693_controls[] = {
-       {
-               .ops = &ctrl_ops,
-               .id = V4L2_CID_EXPOSURE_ABSOLUTE,
-               .type = V4L2_CTRL_TYPE_INTEGER,
-               .name = "exposure",
-               .min = 0x0,
-               .max = 0xffff,
-               .step = 0x01,
-               .def = 0x00,
-               .flags = 0,
-       },
-       {
-               .ops = &ctrl_ops,
-               .id = V4L2_CID_FOCUS_ABSOLUTE,
-               .type = V4L2_CTRL_TYPE_INTEGER,
-               .name = "focus move absolute",
-               .min = 0,
-               .max = OV5693_VCM_MAX_FOCUS_POS,
-               .step = 1,
-               .def = 0,
-               .flags = 0,
-       },
-       {
-               .ops = &ctrl_ops,
-               .id = V4L2_CID_FOCUS_RELATIVE,
-               .type = V4L2_CTRL_TYPE_INTEGER,
-               .name = "focus move relative",
-               .min = OV5693_VCM_MAX_FOCUS_NEG,
-               .max = OV5693_VCM_MAX_FOCUS_POS,
-               .step = 1,
-               .def = 0,
-               .flags = 0,
-       },
-       {
-               .ops = &ctrl_ops,
-               .id = V4L2_CID_FOCUS_STATUS,
-               .type = V4L2_CTRL_TYPE_INTEGER,
-               .name = "focus status",
-               .min = 0,
-               .max = 100,             /* allow enum to grow in the future */
-               .step = 1,
-               .def = 0,
-               .flags = 0,
-       },
-       {
-               .ops = &ctrl_ops,
-               .id = V4L2_CID_VCM_SLEW,
-               .type = V4L2_CTRL_TYPE_INTEGER,
-               .name = "vcm slew",
-               .min = 0,
-               .max = OV5693_VCM_SLEW_STEP_MAX,
-               .step = 1,
-               .def = 0,
-               .flags = 0,
-       },
-       {
-               .ops = &ctrl_ops,
-               .id = V4L2_CID_VCM_TIMING,
-               .type = V4L2_CTRL_TYPE_INTEGER,
-               .name = "vcm step time",
-               .min = 0,
-               .max = OV5693_VCM_SLEW_TIME_MAX,
-               .step = 1,
-               .def = 0,
-               .flags = 0,
-       },
-};
-
-static int ov5693_init(struct v4l2_subdev *sd)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       int ret;
-
-       pr_info("%s\n", __func__);
-       mutex_lock(&dev->input_lock);
-       dev->vcm_update = false;
-
-       if (dev->vcm == VCM_AD5823) {
-               ret = vcm_ad_i2c_wr8(client, 0x01, 0x01); /* vcm init test */
-               if (ret)
-                       dev_err(&client->dev,
-                               "vcm reset failed\n");
-               /*change the mode*/
-               ret = ad5823_i2c_write(client, AD5823_REG_VCM_CODE_MSB,
-                                      AD5823_RING_CTRL_ENABLE);
-               if (ret)
-                       dev_err(&client->dev,
-                               "vcm enable ringing failed\n");
-               ret = ad5823_i2c_write(client, AD5823_REG_MODE,
-                                      AD5823_ARC_RES1);
-               if (ret)
-                       dev_err(&client->dev,
-                               "vcm change mode failed\n");
-       }
-
-       /*change initial focus value for ad5823*/
-       if (dev->vcm == VCM_AD5823) {
-               dev->focus = AD5823_INIT_FOCUS_POS;
-               ov5693_t_focus_abs(sd, AD5823_INIT_FOCUS_POS);
-       } else {
-               dev->focus = 0;
-               ov5693_t_focus_abs(sd, 0);
-       }
-
-       mutex_unlock(&dev->input_lock);
-
-       return 0;
-}
-
-static int power_ctrl(struct v4l2_subdev *sd, bool flag)
-{
-       int ret;
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-
-       if (!dev || !dev->platform_data)
-               return -ENODEV;
-
-       /*
-        * This driver assumes "internal DVDD, PWDNB tied to DOVDD".
-        * In this set up only gpio0 (XSHUTDN) should be available
-        * but in some products (for example ECS) gpio1 (PWDNB) is
-        * also available. If gpio1 is available we emulate it being
-        * tied to DOVDD here.
-        */
-       if (flag) {
-               ret = dev->platform_data->v2p8_ctrl(sd, 1);
-               dev->platform_data->gpio1_ctrl(sd, 1);
-               if (ret == 0) {
-                       ret = dev->platform_data->v1p8_ctrl(sd, 1);
-                       if (ret) {
-                               dev->platform_data->gpio1_ctrl(sd, 0);
-                               ret = dev->platform_data->v2p8_ctrl(sd, 0);
-                       }
-               }
-       } else {
-               dev->platform_data->gpio1_ctrl(sd, 0);
-               ret = dev->platform_data->v1p8_ctrl(sd, 0);
-               ret |= dev->platform_data->v2p8_ctrl(sd, 0);
-       }
-
-       return ret;
-}
-
-static int gpio_ctrl(struct v4l2_subdev *sd, bool flag)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-
-       if (!dev || !dev->platform_data)
-               return -ENODEV;
-
-       return dev->platform_data->gpio0_ctrl(sd, flag);
-}
-
-static int __power_up(struct v4l2_subdev *sd)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       int ret;
-
-       if (!dev->platform_data) {
-               dev_err(&client->dev,
-                       "no camera_sensor_platform_data");
-               return -ENODEV;
-       }
-
-       /* power control */
-       ret = power_ctrl(sd, 1);
-       if (ret)
-               goto fail_power;
-
-       /* according to DS, at least 5ms is needed between DOVDD and PWDN */
-       /* add this delay time to 10~11ms*/
-       usleep_range(10000, 11000);
-
-       /* gpio ctrl */
-       ret = gpio_ctrl(sd, 1);
-       if (ret) {
-               ret = gpio_ctrl(sd, 1);
-               if (ret)
-                       goto fail_power;
-       }
-
-       /* flis clock control */
-       ret = dev->platform_data->flisclk_ctrl(sd, 1);
-       if (ret)
-               goto fail_clk;
-
-       __cci_delay(up_delay);
-
-       return 0;
-
-fail_clk:
-       gpio_ctrl(sd, 0);
-fail_power:
-       power_ctrl(sd, 0);
-       dev_err(&client->dev, "sensor power-up failed\n");
-
-       return ret;
-}
-
-static int power_down(struct v4l2_subdev *sd)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       int ret = 0;
-
-       dev->focus = OV5693_INVALID_CONFIG;
-       if (!dev->platform_data) {
-               dev_err(&client->dev,
-                       "no camera_sensor_platform_data");
-               return -ENODEV;
-       }
-
-       ret = dev->platform_data->flisclk_ctrl(sd, 0);
-       if (ret)
-               dev_err(&client->dev, "flisclk failed\n");
-
-       /* gpio ctrl */
-       ret = gpio_ctrl(sd, 0);
-       if (ret) {
-               ret = gpio_ctrl(sd, 0);
-               if (ret)
-                       dev_err(&client->dev, "gpio failed 2\n");
-       }
-
-       /* power control */
-       ret = power_ctrl(sd, 0);
-       if (ret)
-               dev_err(&client->dev, "vprog failed.\n");
-
-       return ret;
-}
-
-static int power_up(struct v4l2_subdev *sd)
-{
-       static const int retry_count = 4;
-       int i, ret;
-
-       for (i = 0; i < retry_count; i++) {
-               ret = __power_up(sd);
-               if (!ret)
-                       return 0;
-
-               power_down(sd);
-       }
-       return ret;
-}
-
-static int ov5693_s_power(struct v4l2_subdev *sd, int on)
-{
-       int ret;
-
-       pr_info("%s: on %d\n", __func__, on);
-       if (on == 0)
-               return power_down(sd);
-       else {
-               ret = power_up(sd);
-               if (!ret) {
-                       ret = ov5693_init(sd);
-                       /* restore settings */
-                       ov5693_res = ov5693_res_preview;
-                       N_RES = N_RES_PREVIEW;
-               }
-       }
-       return ret;
-}
-
-/*
- * distance - calculate the distance
- * @res: resolution
- * @w: width
- * @h: height
- *
- * Get the gap between res_w/res_h and w/h.
- * distance = (res_w/res_h - w/h) / (w/h) * 8192
- * res->width/height smaller than w/h wouldn't be considered.
- * The gap of ratio larger than 1/8 wouldn't be considered.
- * Returns the value of gap or -1 if fail.
- */
-#define LARGEST_ALLOWED_RATIO_MISMATCH 1024
-static int distance(struct ov5693_resolution *res, u32 w, u32 h)
-{
-       int ratio;
-       int distance;
-
-       if (w == 0 || h == 0 ||
-           res->width < w || res->height < h)
-               return -1;
-
-       ratio = res->width << 13;
-       ratio /= w;
-       ratio *= h;
-       ratio /= res->height;
-
-       distance = abs(ratio - 8192);
-
-       if (distance > LARGEST_ALLOWED_RATIO_MISMATCH)
-               return -1;
-
-       return distance;
-}
-
-/* Return the nearest higher resolution index
- * Firstly try to find the approximate aspect ratio resolution
- * If we find multiple same AR resolutions, choose the
- * minimal size.
- */
-static int nearest_resolution_index(int w, int h)
-{
-       int i;
-       int idx = -1;
-       int dist;
-       int min_dist = INT_MAX;
-       int min_res_w = INT_MAX;
-       struct ov5693_resolution *tmp_res = NULL;
-
-       for (i = 0; i < N_RES; i++) {
-               tmp_res = &ov5693_res[i];
-               dist = distance(tmp_res, w, h);
-               if (dist == -1)
-                       continue;
-               if (dist < min_dist) {
-                       min_dist = dist;
-                       idx = i;
-                       min_res_w = ov5693_res[i].width;
-                       continue;
-               }
-               if (dist == min_dist && ov5693_res[i].width < min_res_w)
-                       idx = i;
-       }
-
-       return idx;
-}
-
-static int get_resolution_index(int w, int h)
-{
-       int i;
-
-       for (i = 0; i < N_RES; i++) {
-               if (w != ov5693_res[i].width)
-                       continue;
-               if (h != ov5693_res[i].height)
-                       continue;
-
-               return i;
-       }
-
-       return -1;
-}
-
-/* TODO: remove it. */
-static int startup(struct v4l2_subdev *sd)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       int ret = 0;
-
-       ret = ov5693_write_reg(client, OV5693_8BIT,
-                              OV5693_SW_RESET, 0x01);
-       if (ret) {
-               dev_err(&client->dev, "ov5693 reset err.\n");
-               return ret;
-       }
-
-       ret = ov5693_write_reg_array(client, ov5693_global_setting);
-       if (ret) {
-               dev_err(&client->dev, "ov5693 write register err.\n");
-               return ret;
-       }
-
-       ret = ov5693_write_reg_array(client, ov5693_res[dev->fmt_idx].regs);
-       if (ret) {
-               dev_err(&client->dev, "ov5693 write register err.\n");
-               return ret;
-       }
-
-       return ret;
-}
-
-static int ov5693_set_fmt(struct v4l2_subdev *sd,
-                         struct v4l2_subdev_state *sd_state,
-                         struct v4l2_subdev_format *format)
-{
-       struct v4l2_mbus_framefmt *fmt = &format->format;
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       struct camera_mipi_info *ov5693_info = NULL;
-       int ret = 0;
-       int idx;
-
-       if (format->pad)
-               return -EINVAL;
-       if (!fmt)
-               return -EINVAL;
-       ov5693_info = v4l2_get_subdev_hostdata(sd);
-       if (!ov5693_info)
-               return -EINVAL;
-
-       mutex_lock(&dev->input_lock);
-       idx = nearest_resolution_index(fmt->width, fmt->height);
-       if (idx == -1) {
-               /* return the largest resolution */
-               fmt->width = ov5693_res[N_RES - 1].width;
-               fmt->height = ov5693_res[N_RES - 1].height;
-       } else {
-               fmt->width = ov5693_res[idx].width;
-               fmt->height = ov5693_res[idx].height;
-       }
-
-       fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
-       if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
-               sd_state->pads->try_fmt = *fmt;
-               mutex_unlock(&dev->input_lock);
-               return 0;
-       }
-
-       dev->fmt_idx = get_resolution_index(fmt->width, fmt->height);
-       if (dev->fmt_idx == -1) {
-               dev_err(&client->dev, "get resolution fail\n");
-               mutex_unlock(&dev->input_lock);
-               return -EINVAL;
-       }
-
-       ret = startup(sd);
-       if (ret) {
-               int i = 0;
-
-               dev_err(&client->dev, "ov5693 startup err, retry to power up\n");
-               for (i = 0; i < OV5693_POWER_UP_RETRY_NUM; i++) {
-                       dev_err(&client->dev,
-                               "ov5693 retry to power up %d/%d times, result: ",
-                               i + 1, OV5693_POWER_UP_RETRY_NUM);
-                       power_down(sd);
-                       ret = power_up(sd);
-                       if (!ret) {
-                               mutex_unlock(&dev->input_lock);
-                               ov5693_init(sd);
-                               mutex_lock(&dev->input_lock);
-                       } else {
-                               dev_err(&client->dev, "power up failed, continue\n");
-                               continue;
-                       }
-                       ret = startup(sd);
-                       if (ret) {
-                               dev_err(&client->dev, " startup FAILED!\n");
-                       } else {
-                               dev_err(&client->dev, " startup SUCCESS!\n");
-                               break;
-                       }
-               }
-       }
-
-       /*
-        * After sensor settings are set to HW, sometimes stream is started.
-        * This would cause ISP timeout because ISP is not ready to receive
-        * data yet. So add stop streaming here.
-        */
-       ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM,
-                              OV5693_STOP_STREAMING);
-       if (ret)
-               dev_warn(&client->dev, "ov5693 stream off err\n");
-
-       ov5693_info->metadata_width = fmt->width * 10 / 8;
-       ov5693_info->metadata_height = 1;
-       ov5693_info->metadata_effective_width = &ov5693_embedded_effective_size;
-
-       mutex_unlock(&dev->input_lock);
-       return ret;
-}
-
-static int ov5693_get_fmt(struct v4l2_subdev *sd,
-                         struct v4l2_subdev_state *sd_state,
-                         struct v4l2_subdev_format *format)
-{
-       struct v4l2_mbus_framefmt *fmt = &format->format;
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-
-       if (format->pad)
-               return -EINVAL;
-
-       if (!fmt)
-               return -EINVAL;
-
-       fmt->width = ov5693_res[dev->fmt_idx].width;
-       fmt->height = ov5693_res[dev->fmt_idx].height;
-       fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10;
-
-       return 0;
-}
-
-static int ov5693_detect(struct i2c_client *client)
-{
-       struct i2c_adapter *adapter = client->adapter;
-       u16 high, low;
-       int ret;
-       u16 id;
-       u8 revision;
-
-       if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
-               return -ENODEV;
-
-       ret = ov5693_read_reg(client, OV5693_8BIT,
-                             OV5693_SC_CMMN_CHIP_ID_H, &high);
-       if (ret) {
-               dev_err(&client->dev, "sensor_id_high = 0x%x\n", high);
-               return -ENODEV;
-       }
-       ret = ov5693_read_reg(client, OV5693_8BIT,
-                             OV5693_SC_CMMN_CHIP_ID_L, &low);
-       if (ret)
-               return ret;
-       id = ((((u16)high) << 8) | (u16)low);
-
-       if (id != OV5693_ID) {
-               dev_err(&client->dev, "sensor ID error 0x%x\n", id);
-               return -ENODEV;
-       }
-
-       ret = ov5693_read_reg(client, OV5693_8BIT,
-                             OV5693_SC_CMMN_SUB_ID, &high);
-       revision = (u8)high & 0x0f;
-
-       dev_dbg(&client->dev, "sensor_revision = 0x%x\n", revision);
-       dev_dbg(&client->dev, "detect ov5693 success\n");
-       return 0;
-}
-
-static int ov5693_s_stream(struct v4l2_subdev *sd, int enable)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       int ret;
-
-       mutex_lock(&dev->input_lock);
-
-       ret = ov5693_write_reg(client, OV5693_8BIT, OV5693_SW_STREAM,
-                              enable ? OV5693_START_STREAMING :
-                              OV5693_STOP_STREAMING);
-
-       mutex_unlock(&dev->input_lock);
-
-       return ret;
-}
-
-static int ov5693_s_config(struct v4l2_subdev *sd,
-                          int irq, void *platform_data)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-       struct i2c_client *client = v4l2_get_subdevdata(sd);
-       int ret = 0;
-
-       if (!platform_data)
-               return -ENODEV;
-
-       dev->platform_data =
-           (struct camera_sensor_platform_data *)platform_data;
-
-       mutex_lock(&dev->input_lock);
-       /* power off the module, then power on it in future
-        * as first power on by board may not fulfill the
-        * power on sequqence needed by the module
-        */
-       ret = power_down(sd);
-       if (ret) {
-               dev_err(&client->dev, "ov5693 power-off err.\n");
-               goto fail_power_off;
-       }
-
-       ret = power_up(sd);
-       if (ret) {
-               dev_err(&client->dev, "ov5693 power-up err.\n");
-               goto fail_power_on;
-       }
-
-       if (!dev->vcm)
-               dev->vcm = vcm_detect(client);
-
-       ret = dev->platform_data->csi_cfg(sd, 1);
-       if (ret)
-               goto fail_csi_cfg;
-
-       /* config & detect sensor */
-       ret = ov5693_detect(client);
-       if (ret) {
-               dev_err(&client->dev, "ov5693_detect err s_config.\n");
-               goto fail_csi_cfg;
-       }
-
-       dev->otp_data = ov5693_otp_read(sd);
-
-       /* turn off sensor, after probed */
-       ret = power_down(sd);
-       if (ret) {
-               dev_err(&client->dev, "ov5693 power-off err.\n");
-               goto fail_csi_cfg;
-       }
-       mutex_unlock(&dev->input_lock);
-
-       return ret;
-
-fail_csi_cfg:
-       dev->platform_data->csi_cfg(sd, 0);
-fail_power_on:
-       power_down(sd);
-       dev_err(&client->dev, "sensor power-gating failed\n");
-fail_power_off:
-       mutex_unlock(&dev->input_lock);
-       return ret;
-}
-
-static int ov5693_g_frame_interval(struct v4l2_subdev *sd,
-                                  struct v4l2_subdev_frame_interval *interval)
-{
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-
-       interval->interval.numerator = 1;
-       interval->interval.denominator = ov5693_res[dev->fmt_idx].fps;
-
-       return 0;
-}
-
-static int ov5693_enum_mbus_code(struct v4l2_subdev *sd,
-                                struct v4l2_subdev_state *sd_state,
-                                struct v4l2_subdev_mbus_code_enum *code)
-{
-       if (code->index >= MAX_FMTS)
-               return -EINVAL;
-
-       code->code = MEDIA_BUS_FMT_SBGGR10_1X10;
-       return 0;
-}
-
-static int ov5693_enum_frame_size(struct v4l2_subdev *sd,
-                                 struct v4l2_subdev_state *sd_state,
-                                 struct v4l2_subdev_frame_size_enum *fse)
-{
-       int index = fse->index;
-
-       if (index >= N_RES)
-               return -EINVAL;
-
-       fse->min_width = ov5693_res[index].width;
-       fse->min_height = ov5693_res[index].height;
-       fse->max_width = ov5693_res[index].width;
-       fse->max_height = ov5693_res[index].height;
-
-       return 0;
-}
-
-static const struct v4l2_subdev_video_ops ov5693_video_ops = {
-       .s_stream = ov5693_s_stream,
-       .g_frame_interval = ov5693_g_frame_interval,
-};
-
-static const struct v4l2_subdev_core_ops ov5693_core_ops = {
-       .s_power = ov5693_s_power,
-       .ioctl = ov5693_ioctl,
-};
-
-static const struct v4l2_subdev_pad_ops ov5693_pad_ops = {
-       .enum_mbus_code = ov5693_enum_mbus_code,
-       .enum_frame_size = ov5693_enum_frame_size,
-       .get_fmt = ov5693_get_fmt,
-       .set_fmt = ov5693_set_fmt,
-};
-
-static const struct v4l2_subdev_ops ov5693_ops = {
-       .core = &ov5693_core_ops,
-       .video = &ov5693_video_ops,
-       .pad = &ov5693_pad_ops,
-};
-
-static void ov5693_remove(struct i2c_client *client)
-{
-       struct v4l2_subdev *sd = i2c_get_clientdata(client);
-       struct ov5693_device *dev = to_ov5693_sensor(sd);
-
-       dev_dbg(&client->dev, "ov5693_remove...\n");
-
-       dev->platform_data->csi_cfg(sd, 0);
-
-       v4l2_device_unregister_subdev(sd);
-
-       atomisp_gmin_remove_subdev(sd);
-
-       media_entity_cleanup(&dev->sd.entity);
-       v4l2_ctrl_handler_free(&dev->ctrl_handler);
-       kfree(dev);
-}
-
-static int ov5693_probe(struct i2c_client *client)
-{
-       struct ov5693_device *dev;
-       int i2c;
-       int ret;
-       void *pdata;
-       unsigned int i;
-
-       /*
-        * Firmware workaround: Some modules use a "secondary default"
-        * address of 0x10 which doesn't appear on schematics, and
-        * some BIOS versions haven't gotten the memo.  Work around
-        * via config.
-        */
-       i2c = gmin_get_var_int(&client->dev, false, "I2CAddr", -1);
-       if (i2c != -1) {
-               dev_info(&client->dev,
-                        "Overriding firmware-provided I2C address (0x%x) with 0x%x\n",
-                        client->addr, i2c);
-               client->addr = i2c;
-       }
-
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev)
-               return -ENOMEM;
-
-       mutex_init(&dev->input_lock);
-
-       dev->fmt_idx = 0;
-       v4l2_i2c_subdev_init(&dev->sd, client, &ov5693_ops);
-
-       pdata = gmin_camera_platform_data(&dev->sd,
-                                         ATOMISP_INPUT_FORMAT_RAW_10,
-                                         atomisp_bayer_order_bggr);
-       if (!pdata) {
-               ret = -EINVAL;
-               goto out_free;
-       }
-
-       ret = ov5693_s_config(&dev->sd, client->irq, pdata);
-       if (ret)
-               goto out_free;
-
-       ret = atomisp_register_i2c_module(&dev->sd, pdata, RAW_CAMERA);
-       if (ret)
-               goto out_free;
-
-       dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
-       dev->pad.flags = MEDIA_PAD_FL_SOURCE;
-       dev->format.code = MEDIA_BUS_FMT_SBGGR10_1X10;
-       dev->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
-       ret =
-           v4l2_ctrl_handler_init(&dev->ctrl_handler,
-                                  ARRAY_SIZE(ov5693_controls));
-       if (ret) {
-               ov5693_remove(client);
-               return ret;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(ov5693_controls); i++)
-               v4l2_ctrl_new_custom(&dev->ctrl_handler, &ov5693_controls[i],
-                                    NULL);
-
-       if (dev->ctrl_handler.error) {
-               ov5693_remove(client);
-               return dev->ctrl_handler.error;
-       }
-
-       /* Use same lock for controls as for everything else. */
-       dev->ctrl_handler.lock = &dev->input_lock;
-       dev->sd.ctrl_handler = &dev->ctrl_handler;
-
-       ret = media_entity_pads_init(&dev->sd.entity, 1, &dev->pad);
-       if (ret)
-               ov5693_remove(client);
-
-       return ret;
-out_free:
-       v4l2_device_unregister_subdev(&dev->sd);
-       kfree(dev);
-       return ret;
-}
-
-static const struct acpi_device_id ov5693_acpi_match[] = {
-       {"INT33BE"},
-       {},
-};
-MODULE_DEVICE_TABLE(acpi, ov5693_acpi_match);
-
-static struct i2c_driver ov5693_driver = {
-       .driver = {
-               .name = "ov5693",
-               .acpi_match_table = ov5693_acpi_match,
-       },
-       .probe = ov5693_probe,
-       .remove = ov5693_remove,
-};
-module_i2c_driver(ov5693_driver);
-
-MODULE_DESCRIPTION("A low-level driver for OmniVision 5693 sensors");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h b/drivers/staging/media/atomisp/i2c/ov5693/ov5693.h
deleted file mode 100644 (file)
index 5e17eaf..0000000
+++ /dev/null
@@ -1,1331 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for OmniVision OV5693 5M camera sensor.
- *
- * Copyright (c) 2013 Intel Corporation. All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- *
- */
-
-#ifndef __OV5693_H__
-#define __OV5693_H__
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include <linux/videodev2.h>
-#include <linux/spinlock.h>
-#include <media/v4l2-subdev.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ctrls.h>
-#include <linux/v4l2-mediabus.h>
-#include <media/media-entity.h>
-
-#include "../../include/linux/atomisp_platform.h"
-
-/*
- * FIXME: non-preview resolutions are currently broken
- */
-#define ENABLE_NON_PREVIEW     0
-
-#define OV5693_POWER_UP_RETRY_NUM 5
-
-/* Defines for register writes and register array processing */
-#define I2C_MSG_LENGTH         0x2
-#define I2C_RETRY_COUNT                5
-
-#define OV5693_FOCAL_LENGTH_NUM        334     /*3.34mm*/
-#define OV5693_FOCAL_LENGTH_DEM        100
-#define OV5693_F_NUMBER_DEFAULT_NUM    24
-#define OV5693_F_NUMBER_DEM    10
-
-#define MAX_FMTS               1
-
-/* sensor_mode_data read_mode adaptation */
-#define OV5693_READ_MODE_BINNING_ON    0x0400
-#define OV5693_READ_MODE_BINNING_OFF   0x00
-#define OV5693_INTEGRATION_TIME_MARGIN 8
-
-#define OV5693_MAX_EXPOSURE_VALUE      0xFFF1
-#define OV5693_MAX_GAIN_VALUE          0xFF
-
-/*
- * focal length bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define OV5693_FOCAL_LENGTH_DEFAULT 0x1B70064
-
-/*
- * current f-number bits definition:
- * bits 31-16: numerator, bits 15-0: denominator
- */
-#define OV5693_F_NUMBER_DEFAULT 0x18000a
-
-/*
- * f-number range bits definition:
- * bits 31-24: max f-number numerator
- * bits 23-16: max f-number denominator
- * bits 15-8: min f-number numerator
- * bits 7-0: min f-number denominator
- */
-#define OV5693_F_NUMBER_RANGE 0x180a180a
-#define OV5693_ID      0x5690
-
-#define OV5693_FINE_INTG_TIME_MIN 0
-#define OV5693_FINE_INTG_TIME_MAX_MARGIN 0
-#define OV5693_COARSE_INTG_TIME_MIN 1
-#define OV5693_COARSE_INTG_TIME_MAX_MARGIN 6
-
-#define OV5693_BIN_FACTOR_MAX 4
-/*
- * OV5693 System control registers
- */
-#define OV5693_SW_SLEEP                                0x0100
-#define OV5693_SW_RESET                                0x0103
-#define OV5693_SW_STREAM                       0x0100
-
-#define OV5693_SC_CMMN_CHIP_ID_H               0x300A
-#define OV5693_SC_CMMN_CHIP_ID_L               0x300B
-#define OV5693_SC_CMMN_SCCB_ID                 0x300C
-#define OV5693_SC_CMMN_SUB_ID                  0x302A /* process, version*/
-/*Bit[7:4] Group control, Bit[3:0] Group ID*/
-#define OV5693_GROUP_ACCESS                    0x3208
-/*
-*Bit[3:0] Bit[19:16] of exposure,
-*remaining 16 bits lies in Reg0x3501&Reg0x3502
-*/
-#define OV5693_EXPOSURE_H                      0x3500
-#define OV5693_EXPOSURE_M                      0x3501
-#define OV5693_EXPOSURE_L                      0x3502
-/*Bit[1:0] means Bit[9:8] of gain*/
-#define OV5693_AGC_H                           0x350A
-#define OV5693_AGC_L                           0x350B /*Bit[7:0] of gain*/
-
-#define OV5693_HORIZONTAL_START_H              0x3800 /*Bit[11:8]*/
-#define OV5693_HORIZONTAL_START_L              0x3801 /*Bit[7:0]*/
-#define OV5693_VERTICAL_START_H                        0x3802 /*Bit[11:8]*/
-#define OV5693_VERTICAL_START_L                        0x3803 /*Bit[7:0]*/
-#define OV5693_HORIZONTAL_END_H                        0x3804 /*Bit[11:8]*/
-#define OV5693_HORIZONTAL_END_L                        0x3805 /*Bit[7:0]*/
-#define OV5693_VERTICAL_END_H                  0x3806 /*Bit[11:8]*/
-#define OV5693_VERTICAL_END_L                  0x3807 /*Bit[7:0]*/
-#define OV5693_HORIZONTAL_OUTPUT_SIZE_H                0x3808 /*Bit[3:0]*/
-#define OV5693_HORIZONTAL_OUTPUT_SIZE_L                0x3809 /*Bit[7:0]*/
-#define OV5693_VERTICAL_OUTPUT_SIZE_H          0x380a /*Bit[3:0]*/
-#define OV5693_VERTICAL_OUTPUT_SIZE_L          0x380b /*Bit[7:0]*/
-/*High 8-bit, and low 8-bit HTS address is 0x380d*/
-#define OV5693_TIMING_HTS_H                    0x380C
-/*High 8-bit, and low 8-bit HTS address is 0x380d*/
-#define OV5693_TIMING_HTS_L                    0x380D
-/*High 8-bit, and low 8-bit HTS address is 0x380f*/
-#define OV5693_TIMING_VTS_H                    0x380e
-/*High 8-bit, and low 8-bit HTS address is 0x380f*/
-#define OV5693_TIMING_VTS_L                    0x380f
-
-#define OV5693_MWB_RED_GAIN_H                  0x3400
-#define OV5693_MWB_GREEN_GAIN_H                        0x3402
-#define OV5693_MWB_BLUE_GAIN_H                 0x3404
-#define OV5693_MWB_GAIN_MAX                    0x0fff
-
-#define OV5693_START_STREAMING                 0x01
-#define OV5693_STOP_STREAMING                  0x00
-
-#define VCM_ADDR           0x0c
-#define VCM_CODE_MSB       0x04
-
-#define OV5693_INVALID_CONFIG  0xffffffff
-
-#define OV5693_VCM_SLEW_STEP                   0x30F0
-#define OV5693_VCM_SLEW_STEP_MAX               0x7
-#define OV5693_VCM_SLEW_STEP_MASK              0x7
-#define OV5693_VCM_CODE                                0x30F2
-#define OV5693_VCM_SLEW_TIME                   0x30F4
-#define OV5693_VCM_SLEW_TIME_MAX               0xffff
-#define OV5693_VCM_ENABLE                      0x8000
-
-#define OV5693_VCM_MAX_FOCUS_NEG       -1023
-#define OV5693_VCM_MAX_FOCUS_POS       1023
-
-#define DLC_ENABLE 1
-#define DLC_DISABLE 0
-#define VCM_PROTECTION_OFF     0xeca3
-#define VCM_PROTECTION_ON      0xdc51
-#define VCM_DEFAULT_S 0x0
-#define vcm_step_s(a) (u8)(a & 0xf)
-#define vcm_step_mclk(a) (u8)((a >> 4) & 0x3)
-#define vcm_dlc_mclk(dlc, mclk) (u16)((dlc << 3) | mclk | 0xa104)
-#define vcm_tsrc(tsrc) (u16)(tsrc << 3 | 0xf200)
-#define vcm_val(data, s) (u16)(data << 4 | s)
-#define DIRECT_VCM vcm_dlc_mclk(0, 0)
-
-/* Defines for OTP Data Registers */
-#define OV5693_FRAME_OFF_NUM           0x4202
-#define OV5693_OTP_BYTE_MAX            32      //change to 32 as needed by otpdata
-#define OV5693_OTP_SHORT_MAX           16
-#define OV5693_OTP_START_ADDR          0x3D00
-#define OV5693_OTP_END_ADDR            0x3D0F
-#define OV5693_OTP_DATA_SIZE           320
-#define OV5693_OTP_PROGRAM_REG         0x3D80
-#define OV5693_OTP_READ_REG            0x3D81  // 1:Enable 0:disable
-#define OV5693_OTP_BANK_REG            0x3D84  //otp bank and mode
-#define OV5693_OTP_READY_REG_DONE      1
-#define OV5693_OTP_BANK_MAX            28
-#define OV5693_OTP_BANK_SIZE           16      //16 bytes per bank
-#define OV5693_OTP_READ_ONETIME                16
-#define OV5693_OTP_MODE_READ           1
-
-struct regval_list {
-       u16 reg_num;
-       u8 value;
-};
-
-struct ov5693_resolution {
-       u8 *desc;
-       const struct ov5693_reg *regs;
-       int res;
-       int width;
-       int height;
-       int fps;
-       int pix_clk_freq;
-       u16 pixels_per_line;
-       u16 lines_per_frame;
-       bool used;
-};
-
-struct ov5693_format {
-       u8 *desc;
-       u32 pixelformat;
-       struct ov5693_reg *regs;
-};
-
-enum vcm_type {
-       VCM_UNKNOWN,
-       VCM_AD5823,
-       VCM_DW9714,
-};
-
-/*
- * ov5693 device structure.
- */
-struct ov5693_device {
-       struct v4l2_subdev sd;
-       struct media_pad pad;
-       struct v4l2_mbus_framefmt format;
-       struct mutex input_lock;
-       struct v4l2_ctrl_handler ctrl_handler;
-
-       struct camera_sensor_platform_data *platform_data;
-       ktime_t timestamp_t_focus_abs;
-       int fmt_idx;
-       int run_mode;
-       int otp_size;
-       u8 *otp_data;
-       u32 focus;
-       s16 number_of_steps;
-       u8 res;
-       u8 type;
-       bool vcm_update;
-       enum vcm_type vcm;
-};
-
-enum ov5693_tok_type {
-       OV5693_8BIT  = 0x0001,
-       OV5693_16BIT = 0x0002,
-       OV5693_32BIT = 0x0004,
-       OV5693_TOK_TERM   = 0xf000,     /* terminating token for reg list */
-       OV5693_TOK_DELAY  = 0xfe00,     /* delay token for reg list */
-       OV5693_TOK_MASK = 0xfff0
-};
-
-/**
- * struct ov5693_reg - MI sensor  register format
- * @type: type of the register
- * @reg: 16-bit offset to register
- * @val: 8/16/32-bit register value
- *
- * Define a structure for sensor register initialization values
- */
-struct ov5693_reg {
-       enum ov5693_tok_type type;
-       u16 reg;
-       u32 val;        /* @set value for read/mod/write, @mask */
-};
-
-#define to_ov5693_sensor(x) container_of(x, struct ov5693_device, sd)
-
-#define OV5693_MAX_WRITE_BUF_SIZE      30
-
-struct ov5693_write_buffer {
-       u16 addr;
-       u8 data[OV5693_MAX_WRITE_BUF_SIZE];
-};
-
-struct ov5693_write_ctrl {
-       int index;
-       struct ov5693_write_buffer buffer;
-};
-
-static struct ov5693_reg const ov5693_global_setting[] = {
-       {OV5693_8BIT, 0x0103, 0x01},
-       {OV5693_8BIT, 0x3001, 0x0a},
-       {OV5693_8BIT, 0x3002, 0x80},
-       {OV5693_8BIT, 0x3006, 0x00},
-       {OV5693_8BIT, 0x3011, 0x21},
-       {OV5693_8BIT, 0x3012, 0x09},
-       {OV5693_8BIT, 0x3013, 0x10},
-       {OV5693_8BIT, 0x3014, 0x00},
-       {OV5693_8BIT, 0x3015, 0x08},
-       {OV5693_8BIT, 0x3016, 0xf0},
-       {OV5693_8BIT, 0x3017, 0xf0},
-       {OV5693_8BIT, 0x3018, 0xf0},
-       {OV5693_8BIT, 0x301b, 0xb4},
-       {OV5693_8BIT, 0x301d, 0x02},
-       {OV5693_8BIT, 0x3021, 0x00},
-       {OV5693_8BIT, 0x3022, 0x01},
-       {OV5693_8BIT, 0x3028, 0x44},
-       {OV5693_8BIT, 0x3098, 0x02},
-       {OV5693_8BIT, 0x3099, 0x19},
-       {OV5693_8BIT, 0x309a, 0x02},
-       {OV5693_8BIT, 0x309b, 0x01},
-       {OV5693_8BIT, 0x309c, 0x00},
-       {OV5693_8BIT, 0x30a0, 0xd2},
-       {OV5693_8BIT, 0x30a2, 0x01},
-       {OV5693_8BIT, 0x30b2, 0x00},
-       {OV5693_8BIT, 0x30b3, 0x7d},
-       {OV5693_8BIT, 0x30b4, 0x03},
-       {OV5693_8BIT, 0x30b5, 0x04},
-       {OV5693_8BIT, 0x30b6, 0x01},
-       {OV5693_8BIT, 0x3104, 0x21},
-       {OV5693_8BIT, 0x3106, 0x00},
-       {OV5693_8BIT, 0x3400, 0x04},
-       {OV5693_8BIT, 0x3401, 0x00},
-       {OV5693_8BIT, 0x3402, 0x04},
-       {OV5693_8BIT, 0x3403, 0x00},
-       {OV5693_8BIT, 0x3404, 0x04},
-       {OV5693_8BIT, 0x3405, 0x00},
-       {OV5693_8BIT, 0x3406, 0x01},
-       {OV5693_8BIT, 0x3500, 0x00},
-       {OV5693_8BIT, 0x3503, 0x07},
-       {OV5693_8BIT, 0x3504, 0x00},
-       {OV5693_8BIT, 0x3505, 0x00},
-       {OV5693_8BIT, 0x3506, 0x00},
-       {OV5693_8BIT, 0x3507, 0x02},
-       {OV5693_8BIT, 0x3508, 0x00},
-       {OV5693_8BIT, 0x3509, 0x10},
-       {OV5693_8BIT, 0x350a, 0x00},
-       {OV5693_8BIT, 0x350b, 0x40},
-       {OV5693_8BIT, 0x3601, 0x0a},
-       {OV5693_8BIT, 0x3602, 0x38},
-       {OV5693_8BIT, 0x3612, 0x80},
-       {OV5693_8BIT, 0x3620, 0x54},
-       {OV5693_8BIT, 0x3621, 0xc7},
-       {OV5693_8BIT, 0x3622, 0x0f},
-       {OV5693_8BIT, 0x3625, 0x10},
-       {OV5693_8BIT, 0x3630, 0x55},
-       {OV5693_8BIT, 0x3631, 0xf4},
-       {OV5693_8BIT, 0x3632, 0x00},
-       {OV5693_8BIT, 0x3633, 0x34},
-       {OV5693_8BIT, 0x3634, 0x02},
-       {OV5693_8BIT, 0x364d, 0x0d},
-       {OV5693_8BIT, 0x364f, 0xdd},
-       {OV5693_8BIT, 0x3660, 0x04},
-       {OV5693_8BIT, 0x3662, 0x10},
-       {OV5693_8BIT, 0x3663, 0xf1},
-       {OV5693_8BIT, 0x3665, 0x00},
-       {OV5693_8BIT, 0x3666, 0x20},
-       {OV5693_8BIT, 0x3667, 0x00},
-       {OV5693_8BIT, 0x366a, 0x80},
-       {OV5693_8BIT, 0x3680, 0xe0},
-       {OV5693_8BIT, 0x3681, 0x00},
-       {OV5693_8BIT, 0x3700, 0x42},
-       {OV5693_8BIT, 0x3701, 0x14},
-       {OV5693_8BIT, 0x3702, 0xa0},
-       {OV5693_8BIT, 0x3703, 0xd8},
-       {OV5693_8BIT, 0x3704, 0x78},
-       {OV5693_8BIT, 0x3705, 0x02},
-       {OV5693_8BIT, 0x370a, 0x00},
-       {OV5693_8BIT, 0x370b, 0x20},
-       {OV5693_8BIT, 0x370c, 0x0c},
-       {OV5693_8BIT, 0x370d, 0x11},
-       {OV5693_8BIT, 0x370e, 0x00},
-       {OV5693_8BIT, 0x370f, 0x40},
-       {OV5693_8BIT, 0x3710, 0x00},
-       {OV5693_8BIT, 0x371a, 0x1c},
-       {OV5693_8BIT, 0x371b, 0x05},
-       {OV5693_8BIT, 0x371c, 0x01},
-       {OV5693_8BIT, 0x371e, 0xa1},
-       {OV5693_8BIT, 0x371f, 0x0c},
-       {OV5693_8BIT, 0x3721, 0x00},
-       {OV5693_8BIT, 0x3724, 0x10},
-       {OV5693_8BIT, 0x3726, 0x00},
-       {OV5693_8BIT, 0x372a, 0x01},
-       {OV5693_8BIT, 0x3730, 0x10},
-       {OV5693_8BIT, 0x3738, 0x22},
-       {OV5693_8BIT, 0x3739, 0xe5},
-       {OV5693_8BIT, 0x373a, 0x50},
-       {OV5693_8BIT, 0x373b, 0x02},
-       {OV5693_8BIT, 0x373c, 0x41},
-       {OV5693_8BIT, 0x373f, 0x02},
-       {OV5693_8BIT, 0x3740, 0x42},
-       {OV5693_8BIT, 0x3741, 0x02},
-       {OV5693_8BIT, 0x3742, 0x18},
-       {OV5693_8BIT, 0x3743, 0x01},
-       {OV5693_8BIT, 0x3744, 0x02},
-       {OV5693_8BIT, 0x3747, 0x10},
-       {OV5693_8BIT, 0x374c, 0x04},
-       {OV5693_8BIT, 0x3751, 0xf0},
-       {OV5693_8BIT, 0x3752, 0x00},
-       {OV5693_8BIT, 0x3753, 0x00},
-       {OV5693_8BIT, 0x3754, 0xc0},
-       {OV5693_8BIT, 0x3755, 0x00},
-       {OV5693_8BIT, 0x3756, 0x1a},
-       {OV5693_8BIT, 0x3758, 0x00},
-       {OV5693_8BIT, 0x3759, 0x0f},
-       {OV5693_8BIT, 0x376b, 0x44},
-       {OV5693_8BIT, 0x375c, 0x04},
-       {OV5693_8BIT, 0x3774, 0x10},
-       {OV5693_8BIT, 0x3776, 0x00},
-       {OV5693_8BIT, 0x377f, 0x08},
-       {OV5693_8BIT, 0x3780, 0x22},
-       {OV5693_8BIT, 0x3781, 0x0c},
-       {OV5693_8BIT, 0x3784, 0x2c},
-       {OV5693_8BIT, 0x3785, 0x1e},
-       {OV5693_8BIT, 0x378f, 0xf5},
-       {OV5693_8BIT, 0x3791, 0xb0},
-       {OV5693_8BIT, 0x3795, 0x00},
-       {OV5693_8BIT, 0x3796, 0x64},
-       {OV5693_8BIT, 0x3797, 0x11},
-       {OV5693_8BIT, 0x3798, 0x30},
-       {OV5693_8BIT, 0x3799, 0x41},
-       {OV5693_8BIT, 0x379a, 0x07},
-       {OV5693_8BIT, 0x379b, 0xb0},
-       {OV5693_8BIT, 0x379c, 0x0c},
-       {OV5693_8BIT, 0x37c5, 0x00},
-       {OV5693_8BIT, 0x37c6, 0x00},
-       {OV5693_8BIT, 0x37c7, 0x00},
-       {OV5693_8BIT, 0x37c9, 0x00},
-       {OV5693_8BIT, 0x37ca, 0x00},
-       {OV5693_8BIT, 0x37cb, 0x00},
-       {OV5693_8BIT, 0x37de, 0x00},
-       {OV5693_8BIT, 0x37df, 0x00},
-       {OV5693_8BIT, 0x3800, 0x00},
-       {OV5693_8BIT, 0x3801, 0x00},
-       {OV5693_8BIT, 0x3802, 0x00},
-       {OV5693_8BIT, 0x3804, 0x0a},
-       {OV5693_8BIT, 0x3805, 0x3f},
-       {OV5693_8BIT, 0x3810, 0x00},
-       {OV5693_8BIT, 0x3812, 0x00},
-       {OV5693_8BIT, 0x3823, 0x00},
-       {OV5693_8BIT, 0x3824, 0x00},
-       {OV5693_8BIT, 0x3825, 0x00},
-       {OV5693_8BIT, 0x3826, 0x00},
-       {OV5693_8BIT, 0x3827, 0x00},
-       {OV5693_8BIT, 0x382a, 0x04},
-       {OV5693_8BIT, 0x3a04, 0x06},
-       {OV5693_8BIT, 0x3a05, 0x14},
-       {OV5693_8BIT, 0x3a06, 0x00},
-       {OV5693_8BIT, 0x3a07, 0xfe},
-       {OV5693_8BIT, 0x3b00, 0x00},
-       {OV5693_8BIT, 0x3b02, 0x00},
-       {OV5693_8BIT, 0x3b03, 0x00},
-       {OV5693_8BIT, 0x3b04, 0x00},
-       {OV5693_8BIT, 0x3b05, 0x00},
-       {OV5693_8BIT, 0x3e07, 0x20},
-       {OV5693_8BIT, 0x4000, 0x08},
-       {OV5693_8BIT, 0x4001, 0x04},
-       {OV5693_8BIT, 0x4002, 0x45},
-       {OV5693_8BIT, 0x4004, 0x08},
-       {OV5693_8BIT, 0x4005, 0x18},
-       {OV5693_8BIT, 0x4006, 0x20},
-       {OV5693_8BIT, 0x4008, 0x24},
-       {OV5693_8BIT, 0x4009, 0x10},
-       {OV5693_8BIT, 0x400c, 0x00},
-       {OV5693_8BIT, 0x400d, 0x00},
-       {OV5693_8BIT, 0x4058, 0x00},
-       {OV5693_8BIT, 0x404e, 0x37},
-       {OV5693_8BIT, 0x404f, 0x8f},
-       {OV5693_8BIT, 0x4058, 0x00},
-       {OV5693_8BIT, 0x4101, 0xb2},
-       {OV5693_8BIT, 0x4303, 0x00},
-       {OV5693_8BIT, 0x4304, 0x08},
-       {OV5693_8BIT, 0x4307, 0x31},
-       {OV5693_8BIT, 0x4311, 0x04},
-       {OV5693_8BIT, 0x4315, 0x01},
-       {OV5693_8BIT, 0x4511, 0x05},
-       {OV5693_8BIT, 0x4512, 0x01},
-       {OV5693_8BIT, 0x4806, 0x00},
-       {OV5693_8BIT, 0x4816, 0x52},
-       {OV5693_8BIT, 0x481f, 0x30},
-       {OV5693_8BIT, 0x4826, 0x2c},
-       {OV5693_8BIT, 0x4831, 0x64},
-       {OV5693_8BIT, 0x4d00, 0x04},
-       {OV5693_8BIT, 0x4d01, 0x71},
-       {OV5693_8BIT, 0x4d02, 0xfd},
-       {OV5693_8BIT, 0x4d03, 0xf5},
-       {OV5693_8BIT, 0x4d04, 0x0c},
-       {OV5693_8BIT, 0x4d05, 0xcc},
-       {OV5693_8BIT, 0x4837, 0x0a},
-       {OV5693_8BIT, 0x5000, 0x06},
-       {OV5693_8BIT, 0x5001, 0x01},
-       {OV5693_8BIT, 0x5003, 0x20},
-       {OV5693_8BIT, 0x5046, 0x0a},
-       {OV5693_8BIT, 0x5013, 0x00},
-       {OV5693_8BIT, 0x5046, 0x0a},
-       {OV5693_8BIT, 0x5780, 0x1c},
-       {OV5693_8BIT, 0x5786, 0x20},
-       {OV5693_8BIT, 0x5787, 0x10},
-       {OV5693_8BIT, 0x5788, 0x18},
-       {OV5693_8BIT, 0x578a, 0x04},
-       {OV5693_8BIT, 0x578b, 0x02},
-       {OV5693_8BIT, 0x578c, 0x02},
-       {OV5693_8BIT, 0x578e, 0x06},
-       {OV5693_8BIT, 0x578f, 0x02},
-       {OV5693_8BIT, 0x5790, 0x02},
-       {OV5693_8BIT, 0x5791, 0xff},
-       {OV5693_8BIT, 0x5842, 0x01},
-       {OV5693_8BIT, 0x5843, 0x2b},
-       {OV5693_8BIT, 0x5844, 0x01},
-       {OV5693_8BIT, 0x5845, 0x92},
-       {OV5693_8BIT, 0x5846, 0x01},
-       {OV5693_8BIT, 0x5847, 0x8f},
-       {OV5693_8BIT, 0x5848, 0x01},
-       {OV5693_8BIT, 0x5849, 0x0c},
-       {OV5693_8BIT, 0x5e00, 0x00},
-       {OV5693_8BIT, 0x5e10, 0x0c},
-       {OV5693_8BIT, 0x0100, 0x00},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-#if ENABLE_NON_PREVIEW
-/*
- * 654x496 30fps 17ms VBlanking 2lane 10Bit (Scaling)
- */
-static struct ov5693_reg const ov5693_654x496[] = {
-       {OV5693_8BIT, 0x3501, 0x3d},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe6},
-       {OV5693_8BIT, 0x3709, 0xc7},
-       {OV5693_8BIT, 0x3803, 0x00},
-       {OV5693_8BIT, 0x3806, 0x07},
-       {OV5693_8BIT, 0x3807, 0xa3},
-       {OV5693_8BIT, 0x3808, 0x02},
-       {OV5693_8BIT, 0x3809, 0x90},
-       {OV5693_8BIT, 0x380a, 0x01},
-       {OV5693_8BIT, 0x380b, 0xf0},
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x08},
-       {OV5693_8BIT, 0x3813, 0x02},
-       {OV5693_8BIT, 0x3814, 0x31},
-       {OV5693_8BIT, 0x3815, 0x31},
-       {OV5693_8BIT, 0x3820, 0x04},
-       {OV5693_8BIT, 0x3821, 0x1f},
-       {OV5693_8BIT, 0x5002, 0x80},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-/*
- * 1296x976 30fps 17ms VBlanking 2lane 10Bit (Scaling)
-*DS from 2592x1952
-*/
-static struct ov5693_reg const ov5693_1296x976[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-
-       {OV5693_8BIT, 0x3800, 0x00},
-       {OV5693_8BIT, 0x3801, 0x00},
-       {OV5693_8BIT, 0x3802, 0x00},
-       {OV5693_8BIT, 0x3803, 0x00},
-
-       {OV5693_8BIT, 0x3804, 0x0a},
-       {OV5693_8BIT, 0x3805, 0x3f},
-       {OV5693_8BIT, 0x3806, 0x07},
-       {OV5693_8BIT, 0x3807, 0xA3},
-
-       {OV5693_8BIT, 0x3808, 0x05},
-       {OV5693_8BIT, 0x3809, 0x10},
-       {OV5693_8BIT, 0x380a, 0x03},
-       {OV5693_8BIT, 0x380b, 0xD0},
-
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-
-       {OV5693_8BIT, 0x3810, 0x00},
-       {OV5693_8BIT, 0x3811, 0x10},
-       {OV5693_8BIT, 0x3812, 0x00},
-       {OV5693_8BIT, 0x3813, 0x02},
-
-       {OV5693_8BIT, 0x3814, 0x11},    /*X subsample control*/
-       {OV5693_8BIT, 0x3815, 0x11},    /*Y subsample control*/
-       {OV5693_8BIT, 0x3820, 0x00},
-       {OV5693_8BIT, 0x3821, 0x1e},
-       {OV5693_8BIT, 0x5002, 0x00},
-       {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-
-};
-
-/*
- * 336x256 30fps 17ms VBlanking 2lane 10Bit (Scaling)
- DS from 2564x1956
- */
-static struct ov5693_reg const ov5693_336x256[] = {
-       {OV5693_8BIT, 0x3501, 0x3d},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe6},
-       {OV5693_8BIT, 0x3709, 0xc7},
-       {OV5693_8BIT, 0x3806, 0x07},
-       {OV5693_8BIT, 0x3807, 0xa3},
-       {OV5693_8BIT, 0x3808, 0x01},
-       {OV5693_8BIT, 0x3809, 0x50},
-       {OV5693_8BIT, 0x380a, 0x01},
-       {OV5693_8BIT, 0x380b, 0x00},
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x1E},
-       {OV5693_8BIT, 0x3814, 0x31},
-       {OV5693_8BIT, 0x3815, 0x31},
-       {OV5693_8BIT, 0x3820, 0x04},
-       {OV5693_8BIT, 0x3821, 0x1f},
-       {OV5693_8BIT, 0x5002, 0x80},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-/*
- * 336x256 30fps 17ms VBlanking 2lane 10Bit (Scaling)
- DS from 2368x1956
- */
-static struct ov5693_reg const ov5693_368x304[] = {
-       {OV5693_8BIT, 0x3501, 0x3d},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe6},
-       {OV5693_8BIT, 0x3709, 0xc7},
-       {OV5693_8BIT, 0x3808, 0x01},
-       {OV5693_8BIT, 0x3809, 0x70},
-       {OV5693_8BIT, 0x380a, 0x01},
-       {OV5693_8BIT, 0x380b, 0x30},
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x80},
-       {OV5693_8BIT, 0x3814, 0x31},
-       {OV5693_8BIT, 0x3815, 0x31},
-       {OV5693_8BIT, 0x3820, 0x04},
-       {OV5693_8BIT, 0x3821, 0x1f},
-       {OV5693_8BIT, 0x5002, 0x80},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-/*
- * ov5693_192x160 30fps 17ms VBlanking 2lane 10Bit (Scaling)
- DS from 2460x1956
- */
-static struct ov5693_reg const ov5693_192x160[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x80},
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3804, 0x0a},
-       {OV5693_8BIT, 0x3805, 0x3f},
-       {OV5693_8BIT, 0x3806, 0x07},
-       {OV5693_8BIT, 0x3807, 0xA3},
-       {OV5693_8BIT, 0x3808, 0x00},
-       {OV5693_8BIT, 0x3809, 0xC0},
-       {OV5693_8BIT, 0x380a, 0x00},
-       {OV5693_8BIT, 0x380b, 0xA0},
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x40},
-       {OV5693_8BIT, 0x3813, 0x00},
-       {OV5693_8BIT, 0x3814, 0x31},
-       {OV5693_8BIT, 0x3815, 0x31},
-       {OV5693_8BIT, 0x3820, 0x04},
-       {OV5693_8BIT, 0x3821, 0x1f},
-       {OV5693_8BIT, 0x5002, 0x80},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-static struct ov5693_reg const ov5693_736x496[] = {
-       {OV5693_8BIT, 0x3501, 0x3d},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe6},
-       {OV5693_8BIT, 0x3709, 0xc7},
-       {OV5693_8BIT, 0x3803, 0x68},
-       {OV5693_8BIT, 0x3806, 0x07},
-       {OV5693_8BIT, 0x3807, 0x3b},
-       {OV5693_8BIT, 0x3808, 0x02},
-       {OV5693_8BIT, 0x3809, 0xe0},
-       {OV5693_8BIT, 0x380a, 0x01},
-       {OV5693_8BIT, 0x380b, 0xf0},
-       {OV5693_8BIT, 0x380c, 0x0a}, /*hts*/
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07}, /*vts*/
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x08},
-       {OV5693_8BIT, 0x3813, 0x02},
-       {OV5693_8BIT, 0x3814, 0x31},
-       {OV5693_8BIT, 0x3815, 0x31},
-       {OV5693_8BIT, 0x3820, 0x04},
-       {OV5693_8BIT, 0x3821, 0x1f},
-       {OV5693_8BIT, 0x5002, 0x80},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-#endif
-
-/*
-static struct ov5693_reg const ov5693_736x496[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe6},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3803, 0x00},
-       {OV5693_8BIT, 0x3806, 0x07},
-       {OV5693_8BIT, 0x3807, 0xa3},
-       {OV5693_8BIT, 0x3808, 0x02},
-       {OV5693_8BIT, 0x3809, 0xe0},
-       {OV5693_8BIT, 0x380a, 0x01},
-       {OV5693_8BIT, 0x380b, 0xf0},
-       {OV5693_8BIT, 0x380c, 0x0d},
-       {OV5693_8BIT, 0x380d, 0xb0},
-       {OV5693_8BIT, 0x380e, 0x05},
-       {OV5693_8BIT, 0x380f, 0xf2},
-       {OV5693_8BIT, 0x3811, 0x08},
-       {OV5693_8BIT, 0x3813, 0x02},
-       {OV5693_8BIT, 0x3814, 0x31},
-       {OV5693_8BIT, 0x3815, 0x31},
-       {OV5693_8BIT, 0x3820, 0x01},
-       {OV5693_8BIT, 0x3821, 0x1f},
-       {OV5693_8BIT, 0x5002, 0x00},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-*/
-/*
- * 976x556 30fps 8.8ms VBlanking 2lane 10Bit (Scaling)
- */
-#if ENABLE_NON_PREVIEW
-static struct ov5693_reg const ov5693_976x556[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3803, 0xf0},
-       {OV5693_8BIT, 0x3806, 0x06},
-       {OV5693_8BIT, 0x3807, 0xa7},
-       {OV5693_8BIT, 0x3808, 0x03},
-       {OV5693_8BIT, 0x3809, 0xd0},
-       {OV5693_8BIT, 0x380a, 0x02},
-       {OV5693_8BIT, 0x380b, 0x2C},
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x10},
-       {OV5693_8BIT, 0x3813, 0x02},
-       {OV5693_8BIT, 0x3814, 0x11},
-       {OV5693_8BIT, 0x3815, 0x11},
-       {OV5693_8BIT, 0x3820, 0x00},
-       {OV5693_8BIT, 0x3821, 0x1e},
-       {OV5693_8BIT, 0x5002, 0x80},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-/*DS from 2624x1492*/
-static struct ov5693_reg const ov5693_1296x736[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-
-       {OV5693_8BIT, 0x3800, 0x00},
-       {OV5693_8BIT, 0x3801, 0x00},
-       {OV5693_8BIT, 0x3802, 0x00},
-       {OV5693_8BIT, 0x3803, 0x00},
-
-       {OV5693_8BIT, 0x3804, 0x0a},
-       {OV5693_8BIT, 0x3805, 0x3f},
-       {OV5693_8BIT, 0x3806, 0x07},
-       {OV5693_8BIT, 0x3807, 0xA3},
-
-       {OV5693_8BIT, 0x3808, 0x05},
-       {OV5693_8BIT, 0x3809, 0x10},
-       {OV5693_8BIT, 0x380a, 0x02},
-       {OV5693_8BIT, 0x380b, 0xe0},
-
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-
-       {OV5693_8BIT, 0x3813, 0xE8},
-
-       {OV5693_8BIT, 0x3814, 0x11},    /*X subsample control*/
-       {OV5693_8BIT, 0x3815, 0x11},    /*Y subsample control*/
-       {OV5693_8BIT, 0x3820, 0x00},
-       {OV5693_8BIT, 0x3821, 0x1e},
-       {OV5693_8BIT, 0x5002, 0x00},
-       {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-static struct ov5693_reg const ov5693_1636p_30fps[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3803, 0xf0},
-       {OV5693_8BIT, 0x3806, 0x06},
-       {OV5693_8BIT, 0x3807, 0xa7},
-       {OV5693_8BIT, 0x3808, 0x06},
-       {OV5693_8BIT, 0x3809, 0x64},
-       {OV5693_8BIT, 0x380a, 0x04},
-       {OV5693_8BIT, 0x380b, 0x48},
-       {OV5693_8BIT, 0x380c, 0x0a}, /*hts*/
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07}, /*vts*/
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x02},
-       {OV5693_8BIT, 0x3813, 0x02},
-       {OV5693_8BIT, 0x3814, 0x11},
-       {OV5693_8BIT, 0x3815, 0x11},
-       {OV5693_8BIT, 0x3820, 0x00},
-       {OV5693_8BIT, 0x3821, 0x1e},
-       {OV5693_8BIT, 0x5002, 0x80},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-#endif
-
-static struct ov5693_reg const ov5693_1616x1216_30fps[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x80},
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3800, 0x00},    /*{3800,3801} Array X start*/
-       {OV5693_8BIT, 0x3801, 0x08},    /* 04 //{3800,3801} Array X start*/
-       {OV5693_8BIT, 0x3802, 0x00},    /*{3802,3803} Array Y start*/
-       {OV5693_8BIT, 0x3803, 0x04},    /* 00  //{3802,3803} Array Y start*/
-       {OV5693_8BIT, 0x3804, 0x0a},    /*{3804,3805} Array X end*/
-       {OV5693_8BIT, 0x3805, 0x37},    /* 3b  //{3804,3805} Array X end*/
-       {OV5693_8BIT, 0x3806, 0x07},    /*{3806,3807} Array Y end*/
-       {OV5693_8BIT, 0x3807, 0x9f},    /* a3  //{3806,3807} Array Y end*/
-       {OV5693_8BIT, 0x3808, 0x06},    /*{3808,3809} Final output H size*/
-       {OV5693_8BIT, 0x3809, 0x50},    /*{3808,3809} Final output H size*/
-       {OV5693_8BIT, 0x380a, 0x04},    /*{380a,380b} Final output V size*/
-       {OV5693_8BIT, 0x380b, 0xc0},    /*{380a,380b} Final output V size*/
-       {OV5693_8BIT, 0x380c, 0x0a},    /*{380c,380d} HTS*/
-       {OV5693_8BIT, 0x380d, 0x80},    /*{380c,380d} HTS*/
-       {OV5693_8BIT, 0x380e, 0x07},    /*{380e,380f} VTS*/
-       {OV5693_8BIT, 0x380f, 0xc0},    /* bc   //{380e,380f} VTS*/
-       {OV5693_8BIT, 0x3810, 0x00},    /*{3810,3811} windowing X offset*/
-       {OV5693_8BIT, 0x3811, 0x10},    /*{3810,3811} windowing X offset*/
-       {OV5693_8BIT, 0x3812, 0x00},    /*{3812,3813} windowing Y offset*/
-       {OV5693_8BIT, 0x3813, 0x06},    /*{3812,3813} windowing Y offset*/
-       {OV5693_8BIT, 0x3814, 0x11},    /*X subsample control*/
-       {OV5693_8BIT, 0x3815, 0x11},    /*Y subsample control*/
-       {OV5693_8BIT, 0x3820, 0x00},    /*FLIP/Binning control*/
-       {OV5693_8BIT, 0x3821, 0x1e},    /*MIRROR control*/
-       {OV5693_8BIT, 0x5002, 0x00},
-       {OV5693_8BIT, 0x5041, 0x84},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-/*
- * 1940x1096 30fps 8.8ms VBlanking 2lane 10bit (Scaling)
- */
-#if ENABLE_NON_PREVIEW
-static struct ov5693_reg const ov5693_1940x1096[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3803, 0xf0},
-       {OV5693_8BIT, 0x3806, 0x06},
-       {OV5693_8BIT, 0x3807, 0xa7},
-       {OV5693_8BIT, 0x3808, 0x07},
-       {OV5693_8BIT, 0x3809, 0x94},
-       {OV5693_8BIT, 0x380a, 0x04},
-       {OV5693_8BIT, 0x380b, 0x48},
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x02},
-       {OV5693_8BIT, 0x3813, 0x02},
-       {OV5693_8BIT, 0x3814, 0x11},
-       {OV5693_8BIT, 0x3815, 0x11},
-       {OV5693_8BIT, 0x3820, 0x00},
-       {OV5693_8BIT, 0x3821, 0x1e},
-       {OV5693_8BIT, 0x5002, 0x80},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-static struct ov5693_reg const ov5693_2592x1456_30fps[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3800, 0x00},
-       {OV5693_8BIT, 0x3801, 0x00},
-       {OV5693_8BIT, 0x3802, 0x00},
-       {OV5693_8BIT, 0x3803, 0xf0},
-       {OV5693_8BIT, 0x3804, 0x0a},
-       {OV5693_8BIT, 0x3805, 0x3f},
-       {OV5693_8BIT, 0x3806, 0x06},
-       {OV5693_8BIT, 0x3807, 0xa4},
-       {OV5693_8BIT, 0x3808, 0x0a},
-       {OV5693_8BIT, 0x3809, 0x20},
-       {OV5693_8BIT, 0x380a, 0x05},
-       {OV5693_8BIT, 0x380b, 0xb0},
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x10},
-       {OV5693_8BIT, 0x3813, 0x00},
-       {OV5693_8BIT, 0x3814, 0x11},
-       {OV5693_8BIT, 0x3815, 0x11},
-       {OV5693_8BIT, 0x3820, 0x00},
-       {OV5693_8BIT, 0x3821, 0x1e},
-       {OV5693_8BIT, 0x5002, 0x00},
-       {OV5693_TOK_TERM, 0, 0}
-};
-#endif
-
-static struct ov5693_reg const ov5693_2576x1456_30fps[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3800, 0x00},
-       {OV5693_8BIT, 0x3801, 0x00},
-       {OV5693_8BIT, 0x3802, 0x00},
-       {OV5693_8BIT, 0x3803, 0xf0},
-       {OV5693_8BIT, 0x3804, 0x0a},
-       {OV5693_8BIT, 0x3805, 0x3f},
-       {OV5693_8BIT, 0x3806, 0x06},
-       {OV5693_8BIT, 0x3807, 0xa4},
-       {OV5693_8BIT, 0x3808, 0x0a},
-       {OV5693_8BIT, 0x3809, 0x10},
-       {OV5693_8BIT, 0x380a, 0x05},
-       {OV5693_8BIT, 0x380b, 0xb0},
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x18},
-       {OV5693_8BIT, 0x3813, 0x00},
-       {OV5693_8BIT, 0x3814, 0x11},
-       {OV5693_8BIT, 0x3815, 0x11},
-       {OV5693_8BIT, 0x3820, 0x00},
-       {OV5693_8BIT, 0x3821, 0x1e},
-       {OV5693_8BIT, 0x5002, 0x00},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-/*
- * 2592x1944 30fps 0.6ms VBlanking 2lane 10Bit
- */
-#if ENABLE_NON_PREVIEW
-static struct ov5693_reg const ov5693_2592x1944_30fps[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3803, 0x00},
-       {OV5693_8BIT, 0x3806, 0x07},
-       {OV5693_8BIT, 0x3807, 0xa3},
-       {OV5693_8BIT, 0x3808, 0x0a},
-       {OV5693_8BIT, 0x3809, 0x20},
-       {OV5693_8BIT, 0x380a, 0x07},
-       {OV5693_8BIT, 0x380b, 0x98},
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x10},
-       {OV5693_8BIT, 0x3813, 0x00},
-       {OV5693_8BIT, 0x3814, 0x11},
-       {OV5693_8BIT, 0x3815, 0x11},
-       {OV5693_8BIT, 0x3820, 0x00},
-       {OV5693_8BIT, 0x3821, 0x1e},
-       {OV5693_8BIT, 0x5002, 0x00},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-#endif
-
-/*
- * 11:9 Full FOV Output, expected FOV Res: 2346x1920
- * ISP Effect Res: 1408x1152
- * Sensor out: 1424x1168, DS From: 2380x1952
- *
- * WA: Left Offset: 8, Hor scal: 64
- */
-#if ENABLE_NON_PREVIEW
-static struct ov5693_reg const ov5693_1424x1168_30fps[] = {
-       {OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */
-       {OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */
-       {OV5693_8BIT, 0x3801, 0x50}, /* 80 */
-       {OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */
-       {OV5693_8BIT, 0x3803, 0x02}, /* 2 */
-       {OV5693_8BIT, 0x3804, 0x09}, /* TIMING_X_ADDR_END */
-       {OV5693_8BIT, 0x3805, 0xdd}, /* 2525 */
-       {OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */
-       {OV5693_8BIT, 0x3807, 0xa1}, /* 1953 */
-       {OV5693_8BIT, 0x3808, 0x05}, /* TIMING_X_OUTPUT_SIZE */
-       {OV5693_8BIT, 0x3809, 0x90}, /* 1424 */
-       {OV5693_8BIT, 0x380a, 0x04}, /* TIMING_Y_OUTPUT_SIZE */
-       {OV5693_8BIT, 0x380b, 0x90}, /* 1168 */
-       {OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */
-       {OV5693_8BIT, 0x3811, 0x02}, /* 2 */
-       {OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */
-       {OV5693_8BIT, 0x3813, 0x00}, /* 0 */
-       {OV5693_8BIT, 0x3814, 0x11}, /* TIME_X_INC */
-       {OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */
-       {OV5693_8BIT, 0x3820, 0x00},
-       {OV5693_8BIT, 0x3821, 0x1e},
-       {OV5693_8BIT, 0x5002, 0x00},
-       {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-#endif
-
-/*
- * 3:2 Full FOV Output, expected FOV Res: 2560x1706
- * ISP Effect Res: 720x480
- * Sensor out: 736x496, DS From 2616x1764
- */
-static struct ov5693_reg const ov5693_736x496_30fps[] = {
-       {OV5693_8BIT, 0x3501, 0x3b}, /* long exposure[15:8] */
-       {OV5693_8BIT, 0x3502, 0x80}, /* long exposure[7:0] */
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3800, 0x00}, /* TIMING_X_ADDR_START */
-       {OV5693_8BIT, 0x3801, 0x02}, /* 2 */
-       {OV5693_8BIT, 0x3802, 0x00}, /* TIMING_Y_ADDR_START */
-       {OV5693_8BIT, 0x3803, 0x62}, /* 98 */
-       {OV5693_8BIT, 0x3804, 0x0a}, /* TIMING_X_ADDR_END */
-       {OV5693_8BIT, 0x3805, 0x3b}, /* 2619 */
-       {OV5693_8BIT, 0x3806, 0x07}, /* TIMING_Y_ADDR_END */
-       {OV5693_8BIT, 0x3807, 0x43}, /* 1859 */
-       {OV5693_8BIT, 0x3808, 0x02}, /* TIMING_X_OUTPUT_SIZE */
-       {OV5693_8BIT, 0x3809, 0xe0}, /* 736 */
-       {OV5693_8BIT, 0x380a, 0x01}, /* TIMING_Y_OUTPUT_SIZE */
-       {OV5693_8BIT, 0x380b, 0xf0}, /* 496 */
-       {OV5693_8BIT, 0x380c, 0x0a}, /* TIMING_HTS */
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07}, /* TIMING_VTS */
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3810, 0x00}, /* TIMING_ISP_X_WIN */
-       {OV5693_8BIT, 0x3811, 0x02}, /* 2 */
-       {OV5693_8BIT, 0x3812, 0x00}, /* TIMING_ISP_Y_WIN */
-       {OV5693_8BIT, 0x3813, 0x00}, /* 0 */
-       {OV5693_8BIT, 0x3814, 0x11}, /* TIME_X_INC */
-       {OV5693_8BIT, 0x3815, 0x11}, /* TIME_Y_INC */
-       {OV5693_8BIT, 0x3820, 0x00},
-       {OV5693_8BIT, 0x3821, 0x1e},
-       {OV5693_8BIT, 0x5002, 0x00},
-       {OV5693_8BIT, 0x5041, 0x84}, /* scale is auto enabled */
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-static struct ov5693_reg const ov5693_2576x1936_30fps[] = {
-       {OV5693_8BIT, 0x3501, 0x7b},
-       {OV5693_8BIT, 0x3502, 0x00},
-       {OV5693_8BIT, 0x3708, 0xe2},
-       {OV5693_8BIT, 0x3709, 0xc3},
-       {OV5693_8BIT, 0x3803, 0x00},
-       {OV5693_8BIT, 0x3806, 0x07},
-       {OV5693_8BIT, 0x3807, 0xa3},
-       {OV5693_8BIT, 0x3808, 0x0a},
-       {OV5693_8BIT, 0x3809, 0x10},
-       {OV5693_8BIT, 0x380a, 0x07},
-       {OV5693_8BIT, 0x380b, 0x90},
-       {OV5693_8BIT, 0x380c, 0x0a},
-       {OV5693_8BIT, 0x380d, 0x80},
-       {OV5693_8BIT, 0x380e, 0x07},
-       {OV5693_8BIT, 0x380f, 0xc0},
-       {OV5693_8BIT, 0x3811, 0x18},
-       {OV5693_8BIT, 0x3813, 0x00},
-       {OV5693_8BIT, 0x3814, 0x11},
-       {OV5693_8BIT, 0x3815, 0x11},
-       {OV5693_8BIT, 0x3820, 0x00},
-       {OV5693_8BIT, 0x3821, 0x1e},
-       {OV5693_8BIT, 0x5002, 0x00},
-       {OV5693_8BIT, 0x0100, 0x01},
-       {OV5693_TOK_TERM, 0, 0}
-};
-
-static struct ov5693_resolution ov5693_res_preview[] = {
-       {
-               .desc = "ov5693_736x496_30fps",
-               .width = 736,
-               .height = 496,
-               .pix_clk_freq = 160,
-               .fps = 30,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_736x496_30fps,
-       },
-       {
-               .desc = "ov5693_1616x1216_30fps",
-               .width = 1616,
-               .height = 1216,
-               .pix_clk_freq = 160,
-               .fps = 30,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_1616x1216_30fps,
-       },
-       {
-               .desc = "ov5693_5M_30fps",
-               .width = 2576,
-               .height = 1456,
-               .pix_clk_freq = 160,
-               .fps = 30,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_2576x1456_30fps,
-       },
-       {
-               .desc = "ov5693_5M_30fps",
-               .width = 2576,
-               .height = 1936,
-               .pix_clk_freq = 160,
-               .fps = 30,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_2576x1936_30fps,
-       },
-};
-
-#define N_RES_PREVIEW (ARRAY_SIZE(ov5693_res_preview))
-
-/*
- * Disable non-preview configurations until the configuration selection is
- * improved.
- */
-#if ENABLE_NON_PREVIEW
-struct ov5693_resolution ov5693_res_still[] = {
-       {
-               .desc = "ov5693_736x496_30fps",
-               .width = 736,
-               .height = 496,
-               .pix_clk_freq = 160,
-               .fps = 30,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_736x496_30fps,
-       },
-       {
-               .desc = "ov5693_1424x1168_30fps",
-               .width = 1424,
-               .height = 1168,
-               .pix_clk_freq = 160,
-               .fps = 30,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_1424x1168_30fps,
-       },
-       {
-               .desc = "ov5693_1616x1216_30fps",
-               .width = 1616,
-               .height = 1216,
-               .pix_clk_freq = 160,
-               .fps = 30,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_1616x1216_30fps,
-       },
-       {
-               .desc = "ov5693_5M_30fps",
-               .width = 2592,
-               .height = 1456,
-               .pix_clk_freq = 160,
-               .fps = 30,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_2592x1456_30fps,
-       },
-       {
-               .desc = "ov5693_5M_30fps",
-               .width = 2592,
-               .height = 1944,
-               .pix_clk_freq = 160,
-               .fps = 30,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_2592x1944_30fps,
-       },
-};
-
-#define N_RES_STILL (ARRAY_SIZE(ov5693_res_still))
-
-struct ov5693_resolution ov5693_res_video[] = {
-       {
-               .desc = "ov5693_736x496_30fps",
-               .width = 736,
-               .height = 496,
-               .fps = 30,
-               .pix_clk_freq = 160,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_736x496,
-       },
-       {
-               .desc = "ov5693_336x256_30fps",
-               .width = 336,
-               .height = 256,
-               .fps = 30,
-               .pix_clk_freq = 160,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_336x256,
-       },
-       {
-               .desc = "ov5693_368x304_30fps",
-               .width = 368,
-               .height = 304,
-               .fps = 30,
-               .pix_clk_freq = 160,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_368x304,
-       },
-       {
-               .desc = "ov5693_192x160_30fps",
-               .width = 192,
-               .height = 160,
-               .fps = 30,
-               .pix_clk_freq = 160,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_192x160,
-       },
-       {
-               .desc = "ov5693_1296x736_30fps",
-               .width = 1296,
-               .height = 736,
-               .fps = 30,
-               .pix_clk_freq = 160,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_1296x736,
-       },
-       {
-               .desc = "ov5693_1296x976_30fps",
-               .width = 1296,
-               .height = 976,
-               .fps = 30,
-               .pix_clk_freq = 160,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_1296x976,
-       },
-       {
-               .desc = "ov5693_1636P_30fps",
-               .width = 1636,
-               .height = 1096,
-               .fps = 30,
-               .pix_clk_freq = 160,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_1636p_30fps,
-       },
-       {
-               .desc = "ov5693_1080P_30fps",
-               .width = 1940,
-               .height = 1096,
-               .fps = 30,
-               .pix_clk_freq = 160,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_1940x1096,
-       },
-       {
-               .desc = "ov5693_5M_30fps",
-               .width = 2592,
-               .height = 1456,
-               .pix_clk_freq = 160,
-               .fps = 30,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_2592x1456_30fps,
-       },
-       {
-               .desc = "ov5693_5M_30fps",
-               .width = 2592,
-               .height = 1944,
-               .pix_clk_freq = 160,
-               .fps = 30,
-               .used = 0,
-               .pixels_per_line = 2688,
-               .lines_per_frame = 1984,
-               .regs = ov5693_2592x1944_30fps,
-       },
-};
-
-#define N_RES_VIDEO (ARRAY_SIZE(ov5693_res_video))
-#endif
-
-static struct ov5693_resolution *ov5693_res = ov5693_res_preview;
-static unsigned long N_RES = N_RES_PREVIEW;
-#endif
index 14b1757e667464945130a57458483196ef2fec5e..bbbd904b696a3d082c729fe79313c419e80db5fa 100644 (file)
@@ -713,13 +713,6 @@ enum atomisp_burst_capture_options {
 #define EXT_ISP_SHOT_MODE_ANIMATED_PHOTO       10
 #define EXT_ISP_SHOT_MODE_SPORTS       11
 
-/*
- * Set Senor run mode
- */
-struct atomisp_s_runmode {
-       __u32 mode;
-};
-
 /*Private IOCTLs for ISP */
 #define ATOMISP_IOC_G_XNR \
        _IOR('v', BASE_VIDIOC_PRIVATE + 0, int)
@@ -875,9 +868,6 @@ struct atomisp_s_runmode {
 #define ATOMISP_IOC_S_SENSOR_EE_CONFIG \
        _IOW('v', BASE_VIDIOC_PRIVATE + 47, unsigned int)
 
-#define ATOMISP_IOC_S_SENSOR_RUNMODE \
-       _IOW('v', BASE_VIDIOC_PRIVATE + 48, struct atomisp_s_runmode)
-
 /*
  * Reserved ioctls. We have customer implementing it internally.
  * We can't use both numbers to not cause ABI conflict.
index 0803b296e9acc7c9d2be85cad4cb97b596b9869e..759233a7ba5053cf3ed8da1cdebcc94980a86fbf 100644 (file)
@@ -1248,28 +1248,6 @@ static void atomisp_update_capture_mode(struct atomisp_sub_device *asd)
                atomisp_css_capture_set_mode(asd, IA_CSS_CAPTURE_MODE_PRIMARY);
 }
 
-/* ISP2401 */
-int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd,
-                              struct atomisp_s_runmode *runmode)
-{
-       struct atomisp_device *isp = asd->isp;
-       struct v4l2_ctrl *c;
-       int ret = 0;
-
-       if (!(runmode && (runmode->mode & RUNMODE_MASK)))
-               return -EINVAL;
-
-       mutex_lock(asd->ctrl_handler.lock);
-       c = v4l2_ctrl_find(isp->inputs[asd->input_curr].camera->ctrl_handler,
-                          V4L2_CID_RUN_MODE);
-
-       if (c)
-               ret = v4l2_ctrl_s_ctrl(c, runmode->mode);
-
-       mutex_unlock(asd->ctrl_handler.lock);
-       return ret;
-}
-
 /*
  * Function to enable/disable lens geometry distortion correction (GDC) and
  * chromatic aberration correction (CAC)
@@ -2793,12 +2771,16 @@ int atomisp_cp_dvs_6axis_config(struct atomisp_sub_device *asd,
                        css_param->dvs_6axis = NULL;
 
                        dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
-                       if (!dvs_6axis_config)
-                               return -ENOMEM;
+                       if (!dvs_6axis_config) {
+                               ret = -ENOMEM;
+                               goto error;
+                       }
                } else if (!dvs_6axis_config) {
                        dvs_6axis_config = ia_css_dvs2_6axis_config_allocate(stream);
-                       if (!dvs_6axis_config)
-                               return -ENOMEM;
+                       if (!dvs_6axis_config) {
+                               ret = -ENOMEM;
+                               goto error;
+                       }
                }
 
                dvs_6axis_config->exp_id = source_6axis_config->exp_id;
@@ -2896,8 +2878,10 @@ int atomisp_cp_morph_table(struct atomisp_sub_device *asd,
                morph_table = atomisp_css_morph_table_allocate(
                                source_morph_table->width,
                                source_morph_table->height);
-               if (!morph_table)
-                       return -ENOMEM;
+               if (!morph_table) {
+                       ret = -ENOMEM;
+                       goto error;
+               }
 
                for (i = 0; i < IA_CSS_MORPH_TABLE_NUM_PLANES; i++) {
                        if (copy_from_compatible(morph_table->coordinates_x[i],
@@ -3808,6 +3792,10 @@ int atomisp_try_fmt(struct atomisp_device *isp, struct v4l2_pix_format *f,
                        return -EINVAL;
        }
 
+       /* The preview pipeline does not support width > 1920 */
+       if (asd->run_mode->val == ATOMISP_RUN_MODE_PREVIEW)
+               f->width = min_t(u32, f->width, 1920);
+
        /*
         * atomisp_set_fmt() will set the sensor resolution to the requested
         * resolution + padding. Add padding here and remove it again after
index 8305161d2062887cf40c8c887748c9fefaf19419..b8cd957eebdc21cc0a6f04b39bfc358398d3a0f2 100644 (file)
@@ -42,13 +42,6 @@ struct ia_css_frame;
 #define INTR_IER               24
 #define INTR_IIR               16
 
-/* ISP2401 */
-#define RUNMODE_MASK (ATOMISP_RUN_MODE_VIDEO | ATOMISP_RUN_MODE_STILL_CAPTURE \
-                       | ATOMISP_RUN_MODE_PREVIEW)
-
-/* FIXME: check if can go */
-extern int atomisp_punit_hpll_freq;
-
 /* Helper function */
 void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
                  unsigned int size);
@@ -77,12 +70,6 @@ bool atomisp_is_viewfinder_support(struct atomisp_device *isp);
 
 /* ISP features control function */
 
-/*
- * Function to set sensor runmode by user when
- * ATOMISP_IOC_S_SENSOR_RUNMODE ioctl was called
- */
-int atomisp_set_sensor_runmode(struct atomisp_sub_device *asd,
-                              struct atomisp_s_runmode *runmode);
 /*
  * Function to enable/disable lens geometry distortion correction (GDC) and
  * chromatic aberration correction (CAC)
index b97ec85aa0bab4f2978874ed632be6ec53bddf89..02f06294bbfe0744988a68b238ec1c8ad41b34ed 100644 (file)
@@ -16,7 +16,6 @@
  *
  */
 
-#include <media/videobuf-vmalloc.h>
 #include <media/v4l2-dev.h>
 #include <media/v4l2-event.h>
 
@@ -850,19 +849,17 @@ int atomisp_css_irq_translate(struct atomisp_device *isp,
 void atomisp_css_rx_get_irq_info(enum mipi_port_id port,
                                 unsigned int *infos)
 {
-#ifndef ISP2401
-       ia_css_isys_rx_get_irq_info(port, infos);
-#else
-       *infos = 0;
-#endif
+       if (IS_ISP2401)
+               *infos = 0;
+       else
+               ia_css_isys_rx_get_irq_info(port, infos);
 }
 
 void atomisp_css_rx_clear_irq_info(enum mipi_port_id port,
                                   unsigned int infos)
 {
-#ifndef ISP2401
-       ia_css_isys_rx_clear_irq_info(port, infos);
-#endif
+       if (!IS_ISP2401)
+               ia_css_isys_rx_clear_irq_info(port, infos);
 }
 
 int atomisp_css_irq_enable(struct atomisp_device *isp,
index 03940c11505fcef6e77eff0c628d892ab45c62cb..2483eaeeac7332515eb20f76e3a78970e5f2c39d 100644 (file)
@@ -521,7 +521,12 @@ static char *atomisp_csi2_get_vcm_type(struct acpi_device *adev)
 }
 
 static const struct acpi_device_id atomisp_sensor_configs[] = {
-       ATOMISP_SENSOR_CONFIG("INT33BE", 2, true),      /* OV5693 */
+       /*
+        * FIXME ov5693 modules have a VCM, but for unknown reasons
+        * the sensor fails to start streaming when instantiating
+        * an i2c-client for the VCM, so it is disabled for now.
+        */
+       ATOMISP_SENSOR_CONFIG("INT33BE", 2, false),     /* OV5693 */
        {}
 };
 
index 54466d2f323a156374ed94789c7683c87e7b270e..4dba6120af391f8e0b89d22847e255624aa68c93 100644 (file)
@@ -460,7 +460,6 @@ static void atomisp_dev_init_struct(struct atomisp_device *isp)
 
 static void atomisp_subdev_init_struct(struct atomisp_sub_device *asd)
 {
-       v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_STILL_CAPTURE);
        memset(&asd->params.css_param, 0, sizeof(asd->params.css_param));
        asd->params.color_effect = V4L2_COLORFX_NONE;
        asd->params.bad_pixel_en = true;
@@ -513,8 +512,8 @@ static int atomisp_open(struct file *file)
         */
        if (pipe->users) {
                dev_dbg(isp->dev, "video node already opened\n");
-               mutex_unlock(&isp->mutex);
-               return -EBUSY;
+               ret = -EBUSY;
+               goto error;
        }
 
        /* runtime power management, turn on ISP */
@@ -533,8 +532,6 @@ static int atomisp_open(struct file *file)
        }
 
        atomisp_subdev_init_struct(asd);
-       /* Ensure that a mode is set */
-       v4l2_ctrl_s_ctrl(asd->run_mode, ATOMISP_RUN_MODE_PREVIEW);
 
        pipe->users++;
        mutex_unlock(&isp->mutex);
index d2174156573a536560f63b1dd3a7cd8e6ace0e52..a8e4779d007f4a3cbf0278941597a8d1828ad38e 100644 (file)
@@ -665,11 +665,6 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input)
                dev_err(isp->dev, "Failed to power-on sensor\n");
                return ret;
        }
-       /*
-        * Some sensor driver resets the run mode during power-on, thus force
-        * update the run mode to sensor after power-on.
-        */
-       atomisp_update_run_mode(asd);
 
        /* select operating sensor */
        ret = v4l2_subdev_call(isp->inputs[input].camera, video, s_routing,
@@ -708,6 +703,9 @@ static int atomisp_enum_framesizes_crop_inner(struct atomisp_device *isp,
                                              int *valid_sizes)
 {
        static const struct v4l2_frmsize_discrete frame_sizes[] = {
+               { 1920, 1440 },
+               { 1920, 1200 },
+               { 1920, 1080 },
                { 1600, 1200 },
                { 1600, 1080 },
                { 1600,  900 },
@@ -729,11 +727,11 @@ static int atomisp_enum_framesizes_crop_inner(struct atomisp_device *isp,
                        continue;
 
                /*
-                * Skip sizes where width and height are less then 2/3th of the
+                * Skip sizes where width and height are less then 5/8th of the
                 * sensor size to avoid sizes with a too small field of view.
                 */
-               if (frame_sizes[i].width < (active->width * 2 / 3) &&
-                   frame_sizes[i].height < (active->height * 2 / 3))
+               if (frame_sizes[i].width < (active->width * 5 / 8) &&
+                   frame_sizes[i].height < (active->height * 5 / 8))
                        continue;
 
                if (*valid_sizes == fsize->index) {
@@ -1781,13 +1779,6 @@ static long atomisp_vidioc_default(struct file *file, void *fh,
        int err;
 
        switch (cmd) {
-       case ATOMISP_IOC_S_SENSOR_RUNMODE:
-               if (IS_ISP2401)
-                       err = atomisp_set_sensor_runmode(asd, arg);
-               else
-                       err = -EINVAL;
-               break;
-
        case ATOMISP_IOC_G_XNR:
                err = atomisp_xnr(asd, 0, arg);
                break;
index 45073e401bac486ca7932f91d123b811843d4a3d..471912dea5cd84369108f037e2c3854f937bdf64 100644 (file)
@@ -663,52 +663,6 @@ static const struct media_entity_operations isp_subdev_media_ops = {
        /*       .set_power = v4l2_subdev_set_power,    */
 };
 
-static int __atomisp_update_run_mode(struct atomisp_sub_device *asd)
-{
-       struct atomisp_device *isp = asd->isp;
-       struct v4l2_ctrl *ctrl = asd->run_mode;
-       struct v4l2_ctrl *c;
-       s32 mode;
-
-       mode = ctrl->val;
-
-       c = v4l2_ctrl_find(
-               isp->inputs[asd->input_curr].camera->ctrl_handler,
-               V4L2_CID_RUN_MODE);
-
-       if (c)
-               return v4l2_ctrl_s_ctrl(c, mode);
-
-       return 0;
-}
-
-int atomisp_update_run_mode(struct atomisp_sub_device *asd)
-{
-       int rval;
-
-       mutex_lock(asd->ctrl_handler.lock);
-       rval = __atomisp_update_run_mode(asd);
-       mutex_unlock(asd->ctrl_handler.lock);
-
-       return rval;
-}
-
-static int s_ctrl(struct v4l2_ctrl *ctrl)
-{
-       struct atomisp_sub_device *asd = container_of(
-                                            ctrl->handler, struct atomisp_sub_device, ctrl_handler);
-       switch (ctrl->id) {
-       case V4L2_CID_RUN_MODE:
-               return __atomisp_update_run_mode(asd);
-       }
-
-       return 0;
-}
-
-static const struct v4l2_ctrl_ops ctrl_ops = {
-       .s_ctrl = &s_ctrl,
-};
-
 static const char *const ctrl_run_mode_menu[] = {
        [ATOMISP_RUN_MODE_VIDEO]                = "Video",
        [ATOMISP_RUN_MODE_STILL_CAPTURE]        = "Still capture",
@@ -716,7 +670,6 @@ static const char *const ctrl_run_mode_menu[] = {
 };
 
 static const struct v4l2_ctrl_config ctrl_run_mode = {
-       .ops = &ctrl_ops,
        .id = V4L2_CID_RUN_MODE,
        .name = "Atomisp run mode",
        .type = V4L2_CTRL_TYPE_MENU,
@@ -754,7 +707,6 @@ static const struct v4l2_ctrl_config ctrl_vfpp = {
  * the CSS subsystem.
  */
 static const struct v4l2_ctrl_config ctrl_continuous_raw_buffer_size = {
-       .ops = &ctrl_ops,
        .id = V4L2_CID_ATOMISP_CONTINUOUS_RAW_BUFFER_SIZE,
        .type = V4L2_CTRL_TYPE_INTEGER,
        .name = "Continuous raw ringbuffer size",
index 9a04511b9efd1afb74fac4c38b23801a93be1c1f..9c1703bf439cf188aa91b718a747fc7c115f54ff 100644 (file)
@@ -360,8 +360,6 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd,
                             uint32_t which,
                             u32 pad, struct v4l2_mbus_framefmt *ffmt);
 
-int atomisp_update_run_mode(struct atomisp_sub_device *asd);
-
 void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd);
 
 void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd);
index 0d0329f5e4ad1384dc5c4e133bfea0caa5b26394..c1c8501ec61f57046af5027c8f9a4170597210d5 100644 (file)
@@ -1206,25 +1206,6 @@ static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id
                return false;
        }
 
-       /*
-        * FIXME:
-        * remove the if once the driver become generic
-        */
-
-#ifndef ISP2401
-       if (IS_ISP2401) {
-               dev_err(&pdev->dev, "Support for %s (ISP2401) was disabled at compile time\n",
-                       name);
-               return false;
-       }
-#else
-       if (!IS_ISP2401) {
-               dev_err(&pdev->dev, "Support for %s (ISP2400) was disabled at compile time\n",
-                       name);
-               return false;
-       }
-#endif
-
        dev_info(&pdev->dev, "Detected %s version %d (ISP240%c) on %s\n",
                 name, pdev->revision, IS_ISP2401 ? '1' : '0', product);
 
index a7698719029242bf3ddf7318571647e795b04771..0f585a7e0fa405f3c904297bbf4a74dfb9c14f25 100644 (file)
@@ -18,8 +18,6 @@
 
 #include <type_support.h>
 
-#if defined(ISP2401)
-
 typedef struct isys_irqc_state_s isys_irqc_state_t;
 
 struct isys_irqc_state_s {
@@ -31,6 +29,5 @@ struct isys_irqc_state_s {
        /*hrt_data clear;       */      /* write-only register */
 };
 
-#endif /* defined(ISP2401) */
 
 #endif /* __ISYS_IRQ_LOCAL_H__ */
index fb168c25bdfc3f2d0e2d39ed85f91374ccfd1176..d94c8e6add72b9dfe1615eeed58924fab88d988e 100644 (file)
@@ -19,7 +19,6 @@
 #include "isys_irq_global.h"
 #include "isys_irq_local.h"
 
-#if defined(ISP2401)
 
 /* -------------------------------------------------------+
  |             Native command interface (NCI)             |
@@ -102,6 +101,5 @@ hrt_data isys_irqc_reg_load(
 
 /* end of DLI */
 
-#endif /* defined(ISP2401) */
 
 #endif /* __ISYS_IRQ_PRIVATE_H__ */
index a81e4d13ac9f5992c2dd07c4b01430c64b2335c0..16336ed7303636a9262ed6013093ba7ce142313f 100644 (file)
@@ -16,7 +16,6 @@
 #ifndef __ISYS_IRQ_GLOBAL_H__
 #define __ISYS_IRQ_GLOBAL_H__
 
-#if defined(ISP2401)
 
 /* Register offset/index from base location */
 #define ISYS_IRQ_EDGE_REG_IDX          (0)
@@ -31,6 +30,5 @@
 #define ISYS_IRQ_CLEAR_REG_VALUE       (0xFFFF)
 #define ISYS_IRQ_ENABLE_REG_VALUE      (0xFFFF)
 
-#endif /* defined(ISP2401) */
 
 #endif /* __ISYS_IRQ_GLOBAL_H__ */
index b6538beca18ad82967ca40363d21c8b7f6a698b0..f2e17945fd4539633d18517e2bde116f49003026 100644 (file)
 
 #define DEBUG_BUFFER_ISP_DMEM_ADDR       0x0
 
-/*
- * Enable HAS_WATCHDOG_SP_THREAD_DEBUG for additional SP thread and
- * pipe information on watchdog output
- * #undef HAS_WATCHDOG_SP_THREAD_DEBUG
- * #define HAS_WATCHDOG_SP_THREAD_DEBUG
- */
-
 /*
  * The linear buffer mode will accept data until the first
  * overflow and then stop accepting new data
index f85950c471c799a5201f176fdfa634eecc2fef02..0b6647b2eb76418e18804ffcc6eb8c2ee48de47f 100644 (file)
 #include "dma_private.h"
 #endif /* __INLINE_DMA__ */
 
-void dma_get_state(const dma_ID_t ID, dma_state_t *state)
-{
-       int                     i;
-       hrt_data        tmp;
-
-       assert(ID < N_DMA_ID);
-       assert(state);
-
-       tmp = dma_reg_load(ID, DMA_COMMAND_FSM_REG_IDX);
-       //reg  [3:0] : flags error [3], stall, run, idle [0]
-       //reg  [9:4] : command
-       //reg[14:10] : channel
-       //reg [23:15] : param
-       state->fsm_command_idle = tmp & 0x1;
-       state->fsm_command_run = tmp & 0x2;
-       state->fsm_command_stalling = tmp & 0x4;
-       state->fsm_command_error    = tmp & 0x8;
-       state->last_command_channel = (tmp >> 10 & 0x1F);
-       state->last_command_param =  (tmp >> 15 & 0x0F);
-       tmp = (tmp >> 4) & 0x3F;
-       /* state->last_command = (dma_commands_t)tmp; */
-       /* if the enumerator is made non-linear */
-       /* AM: the list below does not cover all the cases*/
-       /*  and these are not correct */
-       /* therefore for just dumpinmg this command*/
-       state->last_command = tmp;
-
-       /*
-               if (tmp == 0)
-                       state->last_command = DMA_COMMAND_READ;
-               if (tmp == 1)
-                       state->last_command = DMA_COMMAND_WRITE;
-               if (tmp == 2)
-                       state->last_command = DMA_COMMAND_SET_CHANNEL;
-               if (tmp == 3)
-                       state->last_command = DMA_COMMAND_SET_PARAM;
-               if (tmp == 4)
-                       state->last_command = DMA_COMMAND_READ_SPECIFIC;
-               if (tmp == 5)
-                       state->last_command = DMA_COMMAND_WRITE_SPECIFIC;
-               if (tmp == 8)
-                       state->last_command = DMA_COMMAND_INIT;
-               if (tmp == 12)
-                       state->last_command = DMA_COMMAND_INIT_SPECIFIC;
-               if (tmp == 15)
-                       state->last_command = DMA_COMMAND_RST;
-       */
-
-       /* No sub-fields, idx = 0 */
-       state->current_command = dma_reg_load(ID,
-                                             DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_CMD_IDX));
-       state->current_addr_a = dma_reg_load(ID,
-                                            DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_A_IDX));
-       state->current_addr_b = dma_reg_load(ID,
-                                            DMA_CG_INFO_REG_IDX(0, _DMA_FSM_GROUP_ADDR_B_IDX));
-
-       tmp =  dma_reg_load(ID,
-                           DMA_CG_INFO_REG_IDX(
-                               _DMA_FSM_GROUP_FSM_CTRL_STATE_IDX,
-                               _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_idle = tmp & 0x1;
-       state->fsm_ctrl_run = tmp & 0x2;
-       state->fsm_ctrl_stalling = tmp & 0x4;
-       state->fsm_ctrl_error = tmp & 0x8;
-       tmp = tmp >> 4;
-       /* state->fsm_ctrl_state = (dma_ctrl_states_t)tmp; */
-       if (tmp == 0)
-               state->fsm_ctrl_state = DMA_CTRL_STATE_IDLE;
-       if (tmp == 1)
-               state->fsm_ctrl_state = DMA_CTRL_STATE_REQ_RCV;
-       if (tmp == 2)
-               state->fsm_ctrl_state = DMA_CTRL_STATE_RCV;
-       if (tmp == 3)
-               state->fsm_ctrl_state = DMA_CTRL_STATE_RCV_REQ;
-       if (tmp == 4)
-               state->fsm_ctrl_state = DMA_CTRL_STATE_INIT;
-       state->fsm_ctrl_source_dev = dma_reg_load(ID,
-                                    DMA_CG_INFO_REG_IDX(
-                                        _DMA_FSM_GROUP_FSM_CTRL_REQ_DEV_IDX,
-                                        _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_source_addr = dma_reg_load(ID,
-                                     DMA_CG_INFO_REG_IDX(
-                                         _DMA_FSM_GROUP_FSM_CTRL_REQ_ADDR_IDX,
-                                         _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_source_stride = dma_reg_load(ID,
-                                       DMA_CG_INFO_REG_IDX(
-                                           _DMA_FSM_GROUP_FSM_CTRL_REQ_STRIDE_IDX,
-                                           _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_source_width = dma_reg_load(ID,
-                                      DMA_CG_INFO_REG_IDX(
-                                          _DMA_FSM_GROUP_FSM_CTRL_REQ_XB_IDX,
-                                          _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_source_height = dma_reg_load(ID,
-                                       DMA_CG_INFO_REG_IDX(
-                                           _DMA_FSM_GROUP_FSM_CTRL_REQ_YB_IDX,
-                                           _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_pack_source_dev = dma_reg_load(ID,
-                                         DMA_CG_INFO_REG_IDX(
-                                             _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_DEV_IDX,
-                                             _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_pack_dest_dev = dma_reg_load(ID,
-                                       DMA_CG_INFO_REG_IDX(
-                                           _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_DEV_IDX,
-                                           _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_dest_addr = dma_reg_load(ID,
-                                   DMA_CG_INFO_REG_IDX(
-                                       _DMA_FSM_GROUP_FSM_CTRL_WR_ADDR_IDX,
-                                       _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_dest_stride = dma_reg_load(ID,
-                                     DMA_CG_INFO_REG_IDX(
-                                         _DMA_FSM_GROUP_FSM_CTRL_WR_STRIDE_IDX,
-                                         _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_pack_source_width = dma_reg_load(ID,
-                                           DMA_CG_INFO_REG_IDX(
-                                               _DMA_FSM_GROUP_FSM_CTRL_PACK_REQ_XB_IDX,
-                                               _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_pack_dest_height = dma_reg_load(ID,
-                                          DMA_CG_INFO_REG_IDX(
-                                              _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_YB_IDX,
-                                              _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_pack_dest_width = dma_reg_load(ID,
-                                         DMA_CG_INFO_REG_IDX(
-                                             _DMA_FSM_GROUP_FSM_CTRL_PACK_WR_XB_IDX,
-                                             _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_pack_source_elems = dma_reg_load(ID,
-                                           DMA_CG_INFO_REG_IDX(
-                                               _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_REQ_IDX,
-                                               _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_pack_dest_elems = dma_reg_load(ID,
-                                         DMA_CG_INFO_REG_IDX(
-                                             _DMA_FSM_GROUP_FSM_CTRL_PACK_ELEM_WR_IDX,
-                                             _DMA_FSM_GROUP_FSM_CTRL_IDX));
-       state->fsm_ctrl_pack_extension = dma_reg_load(ID,
-                                        DMA_CG_INFO_REG_IDX(
-                                            _DMA_FSM_GROUP_FSM_CTRL_PACK_S_Z_IDX,
-                                            _DMA_FSM_GROUP_FSM_CTRL_IDX));
-
-       tmp = dma_reg_load(ID,
-                          DMA_CG_INFO_REG_IDX(
-                              _DMA_FSM_GROUP_FSM_PACK_STATE_IDX,
-                              _DMA_FSM_GROUP_FSM_PACK_IDX));
-       state->pack_idle     = tmp & 0x1;
-       state->pack_run      = tmp & 0x2;
-       state->pack_stalling = tmp & 0x4;
-       state->pack_error    = tmp & 0x8;
-       state->pack_cnt_height = dma_reg_load(ID,
-                                             DMA_CG_INFO_REG_IDX(
-                                                     _DMA_FSM_GROUP_FSM_PACK_CNT_YB_IDX,
-                                                     _DMA_FSM_GROUP_FSM_PACK_IDX));
-       state->pack_src_cnt_width = dma_reg_load(ID,
-                                   DMA_CG_INFO_REG_IDX(
-                                       _DMA_FSM_GROUP_FSM_PACK_CNT_XB_REQ_IDX,
-                                       _DMA_FSM_GROUP_FSM_PACK_IDX));
-       state->pack_dest_cnt_width = dma_reg_load(ID,
-                                    DMA_CG_INFO_REG_IDX(
-                                        _DMA_FSM_GROUP_FSM_PACK_CNT_XB_WR_IDX,
-                                        _DMA_FSM_GROUP_FSM_PACK_IDX));
-
-       tmp = dma_reg_load(ID,
-                          DMA_CG_INFO_REG_IDX(
-                              _DMA_FSM_GROUP_FSM_REQ_STATE_IDX,
-                              _DMA_FSM_GROUP_FSM_REQ_IDX));
-       /* state->read_state = (dma_rw_states_t)tmp; */
-       if (tmp == 0)
-               state->read_state = DMA_RW_STATE_IDLE;
-       if (tmp == 1)
-               state->read_state = DMA_RW_STATE_REQ;
-       if (tmp == 2)
-               state->read_state = DMA_RW_STATE_NEXT_LINE;
-       if (tmp == 3)
-               state->read_state = DMA_RW_STATE_UNLOCK_CHANNEL;
-       state->read_cnt_height = dma_reg_load(ID,
-                                             DMA_CG_INFO_REG_IDX(
-                                                     _DMA_FSM_GROUP_FSM_REQ_CNT_YB_IDX,
-                                                     _DMA_FSM_GROUP_FSM_REQ_IDX));
-       state->read_cnt_width = dma_reg_load(ID,
-                                            DMA_CG_INFO_REG_IDX(
-                                                    _DMA_FSM_GROUP_FSM_REQ_CNT_XB_IDX,
-                                                    _DMA_FSM_GROUP_FSM_REQ_IDX));
-
-       tmp = dma_reg_load(ID,
-                          DMA_CG_INFO_REG_IDX(
-                              _DMA_FSM_GROUP_FSM_WR_STATE_IDX,
-                              _DMA_FSM_GROUP_FSM_WR_IDX));
-       /* state->write_state = (dma_rw_states_t)tmp; */
-       if (tmp == 0)
-               state->write_state = DMA_RW_STATE_IDLE;
-       if (tmp == 1)
-               state->write_state = DMA_RW_STATE_REQ;
-       if (tmp == 2)
-               state->write_state = DMA_RW_STATE_NEXT_LINE;
-       if (tmp == 3)
-               state->write_state = DMA_RW_STATE_UNLOCK_CHANNEL;
-       state->write_height = dma_reg_load(ID,
-                                          DMA_CG_INFO_REG_IDX(
-                                              _DMA_FSM_GROUP_FSM_WR_CNT_YB_IDX,
-                                              _DMA_FSM_GROUP_FSM_WR_IDX));
-       state->write_width = dma_reg_load(ID,
-                                         DMA_CG_INFO_REG_IDX(
-                                             _DMA_FSM_GROUP_FSM_WR_CNT_XB_IDX,
-                                             _DMA_FSM_GROUP_FSM_WR_IDX));
-
-       for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) {
-               dma_port_state_t *port = &state->port_states[i];
-
-               tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(0, i));
-               port->req_cs   = ((tmp & 0x1) != 0);
-               port->req_we_n = ((tmp & 0x2) != 0);
-               port->req_run  = ((tmp & 0x4) != 0);
-               port->req_ack  = ((tmp & 0x8) != 0);
-
-               tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(1, i));
-               port->send_cs   = ((tmp & 0x1) != 0);
-               port->send_we_n = ((tmp & 0x2) != 0);
-               port->send_run  = ((tmp & 0x4) != 0);
-               port->send_ack  = ((tmp & 0x8) != 0);
-
-               tmp = dma_reg_load(ID, DMA_DEV_INFO_REG_IDX(2, i));
-               if (tmp & 0x1)
-                       port->fifo_state = DMA_FIFO_STATE_WILL_BE_FULL;
-               if (tmp & 0x2)
-                       port->fifo_state = DMA_FIFO_STATE_FULL;
-               if (tmp & 0x4)
-                       port->fifo_state = DMA_FIFO_STATE_EMPTY;
-               port->fifo_counter = tmp >> 3;
-       }
-
-       for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) {
-               dma_channel_state_t *ch = &state->channel_states[i];
-
-               ch->connection = DMA_GET_CONNECTION(dma_reg_load(ID,
-                                                   DMA_CHANNEL_PARAM_REG_IDX(i,
-                                                           _DMA_PACKING_SETUP_PARAM)));
-               ch->sign_extend = DMA_GET_EXTENSION(dma_reg_load(ID,
-                                                   DMA_CHANNEL_PARAM_REG_IDX(i,
-                                                           _DMA_PACKING_SETUP_PARAM)));
-               ch->height = dma_reg_load(ID,
-                                         DMA_CHANNEL_PARAM_REG_IDX(i,
-                                                 _DMA_HEIGHT_PARAM));
-               ch->stride_a = dma_reg_load(ID,
-                                           DMA_CHANNEL_PARAM_REG_IDX(i,
-                                                   _DMA_STRIDE_A_PARAM));
-               ch->elems_a = DMA_GET_ELEMENTS(dma_reg_load(ID,
-                                              DMA_CHANNEL_PARAM_REG_IDX(i,
-                                                      _DMA_ELEM_CROPPING_A_PARAM)));
-               ch->cropping_a = DMA_GET_CROPPING(dma_reg_load(ID,
-                                                 DMA_CHANNEL_PARAM_REG_IDX(i,
-                                                         _DMA_ELEM_CROPPING_A_PARAM)));
-               ch->width_a = dma_reg_load(ID,
-                                          DMA_CHANNEL_PARAM_REG_IDX(i,
-                                                  _DMA_WIDTH_A_PARAM));
-               ch->stride_b = dma_reg_load(ID,
-                                           DMA_CHANNEL_PARAM_REG_IDX(i,
-                                                   _DMA_STRIDE_B_PARAM));
-               ch->elems_b = DMA_GET_ELEMENTS(dma_reg_load(ID,
-                                              DMA_CHANNEL_PARAM_REG_IDX(i,
-                                                      _DMA_ELEM_CROPPING_B_PARAM)));
-               ch->cropping_b = DMA_GET_CROPPING(dma_reg_load(ID,
-                                                 DMA_CHANNEL_PARAM_REG_IDX(i,
-                                                         _DMA_ELEM_CROPPING_B_PARAM)));
-               ch->width_b = dma_reg_load(ID,
-                                          DMA_CHANNEL_PARAM_REG_IDX(i,
-                                                  _DMA_WIDTH_B_PARAM));
-       }
-}
-
 void
 dma_set_max_burst_size(const dma_ID_t ID, dma_connection conn,
                       uint32_t max_burst_size)
index 7e4cc75733cd72c76731caeed892b79eef864a52..48a1ace79897ca83f0322c69dcfa26ab89ea181c 100644 (file)
 #define DMA_GET_ELEMENTS(val)      _hrt_get_bits(val, _DMA_V2_ELEMENTS_IDX,      _DMA_V2_ELEMENTS_BITS)
 #define DMA_GET_CROPPING(val)      _hrt_get_bits(val, _DMA_V2_LEFT_CROPPING_IDX, _DMA_V2_LEFT_CROPPING_BITS)
 
-typedef enum {
-       DMA_CTRL_STATE_IDLE,
-       DMA_CTRL_STATE_REQ_RCV,
-       DMA_CTRL_STATE_RCV,
-       DMA_CTRL_STATE_RCV_REQ,
-       DMA_CTRL_STATE_INIT,
-       N_DMA_CTRL_STATES
-} dma_ctrl_states_t;
-
-typedef enum {
-       DMA_COMMAND_READ,
-       DMA_COMMAND_WRITE,
-       DMA_COMMAND_SET_CHANNEL,
-       DMA_COMMAND_SET_PARAM,
-       DMA_COMMAND_READ_SPECIFIC,
-       DMA_COMMAND_WRITE_SPECIFIC,
-       DMA_COMMAND_INIT,
-       DMA_COMMAND_INIT_SPECIFIC,
-       DMA_COMMAND_RST,
-       N_DMA_COMMANDS
-} dma_commands_t;
-
-typedef enum {
-       DMA_RW_STATE_IDLE,
-       DMA_RW_STATE_REQ,
-       DMA_RW_STATE_NEXT_LINE,
-       DMA_RW_STATE_UNLOCK_CHANNEL,
-       N_DMA_RW_STATES
-} dma_rw_states_t;
-
-typedef enum {
-       DMA_FIFO_STATE_WILL_BE_FULL,
-       DMA_FIFO_STATE_FULL,
-       DMA_FIFO_STATE_EMPTY,
-       N_DMA_FIFO_STATES
-} dma_fifo_states_t;
-
-/* typedef struct dma_state_s                  dma_state_t; */
-typedef struct dma_channel_state_s     dma_channel_state_t;
-typedef struct dma_port_state_s                dma_port_state_t;
-
-struct dma_port_state_s {
-       bool                       req_cs;
-       bool                       req_we_n;
-       bool                       req_run;
-       bool                       req_ack;
-       bool                       send_cs;
-       bool                       send_we_n;
-       bool                       send_run;
-       bool                       send_ack;
-       dma_fifo_states_t          fifo_state;
-       int                        fifo_counter;
-};
-
-struct dma_channel_state_s {
-       int                        connection;
-       bool                       sign_extend;
-       int                        height;
-       int                        stride_a;
-       int                        elems_a;
-       int                        cropping_a;
-       int                        width_a;
-       int                        stride_b;
-       int                        elems_b;
-       int                        cropping_b;
-       int                        width_b;
-};
-
-struct dma_state_s {
-       bool                       fsm_command_idle;
-       bool                       fsm_command_run;
-       bool                       fsm_command_stalling;
-       bool                       fsm_command_error;
-       dma_commands_t             last_command;
-       int                        last_command_channel;
-       int                        last_command_param;
-       dma_commands_t             current_command;
-       int                        current_addr_a;
-       int                        current_addr_b;
-       bool                       fsm_ctrl_idle;
-       bool                       fsm_ctrl_run;
-       bool                       fsm_ctrl_stalling;
-       bool                       fsm_ctrl_error;
-       dma_ctrl_states_t          fsm_ctrl_state;
-       int                        fsm_ctrl_source_dev;
-       int                        fsm_ctrl_source_addr;
-       int                        fsm_ctrl_source_stride;
-       int                        fsm_ctrl_source_width;
-       int                        fsm_ctrl_source_height;
-       int                        fsm_ctrl_pack_source_dev;
-       int                        fsm_ctrl_pack_dest_dev;
-       int                        fsm_ctrl_dest_addr;
-       int                        fsm_ctrl_dest_stride;
-       int                        fsm_ctrl_pack_source_width;
-       int                        fsm_ctrl_pack_dest_height;
-       int                        fsm_ctrl_pack_dest_width;
-       int                        fsm_ctrl_pack_source_elems;
-       int                        fsm_ctrl_pack_dest_elems;
-       int                        fsm_ctrl_pack_extension;
-       int                                                pack_idle;
-       int                            pack_run;
-       int                                pack_stalling;
-       int                                pack_error;
-       int                        pack_cnt_height;
-       int                        pack_src_cnt_width;
-       int                        pack_dest_cnt_width;
-       dma_rw_states_t            read_state;
-       int                        read_cnt_height;
-       int                        read_cnt_width;
-       dma_rw_states_t            write_state;
-       int                        write_height;
-       int                        write_width;
-       dma_port_state_t           port_states[HIVE_ISP_NUM_DMA_CONNS];
-       dma_channel_state_t        channel_states[HIVE_DMA_NUM_CHANNELS];
-};
-
 #endif /* __DMA_LOCAL_H_INCLUDED__ */
index 5cd6136f21a2155c63f769b25889aa028b4c25fb..e01f30f137a00f58c856b37d4070a987d3547d68 100644 (file)
@@ -15,7 +15,6 @@
 
 #include "system_global.h"
 
-#ifndef ISP2401
 
 #include "input_formatter.h"
 #include <type_support.h>
@@ -243,4 +242,3 @@ void input_formatter_bin_get_state(
                                  HIVE_STR2MEM_EN_STAT_UPDATE_ADDRESS);
        return;
 }
-#endif
index 712e01c37870c2d2b76530be1c928cedac290ddc..ca1ce66890349abea240e0b081dc71df1ff222ac 100644 (file)
@@ -15,7 +15,6 @@
 
 #include "system_global.h"
 
-#ifndef ISP2401
 
 #include "input_system.h"
 #include <type_support.h>
@@ -80,31 +79,6 @@ static input_system_err_t input_system_multiplexer_cfg(
     const input_system_multiplex_t                     rhs,
     input_system_config_flags_t *const flags);
 
-static inline void capture_unit_get_state(
-    const input_system_ID_t                    ID,
-    const sub_system_ID_t                      sub_id,
-    capture_unit_state_t                       *state);
-
-static inline void acquisition_unit_get_state(
-    const input_system_ID_t                    ID,
-    const sub_system_ID_t                      sub_id,
-    acquisition_unit_state_t           *state);
-
-static inline void ctrl_unit_get_state(
-    const input_system_ID_t                    ID,
-    const sub_system_ID_t                      sub_id,
-    ctrl_unit_state_t                          *state);
-
-static inline void mipi_port_get_state(
-    const rx_ID_t                                      ID,
-    const enum mipi_port_id                    port_ID,
-    mipi_port_state_t                          *state);
-
-static inline void rx_channel_get_state(
-    const rx_ID_t                                      ID,
-    const unsigned int                         ch_id,
-    rx_channel_state_t                         *state);
-
 static void gp_device_rst(const gp_device_ID_t         ID);
 
 static void input_selector_cfg_for_sensor(const gp_device_ID_t ID);
@@ -116,149 +90,6 @@ static void input_switch_cfg(
     const input_switch_cfg_t *const cfg
 );
 
-void input_system_get_state(
-    const input_system_ID_t                    ID,
-    input_system_state_t                       *state)
-{
-       sub_system_ID_t sub_id;
-
-       assert(ID < N_INPUT_SYSTEM_ID);
-       assert(state);
-
-       state->str_multicastA_sel = input_system_sub_system_reg_load(ID,
-                                   GPREGS_UNIT0_ID,
-                                   HIVE_ISYS_GPREG_MULTICAST_A_IDX);
-       state->str_multicastB_sel = input_system_sub_system_reg_load(ID,
-                                   GPREGS_UNIT0_ID,
-                                   HIVE_ISYS_GPREG_MULTICAST_B_IDX);
-       state->str_multicastC_sel = input_system_sub_system_reg_load(ID,
-                                   GPREGS_UNIT0_ID,
-                                   HIVE_ISYS_GPREG_MULTICAST_C_IDX);
-       state->str_mux_sel = input_system_sub_system_reg_load(ID,
-                            GPREGS_UNIT0_ID,
-                            HIVE_ISYS_GPREG_MUX_IDX);
-       state->str_mon_status = input_system_sub_system_reg_load(ID,
-                               GPREGS_UNIT0_ID,
-                               HIVE_ISYS_GPREG_STRMON_STAT_IDX);
-       state->str_mon_irq_cond = input_system_sub_system_reg_load(ID,
-                                 GPREGS_UNIT0_ID,
-                                 HIVE_ISYS_GPREG_STRMON_COND_IDX);
-       state->str_mon_irq_en = input_system_sub_system_reg_load(ID,
-                               GPREGS_UNIT0_ID,
-                               HIVE_ISYS_GPREG_STRMON_IRQ_EN_IDX);
-       state->isys_srst = input_system_sub_system_reg_load(ID,
-                          GPREGS_UNIT0_ID,
-                          HIVE_ISYS_GPREG_SRST_IDX);
-       state->isys_slv_reg_srst = input_system_sub_system_reg_load(ID,
-                                  GPREGS_UNIT0_ID,
-                                  HIVE_ISYS_GPREG_SLV_REG_SRST_IDX);
-       state->str_deint_portA_cnt = input_system_sub_system_reg_load(ID,
-                                    GPREGS_UNIT0_ID,
-                                    HIVE_ISYS_GPREG_REG_PORT_A_IDX);
-       state->str_deint_portB_cnt = input_system_sub_system_reg_load(ID,
-                                    GPREGS_UNIT0_ID,
-                                    HIVE_ISYS_GPREG_REG_PORT_B_IDX);
-
-       for (sub_id = CAPTURE_UNIT0_ID; sub_id < CAPTURE_UNIT0_ID + N_CAPTURE_UNIT_ID;
-            sub_id++) {
-               capture_unit_get_state(ID, sub_id,
-                                      &state->capture_unit[sub_id - CAPTURE_UNIT0_ID]);
-       }
-       for (sub_id = ACQUISITION_UNIT0_ID;
-            sub_id < ACQUISITION_UNIT0_ID + N_ACQUISITION_UNIT_ID; sub_id++) {
-               acquisition_unit_get_state(ID, sub_id,
-                                          &state->acquisition_unit[sub_id - ACQUISITION_UNIT0_ID]);
-       }
-       for (sub_id = CTRL_UNIT0_ID; sub_id < CTRL_UNIT0_ID + N_CTRL_UNIT_ID;
-            sub_id++) {
-               ctrl_unit_get_state(ID, sub_id,
-                                   &state->ctrl_unit_state[sub_id - CTRL_UNIT0_ID]);
-       }
-}
-
-void receiver_get_state(
-    const rx_ID_t                              ID,
-    receiver_state_t                   *state)
-{
-       enum mipi_port_id       port_id;
-       unsigned int    ch_id;
-
-       assert(ID < N_RX_ID);
-       assert(state);
-
-       state->fs_to_ls_delay = (uint8_t)receiver_reg_load(ID,
-                               _HRT_CSS_RECEIVER_FS_TO_LS_DELAY_REG_IDX);
-       state->ls_to_data_delay = (uint8_t)receiver_reg_load(ID,
-                                 _HRT_CSS_RECEIVER_LS_TO_DATA_DELAY_REG_IDX);
-       state->data_to_le_delay = (uint8_t)receiver_reg_load(ID,
-                                 _HRT_CSS_RECEIVER_DATA_TO_LE_DELAY_REG_IDX);
-       state->le_to_fe_delay = (uint8_t)receiver_reg_load(ID,
-                               _HRT_CSS_RECEIVER_LE_TO_FE_DELAY_REG_IDX);
-       state->fe_to_fs_delay = (uint8_t)receiver_reg_load(ID,
-                               _HRT_CSS_RECEIVER_FE_TO_FS_DELAY_REG_IDX);
-       state->le_to_fs_delay = (uint8_t)receiver_reg_load(ID,
-                               _HRT_CSS_RECEIVER_LE_TO_LS_DELAY_REG_IDX);
-       state->is_two_ppc = (bool)receiver_reg_load(ID,
-                           _HRT_CSS_RECEIVER_TWO_PIXEL_EN_REG_IDX);
-       state->backend_rst = receiver_reg_load(ID,
-                                              _HRT_CSS_RECEIVER_BACKEND_RST_REG_IDX);
-       state->raw18 = (uint16_t)receiver_reg_load(ID,
-                      _HRT_CSS_RECEIVER_RAW18_REG_IDX);
-       state->force_raw8 = (bool)receiver_reg_load(ID,
-                           _HRT_CSS_RECEIVER_FORCE_RAW8_REG_IDX);
-       state->raw16 = (uint16_t)receiver_reg_load(ID,
-                      _HRT_CSS_RECEIVER_RAW16_REG_IDX);
-
-       for (port_id = (enum mipi_port_id)0; port_id < N_MIPI_PORT_ID; port_id++) {
-               mipi_port_get_state(ID, port_id,
-                                   &state->mipi_port_state[port_id]);
-       }
-       for (ch_id = 0U; ch_id < N_RX_CHANNEL_ID; ch_id++) {
-               rx_channel_get_state(ID, ch_id,
-                                    &state->rx_channel_state[ch_id]);
-       }
-
-       state->be_gsp_acc_ovl = receiver_reg_load(ID,
-                               _HRT_CSS_RECEIVER_BE_GSP_ACC_OVL_REG_IDX);
-       state->be_srst = receiver_reg_load(ID,
-                                          _HRT_CSS_RECEIVER_BE_SRST_REG_IDX);
-       state->be_is_two_ppc = receiver_reg_load(ID,
-                              _HRT_CSS_RECEIVER_BE_TWO_PPC_REG_IDX);
-       state->be_comp_format0 = receiver_reg_load(ID,
-                                _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG0_IDX);
-       state->be_comp_format1 = receiver_reg_load(ID,
-                                _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG1_IDX);
-       state->be_comp_format2 = receiver_reg_load(ID,
-                                _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG2_IDX);
-       state->be_comp_format3 = receiver_reg_load(ID,
-                                _HRT_CSS_RECEIVER_BE_COMP_FORMAT_REG3_IDX);
-       state->be_sel = receiver_reg_load(ID,
-                                         _HRT_CSS_RECEIVER_BE_SEL_REG_IDX);
-       state->be_raw16_config = receiver_reg_load(ID,
-                                _HRT_CSS_RECEIVER_BE_RAW16_CONFIG_REG_IDX);
-       state->be_raw18_config = receiver_reg_load(ID,
-                                _HRT_CSS_RECEIVER_BE_RAW18_CONFIG_REG_IDX);
-       state->be_force_raw8 = receiver_reg_load(ID,
-                              _HRT_CSS_RECEIVER_BE_FORCE_RAW8_REG_IDX);
-       state->be_irq_status = receiver_reg_load(ID,
-                              _HRT_CSS_RECEIVER_BE_IRQ_STATUS_REG_IDX);
-       state->be_irq_clear = receiver_reg_load(ID,
-                                               _HRT_CSS_RECEIVER_BE_IRQ_CLEAR_REG_IDX);
-}
-
-bool is_mipi_format_yuv420(
-    const mipi_format_t                        mipi_format)
-{
-       bool    is_yuv420 = (
-                               (mipi_format == MIPI_FORMAT_YUV420_8) ||
-                               (mipi_format == MIPI_FORMAT_YUV420_10) ||
-                               (mipi_format == MIPI_FORMAT_YUV420_8_SHIFT) ||
-                               (mipi_format == MIPI_FORMAT_YUV420_10_SHIFT));
-       /* MIPI_FORMAT_YUV420_8_LEGACY is not YUV420 */
-
-       return is_yuv420;
-}
-
 void receiver_set_compression(
     const rx_ID_t                      ID,
     const unsigned int         cfg_ID,
@@ -361,282 +192,6 @@ void receiver_irq_clear(
                                port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX, irq_info);
 }
 
-static inline void capture_unit_get_state(
-    const input_system_ID_t                    ID,
-    const sub_system_ID_t                      sub_id,
-    capture_unit_state_t                       *state)
-{
-       assert(/*(sub_id >= CAPTURE_UNIT0_ID) &&*/ (sub_id <= CAPTURE_UNIT2_ID));
-       assert(state);
-
-       state->StartMode = input_system_sub_system_reg_load(ID,
-                          sub_id,
-                          CAPT_START_MODE_REG_ID);
-       state->Start_Addr = input_system_sub_system_reg_load(ID,
-                           sub_id,
-                           CAPT_START_ADDR_REG_ID);
-       state->Mem_Region_Size = input_system_sub_system_reg_load(ID,
-                                sub_id,
-                                CAPT_MEM_REGION_SIZE_REG_ID);
-       state->Num_Mem_Regions = input_system_sub_system_reg_load(ID,
-                                sub_id,
-                                CAPT_NUM_MEM_REGIONS_REG_ID);
-//     AM: Illegal read from following registers.
-       /*      state->Init = input_system_sub_system_reg_load(ID,
-                       sub_id,
-                       CAPT_INIT_REG_ID);
-               state->Start = input_system_sub_system_reg_load(ID,
-                       sub_id,
-                       CAPT_START_REG_ID);
-               state->Stop = input_system_sub_system_reg_load(ID,
-                       sub_id,
-                       CAPT_STOP_REG_ID);
-       */
-       state->Packet_Length = input_system_sub_system_reg_load(ID,
-                              sub_id,
-                              CAPT_PACKET_LENGTH_REG_ID);
-       state->Received_Length = input_system_sub_system_reg_load(ID,
-                                sub_id,
-                                CAPT_RECEIVED_LENGTH_REG_ID);
-       state->Received_Short_Packets = input_system_sub_system_reg_load(ID,
-                                       sub_id,
-                                       CAPT_RECEIVED_SHORT_PACKETS_REG_ID);
-       state->Received_Long_Packets = input_system_sub_system_reg_load(ID,
-                                      sub_id,
-                                      CAPT_RECEIVED_LONG_PACKETS_REG_ID);
-       state->Last_Command = input_system_sub_system_reg_load(ID,
-                             sub_id,
-                             CAPT_LAST_COMMAND_REG_ID);
-       state->Next_Command = input_system_sub_system_reg_load(ID,
-                             sub_id,
-                             CAPT_NEXT_COMMAND_REG_ID);
-       state->Last_Acknowledge = input_system_sub_system_reg_load(ID,
-                                 sub_id,
-                                 CAPT_LAST_ACKNOWLEDGE_REG_ID);
-       state->Next_Acknowledge = input_system_sub_system_reg_load(ID,
-                                 sub_id,
-                                 CAPT_NEXT_ACKNOWLEDGE_REG_ID);
-       state->FSM_State_Info = input_system_sub_system_reg_load(ID,
-                               sub_id,
-                               CAPT_FSM_STATE_INFO_REG_ID);
-}
-
-static inline void acquisition_unit_get_state(
-    const input_system_ID_t                    ID,
-    const sub_system_ID_t                      sub_id,
-    acquisition_unit_state_t           *state)
-{
-       assert(sub_id == ACQUISITION_UNIT0_ID);
-       assert(state);
-
-       state->Start_Addr = input_system_sub_system_reg_load(ID,
-                           sub_id,
-                           ACQ_START_ADDR_REG_ID);
-       state->Mem_Region_Size = input_system_sub_system_reg_load(ID,
-                                sub_id,
-                                ACQ_MEM_REGION_SIZE_REG_ID);
-       state->Num_Mem_Regions = input_system_sub_system_reg_load(ID,
-                                sub_id,
-                                ACQ_NUM_MEM_REGIONS_REG_ID);
-//     AM: Illegal read from following registers.
-       /*      state->Init = input_system_sub_system_reg_load(ID,
-                       sub_id,
-                       ACQ_INIT_REG_ID);
-       */
-       state->Received_Short_Packets = input_system_sub_system_reg_load(ID,
-                                       sub_id,
-                                       ACQ_RECEIVED_SHORT_PACKETS_REG_ID);
-       state->Received_Long_Packets = input_system_sub_system_reg_load(ID,
-                                      sub_id,
-                                      ACQ_RECEIVED_LONG_PACKETS_REG_ID);
-       state->Last_Command = input_system_sub_system_reg_load(ID,
-                             sub_id,
-                             ACQ_LAST_COMMAND_REG_ID);
-       state->Next_Command = input_system_sub_system_reg_load(ID,
-                             sub_id,
-                             ACQ_NEXT_COMMAND_REG_ID);
-       state->Last_Acknowledge = input_system_sub_system_reg_load(ID,
-                                 sub_id,
-                                 ACQ_LAST_ACKNOWLEDGE_REG_ID);
-       state->Next_Acknowledge = input_system_sub_system_reg_load(ID,
-                                 sub_id,
-                                 ACQ_NEXT_ACKNOWLEDGE_REG_ID);
-       state->FSM_State_Info = input_system_sub_system_reg_load(ID,
-                               sub_id,
-                               ACQ_FSM_STATE_INFO_REG_ID);
-       state->Int_Cntr_Info = input_system_sub_system_reg_load(ID,
-                              sub_id,
-                              ACQ_INT_CNTR_INFO_REG_ID);
-}
-
-static inline void ctrl_unit_get_state(
-    const input_system_ID_t                    ID,
-    const sub_system_ID_t                      sub_id,
-    ctrl_unit_state_t                  *state)
-{
-       assert(sub_id == CTRL_UNIT0_ID);
-       assert(state);
-
-       state->captA_start_addr = input_system_sub_system_reg_load(ID,
-                                 sub_id,
-                                 ISYS_CTRL_CAPT_START_ADDR_A_REG_ID);
-       state->captB_start_addr = input_system_sub_system_reg_load(ID,
-                                 sub_id,
-                                 ISYS_CTRL_CAPT_START_ADDR_B_REG_ID);
-       state->captC_start_addr = input_system_sub_system_reg_load(ID,
-                                 sub_id,
-                                 ISYS_CTRL_CAPT_START_ADDR_C_REG_ID);
-       state->captA_mem_region_size = input_system_sub_system_reg_load(ID,
-                                      sub_id,
-                                      ISYS_CTRL_CAPT_MEM_REGION_SIZE_A_REG_ID);
-       state->captB_mem_region_size = input_system_sub_system_reg_load(ID,
-                                      sub_id,
-                                      ISYS_CTRL_CAPT_MEM_REGION_SIZE_B_REG_ID);
-       state->captC_mem_region_size = input_system_sub_system_reg_load(ID,
-                                      sub_id,
-                                      ISYS_CTRL_CAPT_MEM_REGION_SIZE_C_REG_ID);
-       state->captA_num_mem_regions = input_system_sub_system_reg_load(ID,
-                                      sub_id,
-                                      ISYS_CTRL_CAPT_NUM_MEM_REGIONS_A_REG_ID);
-       state->captB_num_mem_regions = input_system_sub_system_reg_load(ID,
-                                      sub_id,
-                                      ISYS_CTRL_CAPT_NUM_MEM_REGIONS_B_REG_ID);
-       state->captC_num_mem_regions = input_system_sub_system_reg_load(ID,
-                                      sub_id,
-                                      ISYS_CTRL_CAPT_NUM_MEM_REGIONS_C_REG_ID);
-       state->acq_start_addr = input_system_sub_system_reg_load(ID,
-                               sub_id,
-                               ISYS_CTRL_ACQ_START_ADDR_REG_ID);
-       state->acq_mem_region_size = input_system_sub_system_reg_load(ID,
-                                    sub_id,
-                                    ISYS_CTRL_ACQ_MEM_REGION_SIZE_REG_ID);
-       state->acq_num_mem_regions = input_system_sub_system_reg_load(ID,
-                                    sub_id,
-                                    ISYS_CTRL_ACQ_NUM_MEM_REGIONS_REG_ID);
-//     AM: Illegal read from following registers.
-       /*      state->ctrl_init = input_system_sub_system_reg_load(ID,
-                       sub_id,
-                       ISYS_CTRL_INIT_REG_ID);
-       */
-       state->last_cmd = input_system_sub_system_reg_load(ID,
-                         sub_id,
-                         ISYS_CTRL_LAST_COMMAND_REG_ID);
-       state->next_cmd = input_system_sub_system_reg_load(ID,
-                         sub_id,
-                         ISYS_CTRL_NEXT_COMMAND_REG_ID);
-       state->last_ack = input_system_sub_system_reg_load(ID,
-                         sub_id,
-                         ISYS_CTRL_LAST_ACKNOWLEDGE_REG_ID);
-       state->next_ack = input_system_sub_system_reg_load(ID,
-                         sub_id,
-                         ISYS_CTRL_NEXT_ACKNOWLEDGE_REG_ID);
-       state->top_fsm_state = input_system_sub_system_reg_load(ID,
-                              sub_id,
-                              ISYS_CTRL_FSM_STATE_INFO_REG_ID);
-       state->captA_fsm_state = input_system_sub_system_reg_load(ID,
-                                sub_id,
-                                ISYS_CTRL_CAPT_A_FSM_STATE_INFO_REG_ID);
-       state->captB_fsm_state = input_system_sub_system_reg_load(ID,
-                                sub_id,
-                                ISYS_CTRL_CAPT_B_FSM_STATE_INFO_REG_ID);
-       state->captC_fsm_state = input_system_sub_system_reg_load(ID,
-                                sub_id,
-                                ISYS_CTRL_CAPT_C_FSM_STATE_INFO_REG_ID);
-       state->acq_fsm_state = input_system_sub_system_reg_load(ID,
-                              sub_id,
-                              ISYS_CTRL_ACQ_FSM_STATE_INFO_REG_ID);
-       state->capt_reserve_one_mem_region = input_system_sub_system_reg_load(ID,
-                                            sub_id,
-                                            ISYS_CTRL_CAPT_RESERVE_ONE_MEM_REGION_REG_ID);
-}
-
-static inline void mipi_port_get_state(
-    const rx_ID_t                              ID,
-    const enum mipi_port_id                    port_ID,
-    mipi_port_state_t                  *state)
-{
-       int     i;
-
-       assert(ID < N_RX_ID);
-       assert(port_ID < N_MIPI_PORT_ID);
-       assert(state);
-
-       state->device_ready = receiver_port_reg_load(ID,
-                             port_ID, _HRT_CSS_RECEIVER_DEVICE_READY_REG_IDX);
-       state->irq_status = receiver_port_reg_load(ID,
-                           port_ID, _HRT_CSS_RECEIVER_IRQ_STATUS_REG_IDX);
-       state->irq_enable = receiver_port_reg_load(ID,
-                           port_ID, _HRT_CSS_RECEIVER_IRQ_ENABLE_REG_IDX);
-       state->timeout_count = receiver_port_reg_load(ID,
-                              port_ID, _HRT_CSS_RECEIVER_TIMEOUT_COUNT_REG_IDX);
-       state->init_count = (uint16_t)receiver_port_reg_load(ID,
-                           port_ID, _HRT_CSS_RECEIVER_INIT_COUNT_REG_IDX);
-       state->raw16_18 = (uint16_t)receiver_port_reg_load(ID,
-                         port_ID, _HRT_CSS_RECEIVER_RAW16_18_DATAID_REG_IDX);
-       state->sync_count = receiver_port_reg_load(ID,
-                           port_ID, _HRT_CSS_RECEIVER_SYNC_COUNT_REG_IDX);
-       state->rx_count = receiver_port_reg_load(ID,
-                         port_ID, _HRT_CSS_RECEIVER_RX_COUNT_REG_IDX);
-
-       for (i = 0; i < MIPI_4LANE_CFG ; i++) {
-               state->lane_sync_count[i] = (uint8_t)((state->sync_count) >> (i * 8));
-               state->lane_rx_count[i] = (uint8_t)((state->rx_count) >> (i * 8));
-       }
-}
-
-static inline void rx_channel_get_state(
-    const rx_ID_t                                      ID,
-    const unsigned int                         ch_id,
-    rx_channel_state_t                         *state)
-{
-       int     i;
-
-       assert(ID < N_RX_ID);
-       assert(ch_id < N_RX_CHANNEL_ID);
-       assert(state);
-
-       switch (ch_id) {
-       case 0:
-               state->comp_scheme0 = receiver_reg_load(ID,
-                                                       _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG0_IDX);
-               state->comp_scheme1 = receiver_reg_load(ID,
-                                                       _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC0_REG1_IDX);
-               break;
-       case 1:
-               state->comp_scheme0 = receiver_reg_load(ID,
-                                                       _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG0_IDX);
-               state->comp_scheme1 = receiver_reg_load(ID,
-                                                       _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC1_REG1_IDX);
-               break;
-       case 2:
-               state->comp_scheme0 = receiver_reg_load(ID,
-                                                       _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG0_IDX);
-               state->comp_scheme1 = receiver_reg_load(ID,
-                                                       _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC2_REG1_IDX);
-               break;
-       case 3:
-               state->comp_scheme0 = receiver_reg_load(ID,
-                                                       _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG0_IDX);
-               state->comp_scheme1 = receiver_reg_load(ID,
-                                                       _HRT_CSS_RECEIVER_2400_COMP_SCHEME_VC3_REG1_IDX);
-               break;
-       }
-
-       /* See Table 7.1.17,..., 7.1.24 */
-       for (i = 0; i < 6; i++) {
-               u8      val = (uint8_t)((state->comp_scheme0) >> (i * 5)) & 0x1f;
-
-               state->comp[i] = (mipi_compressor_t)(val & 0x07);
-               state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3);
-       }
-       for (i = 6; i < N_MIPI_FORMAT_CUSTOM; i++) {
-               u8      val = (uint8_t)((state->comp_scheme0) >> ((i - 6) * 5)) & 0x1f;
-
-               state->comp[i] = (mipi_compressor_t)(val & 0x07);
-               state->pred[i] = (mipi_predictor_t)((val & 0x18) >> 3);
-       }
-}
-
 // MW: "2400" in the name is not good, but this is to avoid a naming conflict
 static input_system_cfg2400_t config;
 
@@ -1787,4 +1342,3 @@ static input_system_err_t input_system_multiplexer_cfg(
        *flags |= INPUT_SYSTEM_CFG_FLAG_SET;
        return INPUT_SYSTEM_ERR_NO_ERROR;
 }
-#endif
index 80b5fd0dc9f6e004f599be322561eb5d45fede9b..4697d8d7b915bee3a70efab0db251c2d1c9b3f7d 100644 (file)
@@ -225,25 +225,6 @@ void irq_raise(
        return;
 }
 
-void irq_controller_get_state(const irq_ID_t ID,
-                             struct irq_controller_state *state)
-{
-       assert(ID < N_IRQ_ID);
-       assert(state);
-
-       state->irq_edge = irq_reg_load(ID,
-                                      _HRT_IRQ_CONTROLLER_EDGE_REG_IDX);
-       state->irq_mask = irq_reg_load(ID,
-                                      _HRT_IRQ_CONTROLLER_MASK_REG_IDX);
-       state->irq_status = irq_reg_load(ID,
-                                        _HRT_IRQ_CONTROLLER_STATUS_REG_IDX);
-       state->irq_enable = irq_reg_load(ID,
-                                        _HRT_IRQ_CONTROLLER_ENABLE_REG_IDX);
-       state->irq_level_not_pulse = irq_reg_load(ID,
-                                    _HRT_IRQ_CONTROLLER_EDGE_NOT_PULSE_REG_IDX);
-       return;
-}
-
 bool any_virq_signal(void)
 {
        unsigned int irq_status = irq_reg_load(IRQ0_ID,
index 6a25345ae88ed34a154af379bc377333ac1efc40..8fd1bce852145093192c37f9ddbcd9641e3bdcf3 100644 (file)
@@ -115,12 +115,4 @@ struct virq_info {
        hrt_data                irq_status_reg[N_IRQ_ID];
 };
 
-struct irq_controller_state {
-       unsigned int    irq_edge;
-       unsigned int    irq_mask;
-       unsigned int    irq_status;
-       unsigned int    irq_enable;
-       unsigned int    irq_level_not_pulse;
-};
-
 #endif /* __IRQ_LOCAL_H_INCLUDED__ */
index 4ad5e2db8a89b01e6877b062a32bb653e5c1642d..b78cc324da6a71d0695b3630d7a41e9258548043 100644 (file)
@@ -39,66 +39,6 @@ void cnd_isp_irq_enable(
        return;
 }
 
-void isp_get_state(
-    const isp_ID_t             ID,
-    isp_state_t                        *state,
-    isp_stall_t                        *stall)
-{
-       hrt_data sc = isp_ctrl_load(ID, ISP_SC_REG);
-
-       assert(state);
-       assert(stall);
-
-#if defined(_hrt_sysmem_ident_address)
-       /* Patch to avoid compiler unused symbol warning in C_RUN build */
-       (void)__hrt_sysmem_ident_address;
-       (void)_hrt_sysmem_map_var;
-#endif
-
-       state->pc = isp_ctrl_load(ID, ISP_PC_REG);
-       state->status_register = sc;
-       state->is_broken = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_BROKEN_BIT);
-       state->is_idle = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_IDLE_BIT);
-       state->is_sleeping = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_SLEEPING_BIT);
-       state->is_stalling = isp_ctrl_getbit(ID, ISP_SC_REG, ISP_STALLING_BIT);
-       stall->stat_ctrl =
-           !isp_ctrl_getbit(ID, ISP_CTRL_SINK_REG, ISP_CTRL_SINK_BIT);
-       stall->pmem =
-           !isp_ctrl_getbit(ID, ISP_PMEM_SINK_REG, ISP_PMEM_SINK_BIT);
-       stall->dmem =
-           !isp_ctrl_getbit(ID, ISP_DMEM_SINK_REG, ISP_DMEM_SINK_BIT);
-       stall->vmem =
-           !isp_ctrl_getbit(ID, ISP_VMEM_SINK_REG, ISP_VMEM_SINK_BIT);
-       stall->fifo0 =
-           !isp_ctrl_getbit(ID, ISP_FIFO0_SINK_REG, ISP_FIFO0_SINK_BIT);
-       stall->fifo1 =
-           !isp_ctrl_getbit(ID, ISP_FIFO1_SINK_REG, ISP_FIFO1_SINK_BIT);
-       stall->fifo2 =
-           !isp_ctrl_getbit(ID, ISP_FIFO2_SINK_REG, ISP_FIFO2_SINK_BIT);
-       stall->fifo3 =
-           !isp_ctrl_getbit(ID, ISP_FIFO3_SINK_REG, ISP_FIFO3_SINK_BIT);
-       stall->fifo4 =
-           !isp_ctrl_getbit(ID, ISP_FIFO4_SINK_REG, ISP_FIFO4_SINK_BIT);
-       stall->fifo5 =
-           !isp_ctrl_getbit(ID, ISP_FIFO5_SINK_REG, ISP_FIFO5_SINK_BIT);
-       stall->fifo6 =
-           !isp_ctrl_getbit(ID, ISP_FIFO6_SINK_REG, ISP_FIFO6_SINK_BIT);
-       stall->vamem1 =
-           !isp_ctrl_getbit(ID, ISP_VAMEM1_SINK_REG, ISP_VAMEM1_SINK_BIT);
-       stall->vamem2 =
-           !isp_ctrl_getbit(ID, ISP_VAMEM2_SINK_REG, ISP_VAMEM2_SINK_BIT);
-       stall->vamem3 =
-           !isp_ctrl_getbit(ID, ISP_VAMEM3_SINK_REG, ISP_VAMEM3_SINK_BIT);
-       stall->hmem =
-           !isp_ctrl_getbit(ID, ISP_HMEM_SINK_REG, ISP_HMEM_SINK_BIT);
-       /*
-               stall->icache_master =
-                       !isp_ctrl_getbit(ID, ISP_ICACHE_MT_SINK_REG,
-                               ISP_ICACHE_MT_SINK_BIT);
-        */
-       return;
-}
-
 /* ISP functions to control the ISP state from the host, even in crun. */
 
 /* Inspect readiness of an ISP indexed by ID */
index 4dbec4063b3d5a65a8c2478d998a655b14f4451e..fb98696cc44d105fda62fbbf70cedd10cd490bf3 100644 (file)
 
 #define HIVE_ISP_VMEM_MASK     ((1U << ISP_VMEM_ELEMBITS) - 1)
 
-typedef struct isp_state_s             isp_state_t;
-typedef struct isp_stall_s             isp_stall_t;
-
-struct isp_state_s {
-       int     pc;
-       int     status_register;
-       bool    is_broken;
-       bool    is_idle;
-       bool    is_sleeping;
-       bool    is_stalling;
-};
-
-struct isp_stall_s {
-       bool    fifo0;
-       bool    fifo1;
-       bool    fifo2;
-       bool    fifo3;
-       bool    fifo4;
-       bool    fifo5;
-       bool    fifo6;
-       bool    stat_ctrl;
-       bool    dmem;
-       bool    vmem;
-       bool    vamem1;
-       bool    vamem2;
-       bool    vamem3;
-       bool    hmem;
-       bool    pmem;
-       bool    icache_master;
-};
-
 #endif /* __ISP_LOCAL_H_INCLUDED__ */
index aae18465b6ae20db6148e3cec69db3a613d3ec8f..3dc4d1289ea1b0366a8aa65001f51ddba422d590 100644 (file)
@@ -33,50 +33,3 @@ void cnd_sp_irq_enable(
                sp_ctrl_clearbit(ID, SP_IRQ_READY_REG, SP_IRQ_READY_BIT);
        }
 }
-
-void sp_get_state(
-    const sp_ID_t                      ID,
-    sp_state_t                         *state,
-    sp_stall_t                         *stall)
-{
-       hrt_data sc = sp_ctrl_load(ID, SP_SC_REG);
-
-       assert(state);
-       assert(stall);
-
-       state->pc = sp_ctrl_load(ID, SP_PC_REG);
-       state->status_register = sc;
-       state->is_broken   = (sc & (1U << SP_BROKEN_BIT)) != 0;
-       state->is_idle     = (sc & (1U << SP_IDLE_BIT)) != 0;
-       state->is_sleeping = (sc & (1U << SP_SLEEPING_BIT)) != 0;
-       state->is_stalling = (sc & (1U << SP_STALLING_BIT)) != 0;
-       stall->fifo0 =
-           !sp_ctrl_getbit(ID, SP_FIFO0_SINK_REG, SP_FIFO0_SINK_BIT);
-       stall->fifo1 =
-           !sp_ctrl_getbit(ID, SP_FIFO1_SINK_REG, SP_FIFO1_SINK_BIT);
-       stall->fifo2 =
-           !sp_ctrl_getbit(ID, SP_FIFO2_SINK_REG, SP_FIFO2_SINK_BIT);
-       stall->fifo3 =
-           !sp_ctrl_getbit(ID, SP_FIFO3_SINK_REG, SP_FIFO3_SINK_BIT);
-       stall->fifo4 =
-           !sp_ctrl_getbit(ID, SP_FIFO4_SINK_REG, SP_FIFO4_SINK_BIT);
-       stall->fifo5 =
-           !sp_ctrl_getbit(ID, SP_FIFO5_SINK_REG, SP_FIFO5_SINK_BIT);
-       stall->fifo6 =
-           !sp_ctrl_getbit(ID, SP_FIFO6_SINK_REG, SP_FIFO6_SINK_BIT);
-       stall->fifo7 =
-           !sp_ctrl_getbit(ID, SP_FIFO7_SINK_REG, SP_FIFO7_SINK_BIT);
-       stall->fifo8 =
-           !sp_ctrl_getbit(ID, SP_FIFO8_SINK_REG, SP_FIFO8_SINK_BIT);
-       stall->fifo9 =
-           !sp_ctrl_getbit(ID, SP_FIFO9_SINK_REG, SP_FIFO9_SINK_BIT);
-       stall->fifoa =
-           !sp_ctrl_getbit(ID, SP_FIFOA_SINK_REG, SP_FIFOA_SINK_BIT);
-       stall->dmem =
-           !sp_ctrl_getbit(ID, SP_DMEM_SINK_REG, SP_DMEM_SINK_BIT);
-       stall->control_master =
-           !sp_ctrl_getbit(ID, SP_CTRL_MT_SINK_REG, SP_CTRL_MT_SINK_BIT);
-       stall->icache_master =
-           !sp_ctrl_getbit(ID, SP_ICACHE_MT_SINK_REG,
-                           SP_ICACHE_MT_SINK_BIT);
-}
index 2956c7023b33471de8a9e9d7c0c2f383c3961946..e22d25a902f406f232b3a9b43d64b996ed8ef9ff 100644 (file)
 #include <type_support.h>
 #include "sp_global.h"
 
-struct sp_state_s {
-       int             pc;
-       int             status_register;
-       bool    is_broken;
-       bool    is_idle;
-       bool    is_sleeping;
-       bool    is_stalling;
-};
-
-struct sp_stall_s {
-       bool    fifo0;
-       bool    fifo1;
-       bool    fifo2;
-       bool    fifo3;
-       bool    fifo4;
-       bool    fifo5;
-       bool    fifo6;
-       bool    fifo7;
-       bool    fifo8;
-       bool    fifo9;
-       bool    fifoa;
-       bool    dmem;
-       bool    control_master;
-       bool    icache_master;
-};
-
 #define sp_address_of(var)     (HIVE_ADDR_ ## var)
 
 /*
index 3d6621f2fa96c85d275ec57362a67b170804c6c9..693154e8ec2f914fc7d129244096181a1d0cd17d 100644 (file)
@@ -16,7 +16,6 @@
 #ifndef __CSI_RX_PUBLIC_H_INCLUDED__
 #define __CSI_RX_PUBLIC_H_INCLUDED__
 
-#ifdef ISP2401
 /*****************************************************
  *
  * Native command interface (NCI).
@@ -132,5 +131,4 @@ void csi_rx_be_ctrl_reg_store(
     const hrt_address reg,
     const hrt_data value);
 /* end of DLI */
-#endif /* ISP2401 */
 #endif /* __CSI_RX_PUBLIC_H_INCLUDED__ */
index a23cbc9a2129af72d566f5d3bdbd129843c37175..6fed47f045945a0c164db4f7f6b4c736f318447a 100644 (file)
 
 #include "system_local.h"
 
-typedef struct dma_state_s             dma_state_t;
-
-/*! Read the control registers of DMA[ID]
-
- \param        ID[in]                          DMA identifier
- \param        state[out]                      input formatter state structure
-
- \return none, state = DMA[ID].state
- */
-void dma_get_state(
-    const dma_ID_t             ID,
-    dma_state_t                        *state);
-
 /*! Write to a control register of DMA[ID]
 
  \param        ID[in]                          DMA identifier
index d335e7b0a76e589f8d1e6d168e51081502fb246b..43787ab64078a4ab44294b6b86b929a7e8803051 100644 (file)
 #include <type_support.h>
 #include "system_local.h"
 
-/*! Read the control registers of IRQ[ID]
-
- \param        ID[in]                          IRQ identifier
- \param        state[out]                      irq controller state structure
-
- \return none, state = IRQ[ID].state
- */
-void irq_controller_get_state(const irq_ID_t ID,
-                             struct irq_controller_state *state);
-
 /*! Write to a control register of IRQ[ID]
 
  \param        ID[in]                          IRQ identifier
index a8ff75c639e52d5f56f1f3cd94d1b5ad8e45b6a0..34dd7f912df6071ba226c3292706ad5d67459fd1 100644 (file)
@@ -30,19 +30,6 @@ void cnd_isp_irq_enable(
     const isp_ID_t             ID,
     const bool                 cnd);
 
-/*! Read the state of cell ISP[ID]
-
- \param        ID[in]                          ISP identifier
- \param        state[out]                      isp state structure
- \param        stall[out]                      isp stall conditions
-
- \return none, state = ISP[ID].state, stall = ISP[ID].stall
- */
-void isp_get_state(
-    const isp_ID_t             ID,
-    isp_state_t                        *state,
-    isp_stall_t                        *stall);
-
 /*! Write to the status and control register of ISP[ID]
 
  \param        ID[in]                          ISP identifier
index d9b6af898c06bf8a4804bd44f5a27f5badd323eb..f18a057adb0f771659e8d8f0d8f642468f8150b9 100644 (file)
@@ -16,7 +16,6 @@
 #ifndef __ISYS_DMA_PUBLIC_H_INCLUDED__
 #define __ISYS_DMA_PUBLIC_H_INCLUDED__
 
-#ifdef ISP2401
 
 #include "system_local.h"
 #include "type_support.h"
@@ -34,6 +33,5 @@ void isys2401_dma_set_max_burst_size(
     const isys2401_dma_ID_t dma_id,
     uint32_t           max_burst_size);
 
-#endif /* ISP2401 */
 
 #endif /* __ISYS_DMA_PUBLIC_H_INCLUDED__ */
index 736cbc4e37055657b4c80f5a50c1ddc83e03ff4f..2b13688256cffb19059679125809f01fb1a27fc3 100644 (file)
@@ -19,7 +19,6 @@
 #include "isys_irq_global.h"
 #include "isys_irq_local.h"
 
-#if defined(ISP2401)
 
 void isys_irqc_state_get(const isys_irq_ID_t   isys_irqc_id,
                         isys_irqc_state_t      *state);
@@ -36,6 +35,5 @@ hrt_data isys_irqc_reg_load(const isys_irq_ID_t       isys_irqc_id,
 
 void isys_irqc_status_enable(const isys_irq_ID_t isys_irqc_id);
 
-#endif /* defined(ISP2401) */
 
 #endif /* __ISYS_IRQ_PUBLIC_H__ */
diff --git a/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h b/drivers/staging/media/atomisp/pci/hive_isp_css_include/host/isys_public.h
deleted file mode 100644 (file)
index dac53e3..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- */
-
-#ifndef __ISYS_PUBLIC_H_INCLUDED__
-#define __ISYS_PUBLIC_H_INCLUDED__
-
-#ifdef ISP2401
-/*! Read the state of INPUT_SYSTEM[ID]
- \param ID[in]         INPUT_SYSTEM identifier
- \param state[out]     pointer to input system state structure
- \return none, state = INPUT_SYSTEM[ID].state
- */
-STORAGE_CLASS_INPUT_SYSTEM_H input_system_err_t input_system_get_state(
-    const input_system_ID_t    ID,
-    input_system_state_t *state);
-/*! Dump the state of INPUT_SYSTEM[ID]
- \param ID[in]         INPUT_SYSTEM identifier
- \param state[in]      pointer to input system state structure
- \return none
- \depends on host supplied print function as part of ia_css_init()
- */
-STORAGE_CLASS_INPUT_SYSTEM_H void input_system_dump_state(
-    const input_system_ID_t    ID,
-    input_system_state_t *state);
-#endif /* ISP2401 */
-#endif /* __ISYS_PUBLIC_H_INCLUDED__ */
index 40a9fb6d77618ad4e26dd07a80db230ce77eb071..da10e6b98c63f6fcf6b3fdb862efa7a43bc624d3 100644 (file)
@@ -16,7 +16,6 @@
 #ifndef __PIXELGEN_PUBLIC_H_INCLUDED__
 #define __PIXELGEN_PUBLIC_H_INCLUDED__
 
-#ifdef ISP2401
 /*****************************************************
  *
  * Native command interface (NCI).
@@ -76,5 +75,4 @@ STORAGE_CLASS_PIXELGEN_H void pixelgen_ctrl_reg_store(
     const hrt_data value);
 /* end of DLI */
 
-#endif /* ISP2401 */
 #endif /* __PIXELGEN_PUBLIC_H_INCLUDED__ */
index b0b7f2e278546a204c66bb1f7a6f992fdb44fd67..e9166535ce099c90bbb1d3f1f2293f71b01757a7 100644 (file)
@@ -19,9 +19,6 @@
 #include <type_support.h>
 #include "system_local.h"
 
-typedef struct sp_state_s              sp_state_t;
-typedef struct sp_stall_s              sp_stall_t;
-
 /*! Enable or disable the program complete irq signal of SP[ID]
 
  \param        ID[in]                          SP identifier
@@ -33,19 +30,6 @@ void cnd_sp_irq_enable(
     const sp_ID_t              ID,
     const bool                 cnd);
 
-/*! Read the state of cell SP[ID]
-
- \param        ID[in]                          SP identifier
- \param        state[out]                      sp state structure
- \param        stall[out]                      isp stall conditions
-
- \return none, state = SP[ID].state, stall = SP[ID].stall
- */
-void sp_get_state(
-    const sp_ID_t              ID,
-    sp_state_t                 *state,
-    sp_stall_t                 *stall);
-
 /*! Write to the status and control register of SP[ID]
 
  \param        ID[in]                          SP identifier
index 001c55ea970b059d43689b7eac2c60f9adc14694..952b633fdca709cc24f3fa1ab4d00ae10db208fe 100644 (file)
 #include <type_support.h>
 #include <system_local.h>
 
-#if defined(ISP2401)
 
 #include "isys_irq_public.h"
 
-#endif /* defined(ISP2401) */
 
 #endif /* __IA_CSS_ISYS_IRQ_H__ */
index a20879aedef67e1edb06ba5bf338be20fa437cb3..d6e52b4971d689dcf92b729c97bf224b933a3363 100644 (file)
@@ -331,11 +331,7 @@ struct ia_css_sp_info {
        of DDR debug queue */
        u32 perf_counter_input_system_error; /** input system perf
        counter array */
-#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
-       u32 debug_wait; /** thread/pipe post mortem debug */
-       u32 debug_stage; /** thread/pipe post mortem debug */
-       u32 debug_stripe; /** thread/pipe post mortem debug */
-#endif
+
        u32 threads_stack; /** sp thread's stack pointers */
        u32 threads_stack_size; /** sp thread's stack sizes */
        u32 curr_binary_id;        /** current binary id */
index 9e50e1c619be20538251ff4de8f8bc134a4e48ad..cd6e0111d9f4e59a53a075f9997726cbc8f27829 100644 (file)
 #include "ia_css_stream_format.h"
 #include "ia_css_input_port.h"
 
-/* @brief Register size of a CSS MIPI frame for check during capturing.
- *
- * @param[in]  port    CSI-2 port this check is registered.
- * @param[in]  size_mem_words  The frame size in memory words (32B).
- * @return             Return the error in case of failure. E.g. MAX_NOF_ENTRIES REACHED
- *
- * Register size of a CSS MIPI frame to check during capturing. Up to
- *             IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES entries per port allowed. Entries are reset
- *             when stream is stopped.
- *
- *
- */
-int
-ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
-                                      const unsigned int       size_mem_words);
-
 /* @brief Calculate the size of a mipi frame.
  *
  * @param[in]  width           The width (in pixels) of the frame.
index 357987d629cd3ac5e17a71735ff67fdd1713a73e..12f7acfeb79c94f1253b0153a90f384038989c82 100644 (file)
@@ -138,8 +138,5 @@ struct rx_cfg_s {
        bool                is_two_ppc;
 };
 
-#ifdef ISP2401
-#  include "isp2401_input_system_local.h"
-#else
-#  include "isp2400_input_system_local.h"
-#endif
+#include "isp2401_input_system_local.h"
+#include "isp2400_input_system_local.h"
index 889f204e77d5bad1a6089fe3f275254ec708e698..148ba2ca22904e48fc53fc2705715c5808734399 100644 (file)
@@ -4,8 +4,5 @@
  *    (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
  */
 
-#ifdef ISP2401
-#  include "isp2401_input_system_private.h"
-#else
-#  include "isp2400_input_system_private.h"
-#endif
+#include "isp2401_input_system_private.h"
+#include "isp2400_input_system_private.h"
index 3f5167fd66433b37790a80ee1de6af2d131e1f62..06b19434b6c09817fba93119adfddf24db21c10e 100644 (file)
@@ -4,6 +4,4 @@
  *    (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
  */
 
-#ifndef ISP2401
-#  include "isp2400_input_system_public.h"
-#endif
+#include "isp2400_input_system_public.h"
index 646d6e39c1e589074b52b9c446ead7732fa90e60..40d4a052156274bd7855dc2ff9115d0403f23d69 100644 (file)
@@ -73,17 +73,9 @@ int ia_css_raw_config(struct sh_css_isp_raw_isp_config *to,
        const struct ia_css_frame_info *internal_info = from->internal_info;
        int ret;
 
-#if !defined(ISP2401)
-       /* 2401 input system uses input width width */
-       in_info = internal_info;
-#else
-       /*in some cases, in_info is NULL*/
-       if (in_info)
-               (void)internal_info;
-       else
+       if (!IS_ISP2401 || !in_info)
                in_info = internal_info;
 
-#endif
        ret = ia_css_dma_configure_from_info(&to->port_b, in_info);
        if (ret)
                return ret;
@@ -99,11 +91,12 @@ int ia_css_raw_config(struct sh_css_isp_raw_isp_config *to,
        to->two_ppc             = from->two_ppc;
        to->stream_format       = css2isp_stream_format(from->stream_format);
        to->deinterleaved       = from->deinterleaved;
-#if defined(ISP2401)
-       to->start_column        = in_info->crop_info.start_column;
-       to->start_line          = in_info->crop_info.start_line;
-       to->enable_left_padding = from->enable_left_padding;
-#endif
+
+       if (IS_ISP2401) {
+               to->start_column        = in_info->crop_info.start_column;
+               to->start_line          = in_info->crop_info.start_line;
+               to->enable_left_padding = from->enable_left_padding;
+       }
 
        return 0;
 }
index c3ae5014a03959a9f30e4bc0c8794508023950b4..f9b9c3ae50aa79e1144701cf78efeac2c47b50eb 100644 (file)
@@ -13,8 +13,8 @@
  * more details.
  */
 
-#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__
-#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+#ifndef __INPUT_SYSTEM_2400_LOCAL_H_INCLUDED__
+#define __INPUT_SYSTEM_2400_LOCAL_H_INCLUDED__
 
 #include "input_system_defs.h"         /* HIVE_ISYS_GPREG_MULTICAST_A_IDX,... */
 
@@ -163,45 +163,39 @@ struct input_system_cfg2400_s {
 #define        _HRT_CSS_RECEIVER_DATA_TIMEOUT_IDX              _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_IDX
 #define        _HRT_CSS_RECEIVER_DATA_TIMEOUT_BITS             _HRT_CSS_RECEIVER_2400_CSI2_DATA_TIMEOUT_BITS
 
-typedef struct capture_unit_state_s    capture_unit_state_t;
-typedef struct acquisition_unit_state_s        acquisition_unit_state_t;
-typedef struct ctrl_unit_state_s       ctrl_unit_state_t;
-
 typedef enum {
-       MIPI_FORMAT_RGB888 = 0,
-       MIPI_FORMAT_RGB555,
-       MIPI_FORMAT_RGB444,
-       MIPI_FORMAT_RGB565,
-       MIPI_FORMAT_RGB666,
-       MIPI_FORMAT_RAW8,               /* 5 */
-       MIPI_FORMAT_RAW10,
-       MIPI_FORMAT_RAW6,
-       MIPI_FORMAT_RAW7,
-       MIPI_FORMAT_RAW12,
-       MIPI_FORMAT_RAW14,              /* 10 */
-       MIPI_FORMAT_YUV420_8,
-       MIPI_FORMAT_YUV420_10,
-       MIPI_FORMAT_YUV422_8,
-       MIPI_FORMAT_YUV422_10,
-       MIPI_FORMAT_CUSTOM0,    /* 15 */
-       MIPI_FORMAT_YUV420_8_LEGACY,
-       MIPI_FORMAT_EMBEDDED,
-       MIPI_FORMAT_CUSTOM1,
-       MIPI_FORMAT_CUSTOM2,
-       MIPI_FORMAT_CUSTOM3,    /* 20 */
-       MIPI_FORMAT_CUSTOM4,
-       MIPI_FORMAT_CUSTOM5,
-       MIPI_FORMAT_CUSTOM6,
-       MIPI_FORMAT_CUSTOM7,
-       MIPI_FORMAT_YUV420_8_SHIFT,     /* 25 */
-       MIPI_FORMAT_YUV420_10_SHIFT,
-       MIPI_FORMAT_RAW16,
-       MIPI_FORMAT_RAW18,
-       N_MIPI_FORMAT,
-} mipi_format_t;
+       MIPI_FORMAT_2400_RGB888 = 0,
+       MIPI_FORMAT_2400_RGB555,
+       MIPI_FORMAT_2400_RGB444,
+       MIPI_FORMAT_2400_RGB565,
+       MIPI_FORMAT_2400_RGB666,
+       MIPI_FORMAT_2400_RAW8,          /* 5 */
+       MIPI_FORMAT_2400_RAW10,
+       MIPI_FORMAT_2400_RAW6,
+       MIPI_FORMAT_2400_RAW7,
+       MIPI_FORMAT_2400_RAW12,
+       MIPI_FORMAT_2400_RAW14,         /* 10 */
+       MIPI_FORMAT_2400_YUV420_8,
+       MIPI_FORMAT_2400_YUV420_10,
+       MIPI_FORMAT_2400_YUV422_8,
+       MIPI_FORMAT_2400_YUV422_10,
+       MIPI_FORMAT_2400_CUSTOM0,       /* 15 */
+       MIPI_FORMAT_2400_YUV420_8_LEGACY,
+       MIPI_FORMAT_2400_EMBEDDED,
+       MIPI_FORMAT_2400_CUSTOM1,
+       MIPI_FORMAT_2400_CUSTOM2,
+       MIPI_FORMAT_2400_CUSTOM3,       /* 20 */
+       MIPI_FORMAT_2400_CUSTOM4,
+       MIPI_FORMAT_2400_CUSTOM5,
+       MIPI_FORMAT_2400_CUSTOM6,
+       MIPI_FORMAT_2400_CUSTOM7,
+       MIPI_FORMAT_2400_YUV420_8_SHIFT,        /* 25 */
+       MIPI_FORMAT_2400_YUV420_10_SHIFT,
+       MIPI_FORMAT_2400_RAW16,
+       MIPI_FORMAT_2400_RAW18,
+       N_MIPI_FORMAT_2400,
+} mipi_format_2400_t;
 
-#define MIPI_FORMAT_JPEG               MIPI_FORMAT_CUSTOM0
-#define MIPI_FORMAT_BINARY_8   MIPI_FORMAT_CUSTOM0
 #define N_MIPI_FORMAT_CUSTOM   8
 
 /* The number of stores for compressed format types */
@@ -246,130 +240,4 @@ static const hrt_address __maybe_unused SUB_SYSTEM_OFFSET[N_SUB_SYSTEM_ID] = {
        0x0000C000UL
 };
 
-struct capture_unit_state_s {
-       int     Packet_Length;
-       int     Received_Length;
-       int     Received_Short_Packets;
-       int     Received_Long_Packets;
-       int     Last_Command;
-       int     Next_Command;
-       int     Last_Acknowledge;
-       int     Next_Acknowledge;
-       int     FSM_State_Info;
-       int     StartMode;
-       int     Start_Addr;
-       int     Mem_Region_Size;
-       int     Num_Mem_Regions;
-       /*      int     Init;   write-only registers
-               int     Start;
-               int     Stop;      */
-};
-
-struct acquisition_unit_state_s {
-       /*      int     Init;   write-only register */
-       int     Received_Short_Packets;
-       int     Received_Long_Packets;
-       int     Last_Command;
-       int     Next_Command;
-       int     Last_Acknowledge;
-       int     Next_Acknowledge;
-       int     FSM_State_Info;
-       int     Int_Cntr_Info;
-       int     Start_Addr;
-       int     Mem_Region_Size;
-       int     Num_Mem_Regions;
-};
-
-struct ctrl_unit_state_s {
-       int     last_cmd;
-       int     next_cmd;
-       int     last_ack;
-       int     next_ack;
-       int     top_fsm_state;
-       int     captA_fsm_state;
-       int     captB_fsm_state;
-       int     captC_fsm_state;
-       int     acq_fsm_state;
-       int     captA_start_addr;
-       int     captB_start_addr;
-       int     captC_start_addr;
-       int     captA_mem_region_size;
-       int     captB_mem_region_size;
-       int     captC_mem_region_size;
-       int     captA_num_mem_regions;
-       int     captB_num_mem_regions;
-       int     captC_num_mem_regions;
-       int     acq_start_addr;
-       int     acq_mem_region_size;
-       int     acq_num_mem_regions;
-       /*      int     ctrl_init;  write only register */
-       int     capt_reserve_one_mem_region;
-};
-
-struct input_system_state_s {
-       int     str_multicastA_sel;
-       int     str_multicastB_sel;
-       int     str_multicastC_sel;
-       int     str_mux_sel;
-       int     str_mon_status;
-       int     str_mon_irq_cond;
-       int     str_mon_irq_en;
-       int     isys_srst;
-       int     isys_slv_reg_srst;
-       int     str_deint_portA_cnt;
-       int     str_deint_portB_cnt;
-       struct capture_unit_state_s             capture_unit[N_CAPTURE_UNIT_ID];
-       struct acquisition_unit_state_s acquisition_unit[N_ACQUISITION_UNIT_ID];
-       struct ctrl_unit_state_s                ctrl_unit_state[N_CTRL_UNIT_ID];
-};
-
-struct mipi_port_state_s {
-       int     device_ready;
-       int     irq_status;
-       int     irq_enable;
-       u32     timeout_count;
-       u16     init_count;
-       u16     raw16_18;
-       u32     sync_count;             /*4 x uint8_t */
-       u32     rx_count;               /*4 x uint8_t */
-       u8              lane_sync_count[MIPI_4LANE_CFG];
-       u8              lane_rx_count[MIPI_4LANE_CFG];
-};
-
-struct rx_channel_state_s {
-       u32     comp_scheme0;
-       u32     comp_scheme1;
-       mipi_predictor_t                pred[N_MIPI_FORMAT_CUSTOM];
-       mipi_compressor_t               comp[N_MIPI_FORMAT_CUSTOM];
-};
-
-struct receiver_state_s {
-       u8      fs_to_ls_delay;
-       u8      ls_to_data_delay;
-       u8      data_to_le_delay;
-       u8      le_to_fe_delay;
-       u8      fe_to_fs_delay;
-       u8      le_to_fs_delay;
-       bool    is_two_ppc;
-       int     backend_rst;
-       u16     raw18;
-       bool            force_raw8;
-       u16     raw16;
-       struct mipi_port_state_s        mipi_port_state[N_MIPI_PORT_ID];
-       struct rx_channel_state_s       rx_channel_state[N_RX_CHANNEL_ID];
-       int     be_gsp_acc_ovl;
-       int     be_srst;
-       int     be_is_two_ppc;
-       int     be_comp_format0;
-       int     be_comp_format1;
-       int     be_comp_format2;
-       int     be_comp_format3;
-       int     be_sel;
-       int     be_raw16_config;
-       int     be_raw18_config;
-       int     be_force_raw8;
-       int     be_irq_status;
-       int     be_irq_clear;
-};
-
 #endif /* __INPUT_SYSTEM_LOCAL_H_INCLUDED__ */
index 9c39ca2da923c6950d62b4b72c0a21c0b9974959..e011920f00e191b0391cbfc76aafcaecde6d99d7 100644 (file)
@@ -13,8 +13,8 @@
  * more details.
  */
 
-#ifndef __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
-#define __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+#ifndef __INPUT_SYSTEM_2400_PRIVATE_H_INCLUDED__
+#define __INPUT_SYSTEM_2400_PRIVATE_H_INCLUDED__
 
 #include "input_system_public.h"
 
index 85cb61e341920f982717abbf16c775bc332588f8..447c7c5c55a1f74162c493ff75fa69e9aac44498 100644 (file)
  * more details.
  */
 
-#ifndef __INPUT_SYSTEM_PUBLIC_H_INCLUDED__
-#define __INPUT_SYSTEM_PUBLIC_H_INCLUDED__
+#ifndef __INPUT_SYSTEM_2400_PUBLIC_H_INCLUDED__
+#define __INPUT_SYSTEM_2400_PUBLIC_H_INCLUDED__
 
 #include <type_support.h>
-#ifdef ISP2401
-#include "isys_public.h"
-#else
-
-typedef struct input_system_state_s            input_system_state_t;
-typedef struct receiver_state_s                        receiver_state_t;
-
-/*! Read the state of INPUT_SYSTEM[ID]
-
- \param        ID[in]                          INPUT_SYSTEM identifier
- \param        state[out]                      input system state structure
-
- \return none, state = INPUT_SYSTEM[ID].state
- */
-void input_system_get_state(
-    const input_system_ID_t            ID,
-    input_system_state_t               *state);
-
-/*! Read the state of RECEIVER[ID]
-
- \param        ID[in]                          RECEIVER identifier
- \param        state[out]                      receiver state structure
-
- \return none, state = RECEIVER[ID].state
- */
-void receiver_get_state(
-    const rx_ID_t                              ID,
-    receiver_state_t                   *state);
-
-/*! Flag whether a MIPI format is YUV420
-
- \param        mipi_format[in]         MIPI format
-
- \return mipi_format == YUV420
- */
-bool is_mipi_format_yuv420(
-    const mipi_format_t                        mipi_format);
 
 /*! Set compression parameters for cfg[cfg_ID] of RECEIVER[ID]
 
@@ -365,6 +328,5 @@ input_system_err_t  input_system_gpfifo_channel_cfg(
     u32                nof_frames,
     target_cfg2400_t   target
 );
-#endif /* #ifdef ISP2401 */
 
 #endif /* __INPUT_SYSTEM_PUBLIC_H_INCLUDED__ */
index 74bfa10e670e344bfd545e2f0f85e8a993b21b66..d9a8d575c58e4130759929817c336433fdf2a807 100644 (file)
@@ -13,8 +13,8 @@
  * more details.
  */
 
-#ifndef __INPUT_SYSTEM_LOCAL_H_INCLUDED__
-#define __INPUT_SYSTEM_LOCAL_H_INCLUDED__
+#ifndef __INPUT_SYSTEM_2401_LOCAL_H_INCLUDED__
+#define __INPUT_SYSTEM_2401_LOCAL_H_INCLUDED__
 
 #include "csi_rx.h"
 #include "pixelgen.h"
 #include "isys_irq.h"
 
 typedef enum {
-       MIPI_FORMAT_SHORT1 = 0x08,
-       MIPI_FORMAT_SHORT2,
-       MIPI_FORMAT_SHORT3,
-       MIPI_FORMAT_SHORT4,
-       MIPI_FORMAT_SHORT5,
-       MIPI_FORMAT_SHORT6,
-       MIPI_FORMAT_SHORT7,
-       MIPI_FORMAT_SHORT8,
-       MIPI_FORMAT_EMBEDDED = 0x12,
-       MIPI_FORMAT_YUV420_8 = 0x18,
-       MIPI_FORMAT_YUV420_10,
-       MIPI_FORMAT_YUV420_8_LEGACY,
-       MIPI_FORMAT_YUV420_8_SHIFT = 0x1C,
-       MIPI_FORMAT_YUV420_10_SHIFT,
-       MIPI_FORMAT_YUV422_8 = 0x1E,
-       MIPI_FORMAT_YUV422_10,
-       MIPI_FORMAT_RGB444 = 0x20,
-       MIPI_FORMAT_RGB555,
-       MIPI_FORMAT_RGB565,
-       MIPI_FORMAT_RGB666,
-       MIPI_FORMAT_RGB888,
-       MIPI_FORMAT_RAW6 = 0x28,
-       MIPI_FORMAT_RAW7,
-       MIPI_FORMAT_RAW8,
-       MIPI_FORMAT_RAW10,
-       MIPI_FORMAT_RAW12,
-       MIPI_FORMAT_RAW14,
-       MIPI_FORMAT_CUSTOM0 = 0x30,
-       MIPI_FORMAT_CUSTOM1,
-       MIPI_FORMAT_CUSTOM2,
-       MIPI_FORMAT_CUSTOM3,
-       MIPI_FORMAT_CUSTOM4,
-       MIPI_FORMAT_CUSTOM5,
-       MIPI_FORMAT_CUSTOM6,
-       MIPI_FORMAT_CUSTOM7,
+       MIPI_FORMAT_2401_SHORT1 = 0x08,
+       MIPI_FORMAT_2401_SHORT2,
+       MIPI_FORMAT_2401_SHORT3,
+       MIPI_FORMAT_2401_SHORT4,
+       MIPI_FORMAT_2401_SHORT5,
+       MIPI_FORMAT_2401_SHORT6,
+       MIPI_FORMAT_2401_SHORT7,
+       MIPI_FORMAT_2401_SHORT8,
+       MIPI_FORMAT_2401_EMBEDDED = 0x12,
+       MIPI_FORMAT_2401_YUV420_8 = 0x18,
+       MIPI_FORMAT_2401_YUV420_10,
+       MIPI_FORMAT_2401_YUV420_8_LEGACY,
+       MIPI_FORMAT_2401_YUV420_8_SHIFT = 0x1C,
+       MIPI_FORMAT_2401_YUV420_10_SHIFT,
+       MIPI_FORMAT_2401_YUV422_8 = 0x1E,
+       MIPI_FORMAT_2401_YUV422_10,
+       MIPI_FORMAT_2401_RGB444 = 0x20,
+       MIPI_FORMAT_2401_RGB555,
+       MIPI_FORMAT_2401_RGB565,
+       MIPI_FORMAT_2401_RGB666,
+       MIPI_FORMAT_2401_RGB888,
+       MIPI_FORMAT_2401_RAW6 = 0x28,
+       MIPI_FORMAT_2401_RAW7,
+       MIPI_FORMAT_2401_RAW8,
+       MIPI_FORMAT_2401_RAW10,
+       MIPI_FORMAT_2401_RAW12,
+       MIPI_FORMAT_2401_RAW14,
+       MIPI_FORMAT_2401_CUSTOM0 = 0x30,
+       MIPI_FORMAT_2401_CUSTOM1,
+       MIPI_FORMAT_2401_CUSTOM2,
+       MIPI_FORMAT_2401_CUSTOM3,
+       MIPI_FORMAT_2401_CUSTOM4,
+       MIPI_FORMAT_2401_CUSTOM5,
+       MIPI_FORMAT_2401_CUSTOM6,
+       MIPI_FORMAT_2401_CUSTOM7,
        //MIPI_FORMAT_RAW16, /*not supported by 2401*/
        //MIPI_FORMAT_RAW18,
-       N_MIPI_FORMAT
-} mipi_format_t;
+       N_MIPI_FORMAT_2401
+} mipi_format_2401_t;
 
 #define N_MIPI_FORMAT_CUSTOM   8
 
 /* The number of stores for compressed format types */
 #define        N_MIPI_COMPRESSOR_CONTEXT       (N_RX_CHANNEL_ID * N_MIPI_FORMAT_CUSTOM)
-typedef struct input_system_state_s    input_system_state_t;
-struct input_system_state_s {
-       ibuf_ctrl_state_t       ibuf_ctrl_state[N_IBUF_CTRL_ID];
-       csi_rx_fe_ctrl_state_t  csi_rx_fe_ctrl_state[N_CSI_RX_FRONTEND_ID];
-       csi_rx_be_ctrl_state_t  csi_rx_be_ctrl_state[N_CSI_RX_BACKEND_ID];
-       pixelgen_ctrl_state_t   pixelgen_ctrl_state[N_PIXELGEN_ID];
-       stream2mmio_state_t     stream2mmio_state[N_STREAM2MMIO_ID];
-       isys_irqc_state_t       isys_irqc_state[N_ISYS_IRQ_ID];
-};
+
 #endif /* __INPUT_SYSTEM_LOCAL_H_INCLUDED__ */
index e4c76428f6dd761eb88e5a1655d4e244780360d9..845ed0add027bd62f8eaa0b7b50a28cd9669440e 100644 (file)
@@ -13,8 +13,8 @@
  * more details.
  */
 
-#ifndef __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
-#define __INPUT_SYSTEM_PRIVATE_H_INCLUDED__
+#ifndef __INPUT_SYSTEM_2401_PRIVATE_H_INCLUDED__
+#define __INPUT_SYSTEM_2401_PRIVATE_H_INCLUDED__
 
 #include "input_system_public.h"
 
@@ -231,112 +231,4 @@ static inline void ibuf_ctrl_dump_state(const ibuf_ctrl_ID_t ID,
        }
 }
 
-static inline input_system_err_t
-input_system_get_state(const input_system_ID_t ID,
-                      input_system_state_t *state)
-{
-       u32 i;
-
-       (void)(ID);
-
-       /*  get the states of all CSI RX frontend devices */
-       for (i = 0; i < N_CSI_RX_FRONTEND_ID; i++) {
-               csi_rx_fe_ctrl_get_state(
-                   (csi_rx_frontend_ID_t)i,
-                   &state->csi_rx_fe_ctrl_state[i]);
-       }
-
-       /*  get the states of all CIS RX backend devices */
-       for (i = 0; i < N_CSI_RX_BACKEND_ID; i++) {
-               csi_rx_be_ctrl_get_state(
-                   (csi_rx_backend_ID_t)i,
-                   &state->csi_rx_be_ctrl_state[i]);
-       }
-
-       /* get the states of all pixelgen devices */
-       for (i = 0; i < N_PIXELGEN_ID; i++) {
-               pixelgen_ctrl_get_state(
-                   (pixelgen_ID_t)i,
-                   &state->pixelgen_ctrl_state[i]);
-       }
-
-       /* get the states of all stream2mmio devices */
-       for (i = 0; i < N_STREAM2MMIO_ID; i++) {
-               stream2mmio_get_state(
-                   (stream2mmio_ID_t)i,
-                   &state->stream2mmio_state[i]);
-       }
-
-       /* get the states of all ibuf-controller devices */
-       for (i = 0; i < N_IBUF_CTRL_ID; i++) {
-               ibuf_ctrl_get_state(
-                   (ibuf_ctrl_ID_t)i,
-                   &state->ibuf_ctrl_state[i]);
-       }
-
-       /* get the states of all isys irq controllers */
-       for (i = 0; i < N_ISYS_IRQ_ID; i++) {
-               isys_irqc_state_get((isys_irq_ID_t)i, &state->isys_irqc_state[i]);
-       }
-
-       /* TODO: get the states of all ISYS2401 DMA devices  */
-       for (i = 0; i < N_ISYS2401_DMA_ID; i++) {
-       }
-
-       return INPUT_SYSTEM_ERR_NO_ERROR;
-}
-
-static inline void input_system_dump_state(const input_system_ID_t ID,
-                                          input_system_state_t *state)
-{
-       u32 i;
-
-       (void)(ID);
-
-       /*  dump the states of all CSI RX frontend devices */
-       for (i = 0; i < N_CSI_RX_FRONTEND_ID; i++) {
-               csi_rx_fe_ctrl_dump_state(
-                   (csi_rx_frontend_ID_t)i,
-                   &state->csi_rx_fe_ctrl_state[i]);
-       }
-
-       /*  dump the states of all CIS RX backend devices */
-       for (i = 0; i < N_CSI_RX_BACKEND_ID; i++) {
-               csi_rx_be_ctrl_dump_state(
-                   (csi_rx_backend_ID_t)i,
-                   &state->csi_rx_be_ctrl_state[i]);
-       }
-
-       /* dump the states of all pixelgen devices */
-       for (i = 0; i < N_PIXELGEN_ID; i++) {
-               pixelgen_ctrl_dump_state(
-                   (pixelgen_ID_t)i,
-                   &state->pixelgen_ctrl_state[i]);
-       }
-
-       /* dump the states of all st2mmio devices */
-       for (i = 0; i < N_STREAM2MMIO_ID; i++) {
-               stream2mmio_dump_state(
-                   (stream2mmio_ID_t)i,
-                   &state->stream2mmio_state[i]);
-       }
-
-       /* dump the states of all ibuf-controller devices */
-       for (i = 0; i < N_IBUF_CTRL_ID; i++) {
-               ibuf_ctrl_dump_state(
-                   (ibuf_ctrl_ID_t)i,
-                   &state->ibuf_ctrl_state[i]);
-       }
-
-       /* dump the states of all isys irq controllers */
-       for (i = 0; i < N_ISYS_IRQ_ID; i++) {
-               isys_irqc_state_dump((isys_irq_ID_t)i, &state->isys_irqc_state[i]);
-       }
-
-       /* TODO: dump the states of all ISYS2401 DMA devices  */
-       for (i = 0; i < N_ISYS2401_DMA_ID; i++) {
-       }
-
-       return;
-}
 #endif /* __INPUT_SYSTEM_PRIVATE_H_INCLUDED__ */
index 768da86b8c2c10283d87e48f354cd49af65dd9bd..0f3729e55e14a6fb3b03b439fcff22552530350b 100644 (file)
@@ -604,13 +604,14 @@ binary_in_frame_padded_width(int in_frame_width,
        int rval;
        int nr_of_left_paddings;        /* number of paddings pixels on the left of an image line */
 
-#if defined(ISP2401)
-       /* the output image line of Input System 2401 does not have the left paddings  */
-       nr_of_left_paddings = 0;
-#else
-       /* in other cases, the left padding pixels are always 128 */
-       nr_of_left_paddings = 2 * ISP_VEC_NELEMS;
-#endif
+       if (IS_ISP2401) {
+               /* the output image line of Input System 2401 does not have the left paddings  */
+               nr_of_left_paddings = 0;
+       } else {
+               /* in other cases, the left padding pixels are always 128 */
+               nr_of_left_paddings = 2 * ISP_VEC_NELEMS;
+       }
+
        if (need_scaling) {
                /* In SDV use-case, we need to match left-padding of
                 * primary and the video binary. */
index fff89e9b4b0108b07541732600a034d1b26d9cc6..e9a09117e5e51e24389305447361cf1852dfbf3b 100644 (file)
@@ -141,12 +141,6 @@ static inline void __printf(2, 0) ia_css_debug_vdtrace(unsigned int level,
 __printf(2, 3) void ia_css_debug_dtrace(unsigned int level,
                                        const char *fmt, ...);
 
-/*! @brief Dump sp thread's stack contents
- * SP thread's stack contents are set to 0xcafecafe. This function dumps the
- * stack to inspect if the stack's boundaries are compromised.
- * @return     None
- */
-void ia_css_debug_dump_sp_stack_info(void);
 
 /*! @brief Function to set the global dtrace verbosity level.
  * @param[in]  trace_level     Maximum level of the messages to be traced.
@@ -160,18 +154,6 @@ void ia_css_debug_set_dtrace_level(
  */
 unsigned int ia_css_debug_get_dtrace_level(void);
 
-/*! @brief Dump isp hardware state.
- * Dumps the isp hardware state to tracing output.
- * @return     None
- */
-void ia_css_debug_dump_isp_state(void);
-
-/*! @brief Dump sp hardware state.
- * Dumps the sp hardware state to tracing output.
- * @return     None
- */
-void ia_css_debug_dump_sp_state(void);
-
 /* ISP2401 */
 /*! @brief Dump GAC hardware state.
  * Dumps the GAC ACB hardware registers. may be useful for
@@ -180,25 +162,12 @@ void ia_css_debug_dump_sp_state(void);
  */
 void ia_css_debug_dump_gac_state(void);
 
-/*! @brief Dump dma controller state.
- * Dumps the dma controller state to tracing output.
- * @return     None
- */
-void ia_css_debug_dump_dma_state(void);
-
 /*! @brief Dump internal sp software state.
  * Dumps the sp software state to tracing output.
  * @return     None
  */
 void ia_css_debug_dump_sp_sw_debug_info(void);
 
-/*! @brief Dump all related hardware state to the trace output
- * @param[in]  context String to identify context in output.
- * @return     None
- */
-void ia_css_debug_dump_debug_info(
-    const char *context);
-
 #if SP_DEBUG != SP_DEBUG_NONE
 void ia_css_debug_print_sp_debug_state(
     const struct sh_css_sp_debug_state *state);
@@ -213,24 +182,6 @@ void ia_css_debug_binary_print(
 
 void ia_css_debug_sp_dump_mipi_fifo_high_water(void);
 
-/*! @brief Dump isp gdc fifo state to the trace output
- * Dumps the isp gdc fifo state to tracing output.
- * @return     None
- */
-void ia_css_debug_dump_isp_gdc_fifo_state(void);
-
-/*! @brief Dump dma isp fifo state
- * Dumps the dma isp fifo state to tracing output.
- * @return     None
- */
-void ia_css_debug_dump_dma_isp_fifo_state(void);
-
-/*! @brief Dump dma sp fifo state
- * Dumps the dma sp fifo state to tracing output.
- * @return     None
- */
-void ia_css_debug_dump_dma_sp_fifo_state(void);
-
 /*! \brief Dump pif A isp fifo state
  * Dumps the primary input formatter state to tracing output.
  * @return     None
@@ -249,30 +200,12 @@ void ia_css_debug_dump_pif_b_isp_fifo_state(void);
  */
 void ia_css_debug_dump_str2mem_sp_fifo_state(void);
 
-/*! @brief Dump isp sp fifo state
- * Dumps the isp sp fifo state to tracing output.
- * @return     None
- */
-void ia_css_debug_dump_isp_sp_fifo_state(void);
-
 /*! @brief Dump all fifo state info to the output
  * Dumps all fifo state to tracing output.
  * @return     None
  */
 void ia_css_debug_dump_all_fifo_state(void);
 
-/*! @brief Dump the rx state to the output
- * Dumps the rx state to tracing output.
- * @return     None
- */
-void ia_css_debug_dump_rx_state(void);
-
-/*! @brief Dump the input system state to the output
- * Dumps the input system state to tracing output.
- * @return     None
- */
-void ia_css_debug_dump_isys_state(void);
-
 /*! @brief Dump the frame info to the trace output
  * Dumps the frame info to tracing output.
  * @param[in]  frame           pointer to struct ia_css_frame
@@ -306,18 +239,6 @@ void ia_css_debug_wake_up_sp(void);
 void ia_css_debug_dump_isp_params(struct ia_css_stream *stream,
                                  unsigned int enable);
 
-/*! @brief Function to dump some sp performance counters.
- * Dump sp performance counters, currently input system errors.
- * @return     None
- */
-void ia_css_debug_dump_perf_counters(void);
-
-#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
-void sh_css_dump_thread_wait_info(void);
-void sh_css_dump_pipe_stage_info(void);
-void sh_css_dump_pipe_stripe_info(void);
-#endif
-
 void ia_css_debug_dump_isp_binary(void);
 
 void sh_css_dump_sp_raw_copy_linecount(bool reduced);
@@ -400,12 +321,6 @@ void ia_css_debug_dump_stream_config(
     const struct ia_css_stream_config *config,
     int num_pipes);
 
-/*! @brief Dump the state of the SP tagger
- * Dumps the internal state of the SP tagger
- * @return     None
- */
-void ia_css_debug_tagger_state(void);
-
 /**
  * @brief Initialize the debug mode.
  *
index bb6204cb42c5a5693b1713368bceae9d23aa5238..3e92794555ec7dfd1af4a9df0ecc33052c6dabf4 100644 (file)
 
 #define ENABLE_LINE_MAX_LENGTH (25)
 
-/*
- * TODO:SH_CSS_MAX_SP_THREADS is not the max number of sp threads
- * future rework should fix this and remove the define MAX_THREAD_NUM
- */
-#define MAX_THREAD_NUM (SH_CSS_MAX_SP_THREADS + SH_CSS_MAX_SP_INTERNAL_THREADS)
-
 static struct pipe_graph_class {
        bool do_init;
        int height;
@@ -147,79 +141,6 @@ void ia_css_debug_dtrace(unsigned int level, const char *fmt, ...)
        va_end(ap);
 }
 
-static void debug_dump_long_array_formatted(
-    const sp_ID_t sp_id,
-    hrt_address stack_sp_addr,
-    unsigned int stack_size)
-{
-       unsigned int i;
-       u32 val;
-       u32 addr = (uint32_t)stack_sp_addr;
-       u32 stack_size_words = CEIL_DIV(stack_size, sizeof(uint32_t));
-
-       /* When size is not multiple of four, last word is only relevant for
-        * remaining bytes */
-       for (i = 0; i < stack_size_words; i++) {
-               val = sp_dmem_load_uint32(sp_id, (hrt_address)addr);
-               if ((i % 8) == 0)
-                       ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n");
-
-               ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "0x%08x ", val);
-               addr += sizeof(uint32_t);
-       }
-
-       ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\n");
-}
-
-static void debug_dump_sp_stack_info(
-    const sp_ID_t sp_id)
-{
-       const struct ia_css_fw_info *fw;
-       unsigned int HIVE_ADDR_sp_threads_stack;
-       unsigned int HIVE_ADDR_sp_threads_stack_size;
-       u32 stack_sizes[MAX_THREAD_NUM];
-       u32 stack_sp_addr[MAX_THREAD_NUM];
-       unsigned int i;
-
-       fw = &sh_css_sp_fw;
-
-       ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "sp_id(%u) stack info\n", sp_id);
-       ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
-                           "from objects stack_addr_offset:0x%x stack_size_offset:0x%x\n",
-                           fw->info.sp.threads_stack,
-                           fw->info.sp.threads_stack_size);
-
-       HIVE_ADDR_sp_threads_stack = fw->info.sp.threads_stack;
-       HIVE_ADDR_sp_threads_stack_size = fw->info.sp.threads_stack_size;
-
-       if (fw->info.sp.threads_stack == 0 ||
-           fw->info.sp.threads_stack_size == 0)
-               return;
-
-       (void)HIVE_ADDR_sp_threads_stack;
-       (void)HIVE_ADDR_sp_threads_stack_size;
-
-       sp_dmem_load(sp_id,
-                    (unsigned int)sp_address_of(sp_threads_stack),
-                    &stack_sp_addr, sizeof(stack_sp_addr));
-       sp_dmem_load(sp_id,
-                    (unsigned int)sp_address_of(sp_threads_stack_size),
-                    &stack_sizes, sizeof(stack_sizes));
-
-       for (i = 0 ; i < MAX_THREAD_NUM; i++) {
-               ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
-                                   "thread: %u stack_addr: 0x%08x stack_size: %u\n",
-                                   i, stack_sp_addr[i], stack_sizes[i]);
-               debug_dump_long_array_formatted(sp_id, (hrt_address)stack_sp_addr[i],
-                                               stack_sizes[i]);
-       }
-}
-
-void ia_css_debug_dump_sp_stack_info(void)
-{
-       debug_dump_sp_stack_info(SP0_ID);
-}
-
 void ia_css_debug_set_dtrace_level(const unsigned int trace_level)
 {
        dbg_level = trace_level;
@@ -387,136 +308,6 @@ static const char *debug_frame_format2str(const enum ia_css_frame_format
        }
 }
 
-static void debug_print_sp_state(const sp_state_t *state, const char *cell)
-{
-       assert(cell);
-       assert(state);
-
-       ia_css_debug_dtrace(2, "%s state:\n", cell);
-       ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc);
-       ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register",
-                           state->status_register);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is broken", state->is_broken);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping",
-                           state->is_sleeping);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling",
-                           state->is_stalling);
-       return;
-}
-
-static void debug_print_isp_state(const isp_state_t *state, const char *cell)
-{
-       assert(state);
-       assert(cell);
-
-       ia_css_debug_dtrace(2, "%s state:\n", cell);
-       ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "PC", state->pc);
-       ia_css_debug_dtrace(2, "\t%-32s: 0x%X\n", "Status register",
-                           state->status_register);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is broken", state->is_broken);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is idle", state->is_idle);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is sleeping",
-                           state->is_sleeping);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "Is stalling",
-                           state->is_stalling);
-       return;
-}
-
-void ia_css_debug_dump_isp_state(void)
-{
-       isp_state_t state;
-       isp_stall_t stall;
-
-       isp_get_state(ISP0_ID, &state, &stall);
-
-       debug_print_isp_state(&state, "ISP");
-
-       if (state.is_stalling) {
-               if (!IS_ISP2401) {
-                       ia_css_debug_dtrace(2, "\t%-32s: %d\n",
-                                           "[0] if_prim_a_FIFO stalled", stall.fifo0);
-                       ia_css_debug_dtrace(2, "\t%-32s: %d\n",
-                                           "[1] if_prim_b_FIFO stalled", stall.fifo1);
-               }
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[2] dma_FIFO stalled",
-                                   stall.fifo2);
-
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[3] gdc0_FIFO stalled",
-                                   stall.fifo3);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[4] gdc1_FIFO stalled",
-                                   stall.fifo4);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[5] gpio_FIFO stalled",
-                                   stall.fifo5);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "[6] sp_FIFO stalled",
-                                   stall.fifo6);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n",
-                                   "status & control stalled",
-                                   stall.stat_ctrl);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled",
-                                   stall.dmem);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vmem stalled",
-                                   stall.vmem);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem1 stalled",
-                                   stall.vamem1);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem2 stalled",
-                                   stall.vamem2);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "vamem3 stalled",
-                                   stall.vamem3);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "hmem stalled",
-                                   stall.hmem);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "pmem stalled",
-                                   stall.pmem);
-       }
-       return;
-}
-
-void ia_css_debug_dump_sp_state(void)
-{
-       sp_state_t state;
-       sp_stall_t stall;
-
-       sp_get_state(SP0_ID, &state, &stall);
-       debug_print_sp_state(&state, "SP");
-       if (state.is_stalling) {
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isys_FIFO stalled",
-                                   stall.fifo0);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "if_sec_FIFO stalled",
-                                   stall.fifo1);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n",
-                                   "str_to_mem_FIFO stalled", stall.fifo2);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dma_FIFO stalled",
-                                   stall.fifo3);
-               if (!IS_ISP2401)
-                       ia_css_debug_dtrace(2, "\t%-32s: %d\n",
-                                           "if_prim_a_FIFO stalled", stall.fifo4);
-
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "isp_FIFO stalled",
-                                   stall.fifo5);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gp_FIFO stalled",
-                                   stall.fifo6);
-               if (!IS_ISP2401)
-                       ia_css_debug_dtrace(2, "\t%-32s: %d\n",
-                                           "if_prim_b_FIFO stalled", stall.fifo7);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc0_FIFO stalled",
-                                   stall.fifo8);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "gdc1_FIFO stalled",
-                                   stall.fifo9);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "irq FIFO stalled",
-                                   stall.fifoa);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "dmem stalled",
-                                   stall.dmem);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n",
-                                   "control master stalled",
-                                   stall.control_master);
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n",
-                                   "i-cache master stalled",
-                                   stall.icache_master);
-       }
-       ia_css_debug_dump_trace();
-       return;
-}
-
 static void debug_print_fifo_channel_state(const fifo_channel_state_t *state,
        const char *descr)
 {
@@ -571,634 +362,6 @@ void ia_css_debug_dump_str2mem_sp_fifo_state(void)
        debug_print_fifo_channel_state(&sp_to_s2m, "SP to stream-to-memory");
 }
 
-#ifndef ISP2401
-static void debug_print_if_state(input_formatter_state_t *state, const char *id)
-{
-       unsigned int val;
-
-       const char *st_vsync_active_low =
-           (state->vsync_active_low ? "low" : "high");
-       const char *st_hsync_active_low =
-           (state->hsync_active_low ? "low" : "high");
-
-       const char *fsm_sync_status_str = "unknown";
-       const char *fsm_crop_status_str = "unknown";
-       const char *fsm_padding_status_str = "unknown";
-
-       int st_stline = state->start_line;
-       int st_stcol = state->start_column;
-       int st_crpht = state->cropped_height;
-       int st_crpwd = state->cropped_width;
-       int st_verdcm = state->ver_decimation;
-       int st_hordcm = state->hor_decimation;
-       int st_ver_deinterleaving = state->ver_deinterleaving;
-       int st_hor_deinterleaving = state->hor_deinterleaving;
-       int st_leftpd = state->left_padding;
-       int st_eoloff = state->eol_offset;
-       int st_vmstartaddr = state->vmem_start_address;
-       int st_vmendaddr = state->vmem_end_address;
-       int st_vmincr = state->vmem_increment;
-       int st_yuv420 = state->is_yuv420;
-       int st_allow_fifo_overflow = state->allow_fifo_overflow;
-       int st_block_fifo_when_no_req = state->block_fifo_when_no_req;
-
-       assert(state);
-       ia_css_debug_dtrace(2, "InputFormatter State (%s):\n", id);
-
-       ia_css_debug_dtrace(2, "\tConfiguration:\n");
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start line", st_stline);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Start column", st_stcol);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped height", st_crpht);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropped width", st_crpwd);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Ver decimation", st_verdcm);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Hor decimation", st_hordcm);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Ver deinterleaving", st_ver_deinterleaving);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Hor deinterleaving", st_hor_deinterleaving);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Left padding", st_leftpd);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "EOL offset (bytes)", st_eoloff);
-       ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
-                           "VMEM start address", st_vmstartaddr);
-       ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
-                           "VMEM end address", st_vmendaddr);
-       ia_css_debug_dtrace(2, "\t\t%-32s: 0x%06X\n",
-                           "VMEM increment", st_vmincr);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "YUV 420 format", st_yuv420);
-       ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n",
-                           "Vsync", st_vsync_active_low);
-       ia_css_debug_dtrace(2, "\t\t%-32s: Active %s\n",
-                           "Hsync", st_hsync_active_low);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Allow FIFO overflow", st_allow_fifo_overflow);
-       /* Flag that tells whether the IF gives backpressure on frames */
-       /*
-        * FYI, this is only on the frame request (indicate), when the IF has
-        * synch'd on a frame it will always give back pressure
-        */
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Block when no request", st_block_fifo_when_no_req);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "IF_BLOCKED_FIFO_NO_REQ_ADDRESS",
-                           input_formatter_reg_load(INPUT_FORMATTER0_ID,
-                                   HIVE_IF_BLOCK_FIFO_NO_REQ_ADDRESS)
-                          );
-
-       ia_css_debug_dtrace(2, "\t%-32s:\n", "InputSwitch State");
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "_REG_GP_IFMT_input_switch_lut_reg0",
-                           gp_device_reg_load(GP_DEVICE0_ID,
-                                              _REG_GP_IFMT_input_switch_lut_reg0));
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "_REG_GP_IFMT_input_switch_lut_reg1",
-                           gp_device_reg_load(GP_DEVICE0_ID,
-                                              _REG_GP_IFMT_input_switch_lut_reg1));
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "_REG_GP_IFMT_input_switch_lut_reg2",
-                           gp_device_reg_load(GP_DEVICE0_ID,
-                                              _REG_GP_IFMT_input_switch_lut_reg2));
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "_REG_GP_IFMT_input_switch_lut_reg3",
-                           gp_device_reg_load(GP_DEVICE0_ID,
-                                              _REG_GP_IFMT_input_switch_lut_reg3));
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "_REG_GP_IFMT_input_switch_lut_reg4",
-                           gp_device_reg_load(GP_DEVICE0_ID,
-                                              _REG_GP_IFMT_input_switch_lut_reg4));
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "_REG_GP_IFMT_input_switch_lut_reg5",
-                           gp_device_reg_load(GP_DEVICE0_ID,
-                                              _REG_GP_IFMT_input_switch_lut_reg5));
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "_REG_GP_IFMT_input_switch_lut_reg6",
-                           gp_device_reg_load(GP_DEVICE0_ID,
-                                              _REG_GP_IFMT_input_switch_lut_reg6));
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "_REG_GP_IFMT_input_switch_lut_reg7",
-                           gp_device_reg_load(GP_DEVICE0_ID,
-                                              _REG_GP_IFMT_input_switch_lut_reg7));
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "_REG_GP_IFMT_input_switch_fsync_lut",
-                           gp_device_reg_load(GP_DEVICE0_ID,
-                                              _REG_GP_IFMT_input_switch_fsync_lut));
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "_REG_GP_IFMT_srst",
-                           gp_device_reg_load(GP_DEVICE0_ID,
-                                              _REG_GP_IFMT_srst));
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "_REG_GP_IFMT_slv_reg_srst",
-                           gp_device_reg_load(GP_DEVICE0_ID,
-                                              _REG_GP_IFMT_slv_reg_srst));
-
-       ia_css_debug_dtrace(2, "\tFSM Status:\n");
-
-       val = state->fsm_sync_status;
-
-       if (val > 7)
-               fsm_sync_status_str = "ERROR";
-
-       switch (val & 0x7) {
-       case 0:
-               fsm_sync_status_str = "idle";
-               break;
-       case 1:
-               fsm_sync_status_str = "request frame";
-               break;
-       case 2:
-               fsm_sync_status_str = "request lines";
-               break;
-       case 3:
-               fsm_sync_status_str = "request vectors";
-               break;
-       case 4:
-               fsm_sync_status_str = "send acknowledge";
-               break;
-       default:
-               fsm_sync_status_str = "unknown";
-               break;
-       }
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n",
-                           "FSM Synchronization Status", val,
-                           fsm_sync_status_str);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "FSM Synchronization Counter",
-                           state->fsm_sync_counter);
-
-       val = state->fsm_crop_status;
-
-       if (val > 7)
-               fsm_crop_status_str = "ERROR";
-
-       switch (val & 0x7) {
-       case 0:
-               fsm_crop_status_str = "idle";
-               break;
-       case 1:
-               fsm_crop_status_str = "wait line";
-               break;
-       case 2:
-               fsm_crop_status_str = "crop line";
-               break;
-       case 3:
-               fsm_crop_status_str = "crop pixel";
-               break;
-       case 4:
-               fsm_crop_status_str = "pass pixel";
-               break;
-       case 5:
-               fsm_crop_status_str = "pass line";
-               break;
-       case 6:
-               fsm_crop_status_str = "lost line";
-               break;
-       default:
-               fsm_crop_status_str = "unknown";
-               break;
-       }
-       ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n",
-                           "FSM Crop Status", val, fsm_crop_status_str);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "FSM Crop Line Counter",
-                           state->fsm_crop_line_counter);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "FSM Crop Pixel Counter",
-                           state->fsm_crop_pixel_counter);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "FSM Deinterleaving idx buffer",
-                           state->fsm_deinterleaving_index);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "FSM H decimation counter",
-                           state->fsm_dec_h_counter);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "FSM V decimation counter",
-                           state->fsm_dec_v_counter);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "FSM block V decimation counter",
-                           state->fsm_dec_block_v_counter);
-
-       val = state->fsm_padding_status;
-
-       if (val > 7)
-               fsm_padding_status_str = "ERROR";
-
-       switch (val & 0x7) {
-       case 0:
-               fsm_padding_status_str = "idle";
-               break;
-       case 1:
-               fsm_padding_status_str = "left pad";
-               break;
-       case 2:
-               fsm_padding_status_str = "write";
-               break;
-       case 3:
-               fsm_padding_status_str = "right pad";
-               break;
-       case 4:
-               fsm_padding_status_str = "send end of line";
-               break;
-       default:
-               fsm_padding_status_str = "unknown";
-               break;
-       }
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: (0x%X: %s)\n", "FSM Padding Status",
-                           val, fsm_padding_status_str);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "FSM Padding element idx counter",
-                           state->fsm_padding_elem_counter);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support error",
-                           state->fsm_vector_support_error);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support buf full",
-                           state->fsm_vector_buffer_full);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Vector support",
-                           state->vector_support);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Fifo sensor data lost",
-                           state->sensor_data_lost);
-}
-
-static void debug_print_if_bin_state(input_formatter_bin_state_t *state)
-{
-       ia_css_debug_dtrace(2, "Stream-to-memory state:\n");
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "reset", state->reset);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "input endianness",
-                           state->input_endianness);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "output endianness",
-                           state->output_endianness);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "bitswap", state->bitswap);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "block_synch",
-                           state->block_synch);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "packet_synch",
-                           state->packet_synch);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "readpostwrite_sync",
-                           state->readpostwrite_synch);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "is_2ppc", state->is_2ppc);
-       ia_css_debug_dtrace(2, "\t%-32s: %d\n", "en_status_update",
-                           state->en_status_update);
-}
-
-static void ia_css_debug_dump_if_state(void)
-{
-       input_formatter_state_t if_state;
-       input_formatter_bin_state_t if_bin_state;
-
-       input_formatter_get_state(INPUT_FORMATTER0_ID, &if_state);
-       debug_print_if_state(&if_state, "Primary IF A");
-       ia_css_debug_dump_pif_a_isp_fifo_state();
-
-       input_formatter_get_state(INPUT_FORMATTER1_ID, &if_state);
-       debug_print_if_state(&if_state, "Primary IF B");
-       ia_css_debug_dump_pif_b_isp_fifo_state();
-
-       input_formatter_bin_get_state(INPUT_FORMATTER3_ID, &if_bin_state);
-       debug_print_if_bin_state(&if_bin_state);
-       ia_css_debug_dump_str2mem_sp_fifo_state();
-}
-#endif
-
-void ia_css_debug_dump_dma_state(void)
-{
-       /* note: the var below is made static as it is quite large;
-          if it is not static it ends up on the stack which could
-          cause issues for drivers
-       */
-       static dma_state_t state;
-       int i, ch_id;
-
-       const char *fsm_cmd_st_lbl = "FSM Command flag state";
-       const char *fsm_ctl_st_lbl = "FSM Control flag state";
-       const char *fsm_ctl_state = NULL;
-       const char *fsm_ctl_flag = NULL;
-       const char *fsm_pack_st = NULL;
-       const char *fsm_read_st = NULL;
-       const char *fsm_write_st = NULL;
-       char last_cmd_str[64];
-
-       dma_get_state(DMA0_ID, &state);
-       /* Print header for DMA dump status */
-       ia_css_debug_dtrace(2, "DMA dump status:\n");
-
-       /* Print FSM command flag state */
-       if (state.fsm_command_idle)
-               ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "IDLE");
-       if (state.fsm_command_run)
-               ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl, "RUN");
-       if (state.fsm_command_stalling)
-               ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl,
-                                   "STALL");
-       if (state.fsm_command_error)
-               ia_css_debug_dtrace(2, "\t%-32s: %s\n", fsm_cmd_st_lbl,
-                                   "ERROR");
-
-       /* Print last command along with the channel */
-       ch_id = state.last_command_channel;
-
-       switch (state.last_command) {
-       case DMA_COMMAND_READ:
-               snprintf(last_cmd_str, 64,
-                        "Read 2D Block [Channel: %d]", ch_id);
-               break;
-       case DMA_COMMAND_WRITE:
-               snprintf(last_cmd_str, 64,
-                        "Write 2D Block [Channel: %d]", ch_id);
-               break;
-       case DMA_COMMAND_SET_CHANNEL:
-               snprintf(last_cmd_str, 64, "Set Channel [Channel: %d]", ch_id);
-               break;
-       case DMA_COMMAND_SET_PARAM:
-               snprintf(last_cmd_str, 64,
-                        "Set Param: %d [Channel: %d]",
-                        state.last_command_param, ch_id);
-               break;
-       case DMA_COMMAND_READ_SPECIFIC:
-               snprintf(last_cmd_str, 64,
-                        "Read Specific 2D Block [Channel: %d]", ch_id);
-               break;
-       case DMA_COMMAND_WRITE_SPECIFIC:
-               snprintf(last_cmd_str, 64,
-                        "Write Specific 2D Block [Channel: %d]", ch_id);
-               break;
-       case DMA_COMMAND_INIT:
-               snprintf(last_cmd_str, 64,
-                        "Init 2D Block on Device A [Channel: %d]", ch_id);
-               break;
-       case DMA_COMMAND_INIT_SPECIFIC:
-               snprintf(last_cmd_str, 64,
-                        "Init Specific 2D Block [Channel: %d]", ch_id);
-               break;
-       case DMA_COMMAND_RST:
-               snprintf(last_cmd_str, 64, "DMA SW Reset");
-               break;
-       case N_DMA_COMMANDS:
-               snprintf(last_cmd_str, 64, "UNKNOWN");
-               break;
-       default:
-               snprintf(last_cmd_str, 64,
-                        "unknown [Channel: %d]", ch_id);
-               break;
-       }
-       ia_css_debug_dtrace(2, "\t%-32s: (0x%X : %s)\n",
-                           "last command received", state.last_command,
-                           last_cmd_str);
-
-       /* Print DMA registers */
-       ia_css_debug_dtrace(2, "\t%-32s\n",
-                           "DMA registers, connection group 0");
-       ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Command",
-                           state.current_command);
-       ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address A",
-                           state.current_addr_a);
-       ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Cmd Fifo Address B",
-                           state.current_addr_b);
-
-       if (state.fsm_ctrl_idle)
-               fsm_ctl_flag = "IDLE";
-       else if (state.fsm_ctrl_run)
-               fsm_ctl_flag = "RUN";
-       else if (state.fsm_ctrl_stalling)
-               fsm_ctl_flag = "STAL";
-       else if (state.fsm_ctrl_error)
-               fsm_ctl_flag = "ERROR";
-       else
-               fsm_ctl_flag = "UNKNOWN";
-
-       switch (state.fsm_ctrl_state) {
-       case DMA_CTRL_STATE_IDLE:
-               fsm_ctl_state = "Idle state";
-               break;
-       case DMA_CTRL_STATE_REQ_RCV:
-               fsm_ctl_state = "Req Rcv state";
-               break;
-       case DMA_CTRL_STATE_RCV:
-               fsm_ctl_state = "Rcv state";
-               break;
-       case DMA_CTRL_STATE_RCV_REQ:
-               fsm_ctl_state = "Rcv Req state";
-               break;
-       case DMA_CTRL_STATE_INIT:
-               fsm_ctl_state = "Init state";
-               break;
-       case N_DMA_CTRL_STATES:
-               fsm_ctl_state = "Unknown";
-               break;
-       }
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %s -> %s\n", fsm_ctl_st_lbl,
-                           fsm_ctl_flag, fsm_ctl_state);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source dev",
-                           state.fsm_ctrl_source_dev);
-       ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source addr",
-                           state.fsm_ctrl_source_addr);
-       ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl source stride",
-                           state.fsm_ctrl_source_stride);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source width",
-                           state.fsm_ctrl_source_width);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl source height",
-                           state.fsm_ctrl_source_height);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source dev",
-                           state.fsm_ctrl_pack_source_dev);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest dev",
-                           state.fsm_ctrl_pack_dest_dev);
-       ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest addr",
-                           state.fsm_ctrl_dest_addr);
-       ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "FSM Ctrl dest stride",
-                           state.fsm_ctrl_dest_stride);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source width",
-                           state.fsm_ctrl_pack_source_width);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest height",
-                           state.fsm_ctrl_pack_dest_height);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest width",
-                           state.fsm_ctrl_pack_dest_width);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack source elems",
-                           state.fsm_ctrl_pack_source_elems);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack dest elems",
-                           state.fsm_ctrl_pack_dest_elems);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Ctrl pack extension",
-                           state.fsm_ctrl_pack_extension);
-
-       if (state.pack_idle)
-               fsm_pack_st = "IDLE";
-       if (state.pack_run)
-               fsm_pack_st = "RUN";
-       if (state.pack_stalling)
-               fsm_pack_st = "STALL";
-       if (state.pack_error)
-               fsm_pack_st = "ERROR";
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Pack flag state",
-                           fsm_pack_st);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack cnt height",
-                           state.pack_cnt_height);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack src cnt width",
-                           state.pack_src_cnt_width);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Pack dest cnt width",
-                           state.pack_dest_cnt_width);
-
-       if (state.read_state == DMA_RW_STATE_IDLE)
-               fsm_read_st = "Idle state";
-       if (state.read_state == DMA_RW_STATE_REQ)
-               fsm_read_st = "Req state";
-       if (state.read_state == DMA_RW_STATE_NEXT_LINE)
-               fsm_read_st = "Next line";
-       if (state.read_state == DMA_RW_STATE_UNLOCK_CHANNEL)
-               fsm_read_st = "Unlock channel";
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Read state",
-                           fsm_read_st);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Read cnt height",
-                           state.read_cnt_height);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Read cnt width",
-                           state.read_cnt_width);
-
-       if (state.write_state == DMA_RW_STATE_IDLE)
-               fsm_write_st = "Idle state";
-       if (state.write_state == DMA_RW_STATE_REQ)
-               fsm_write_st = "Req state";
-       if (state.write_state == DMA_RW_STATE_NEXT_LINE)
-               fsm_write_st = "Next line";
-       if (state.write_state == DMA_RW_STATE_UNLOCK_CHANNEL)
-               fsm_write_st = "Unlock channel";
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %s\n", "FSM Write state",
-                           fsm_write_st);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write height",
-                           state.write_height);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "FSM Write width",
-                           state.write_width);
-
-       for (i = 0; i < HIVE_ISP_NUM_DMA_CONNS; i++) {
-               dma_port_state_t *port = &state.port_states[i];
-
-               ia_css_debug_dtrace(2, "\tDMA device interface %d\n", i);
-               ia_css_debug_dtrace(2, "\t\tDMA internal side state\n");
-               ia_css_debug_dtrace(2,
-                                   "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n",
-                                   port->req_cs, port->req_we_n, port->req_run,
-                                   port->req_ack);
-               ia_css_debug_dtrace(2, "\t\tMaster Output side state\n");
-               ia_css_debug_dtrace(2,
-                                   "\t\t\tCS:%d - We_n:%d - Run:%d - Ack:%d\n",
-                                   port->send_cs, port->send_we_n,
-                                   port->send_run, port->send_ack);
-               ia_css_debug_dtrace(2, "\t\tFifo state\n");
-               if (port->fifo_state == DMA_FIFO_STATE_WILL_BE_FULL)
-                       ia_css_debug_dtrace(2, "\t\t\tFiFo will be full\n");
-               else if (port->fifo_state == DMA_FIFO_STATE_FULL)
-                       ia_css_debug_dtrace(2, "\t\t\tFifo Full\n");
-               else if (port->fifo_state == DMA_FIFO_STATE_EMPTY)
-                       ia_css_debug_dtrace(2, "\t\t\tFifo Empty\n");
-               else
-                       ia_css_debug_dtrace(2, "\t\t\tFifo state unknown\n");
-
-               ia_css_debug_dtrace(2, "\t\tFifo counter %d\n\n",
-                                   port->fifo_counter);
-       }
-
-       for (i = 0; i < HIVE_DMA_NUM_CHANNELS; i++) {
-               dma_channel_state_t *ch = &state.channel_states[i];
-
-               ia_css_debug_dtrace(2, "\t%-32s: %d\n", "DMA channel register",
-                                   i);
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Connection",
-                                   ch->connection);
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Sign extend",
-                                   ch->sign_extend);
-               ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev A",
-                                   ch->stride_a);
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev A",
-                                   ch->elems_a);
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev A",
-                                   ch->cropping_a);
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev A",
-                                   ch->width_a);
-               ia_css_debug_dtrace(2, "\t\t%-32s: 0x%X\n", "Stride Dev B",
-                                   ch->stride_b);
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Elems Dev B",
-                                   ch->elems_b);
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Cropping Dev B",
-                                   ch->cropping_b);
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Width Dev B",
-                                   ch->width_b);
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "Height", ch->height);
-       }
-       ia_css_debug_dtrace(2, "\n");
-       return;
-}
-
-void ia_css_debug_dump_dma_sp_fifo_state(void)
-{
-       fifo_channel_state_t dma_to_sp, sp_to_dma;
-
-       fifo_channel_get_state(FIFO_MONITOR0_ID,
-                              FIFO_CHANNEL_DMA0_TO_SP0, &dma_to_sp);
-       fifo_channel_get_state(FIFO_MONITOR0_ID,
-                              FIFO_CHANNEL_SP0_TO_DMA0, &sp_to_dma);
-       debug_print_fifo_channel_state(&dma_to_sp, "DMA to SP");
-       debug_print_fifo_channel_state(&sp_to_dma, "SP to DMA");
-       return;
-}
-
-void ia_css_debug_dump_dma_isp_fifo_state(void)
-{
-       fifo_channel_state_t dma_to_isp, isp_to_dma;
-
-       fifo_channel_get_state(FIFO_MONITOR0_ID,
-                              FIFO_CHANNEL_DMA0_TO_ISP0, &dma_to_isp);
-       fifo_channel_get_state(FIFO_MONITOR0_ID,
-                              FIFO_CHANNEL_ISP0_TO_DMA0, &isp_to_dma);
-       debug_print_fifo_channel_state(&dma_to_isp, "DMA to ISP");
-       debug_print_fifo_channel_state(&isp_to_dma, "ISP to DMA");
-       return;
-}
-
-void ia_css_debug_dump_isp_sp_fifo_state(void)
-{
-       fifo_channel_state_t sp_to_isp, isp_to_sp;
-
-       fifo_channel_get_state(FIFO_MONITOR0_ID,
-                              FIFO_CHANNEL_SP0_TO_ISP0, &sp_to_isp);
-       fifo_channel_get_state(FIFO_MONITOR0_ID,
-                              FIFO_CHANNEL_ISP0_TO_SP0, &isp_to_sp);
-       debug_print_fifo_channel_state(&sp_to_isp, "SP to ISP");
-       debug_print_fifo_channel_state(&isp_to_sp, "ISP to SP");
-       return;
-}
-
-void ia_css_debug_dump_isp_gdc_fifo_state(void)
-{
-       fifo_channel_state_t gdc_to_isp, isp_to_gdc;
-
-       fifo_channel_get_state(FIFO_MONITOR0_ID,
-                              FIFO_CHANNEL_GDC0_TO_ISP0, &gdc_to_isp);
-       fifo_channel_get_state(FIFO_MONITOR0_ID,
-                              FIFO_CHANNEL_ISP0_TO_GDC0, &isp_to_gdc);
-       debug_print_fifo_channel_state(&gdc_to_isp, "GDC to ISP");
-       debug_print_fifo_channel_state(&isp_to_gdc, "ISP to GDC");
-       return;
-}
-
 void ia_css_debug_dump_all_fifo_state(void)
 {
        int i;
@@ -1658,213 +821,6 @@ void ia_css_debug_print_sp_debug_state(const struct sh_css_sp_debug_state
 }
 #endif
 
-#if !defined(ISP2401)
-static void debug_print_rx_mipi_port_state(mipi_port_state_t *state)
-{
-       int i;
-       unsigned int bits, infos;
-
-       assert(state);
-
-       bits = state->irq_status;
-       infos = ia_css_isys_rx_translate_irq_infos(bits);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: (irq reg = 0x%X)\n",
-                           "receiver errors", bits);
-
-       if (infos & IA_CSS_RX_IRQ_INFO_BUFFER_OVERRUN)
-               ia_css_debug_dtrace(2, "\t\t\tbuffer overrun\n");
-       if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT)
-               ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission error\n");
-       if (infos & IA_CSS_RX_IRQ_INFO_ERR_SOT_SYNC)
-               ia_css_debug_dtrace(2, "\t\t\tstart-of-transmission sync error\n");
-       if (infos & IA_CSS_RX_IRQ_INFO_ERR_CONTROL)
-               ia_css_debug_dtrace(2, "\t\t\tcontrol error\n");
-       if (infos & IA_CSS_RX_IRQ_INFO_ERR_ECC_DOUBLE)
-               ia_css_debug_dtrace(2, "\t\t\t2 or more ECC errors\n");
-       if (infos & IA_CSS_RX_IRQ_INFO_ERR_CRC)
-               ia_css_debug_dtrace(2, "\t\t\tCRC mismatch\n");
-       if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ID)
-               ia_css_debug_dtrace(2, "\t\t\tunknown error\n");
-       if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_SYNC)
-               ia_css_debug_dtrace(2, "\t\t\tframe sync error\n");
-       if (infos & IA_CSS_RX_IRQ_INFO_ERR_FRAME_DATA)
-               ia_css_debug_dtrace(2, "\t\t\tframe data error\n");
-       if (infos & IA_CSS_RX_IRQ_INFO_ERR_DATA_TIMEOUT)
-               ia_css_debug_dtrace(2, "\t\t\tdata timeout\n");
-       if (infos & IA_CSS_RX_IRQ_INFO_ERR_UNKNOWN_ESC)
-               ia_css_debug_dtrace(2, "\t\t\tunknown escape command entry\n");
-       if (infos & IA_CSS_RX_IRQ_INFO_ERR_LINE_SYNC)
-               ia_css_debug_dtrace(2, "\t\t\tline sync error\n");
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "device_ready", state->device_ready);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "irq_status", state->irq_status);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "irq_enable", state->irq_enable);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "timeout_count", state->timeout_count);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "init_count", state->init_count);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw16_18", state->raw16_18);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "sync_count", state->sync_count);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "rx_count", state->rx_count);
-
-       for (i = 0; i < MIPI_4LANE_CFG; i++) {
-               ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n",
-                                   "lane_sync_count[", i, "]",
-                                   state->lane_sync_count[i]);
-       }
-
-       for (i = 0; i < MIPI_4LANE_CFG; i++) {
-               ia_css_debug_dtrace(2, "\t\t%-32s%d%-32s: %d\n",
-                                   "lane_rx_count[", i, "]",
-                                   state->lane_rx_count[i]);
-       }
-
-       return;
-}
-
-static void debug_print_rx_channel_state(rx_channel_state_t *state)
-{
-       int i;
-
-       assert(state);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "compression_scheme0", state->comp_scheme0);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "compression_scheme1", state->comp_scheme1);
-
-       for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) {
-               ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n",
-                                   "MIPI Predictor ", i, state->pred[i]);
-       }
-
-       for (i = 0; i < N_MIPI_FORMAT_CUSTOM; i++) {
-               ia_css_debug_dtrace(2, "\t\t%-32s%d: %d\n",
-                                   "MIPI Compressor ", i, state->comp[i]);
-       }
-
-       return;
-}
-
-static void debug_print_rx_state(receiver_state_t *state)
-{
-       int i;
-
-       assert(state);
-       ia_css_debug_dtrace(2, "CSI Receiver State:\n");
-
-       ia_css_debug_dtrace(2, "\tConfiguration:\n");
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "fs_to_ls_delay", state->fs_to_ls_delay);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "ls_to_data_delay", state->ls_to_data_delay);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "data_to_le_delay", state->data_to_le_delay);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "le_to_fe_delay", state->le_to_fe_delay);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "fe_to_fs_delay", state->fe_to_fs_delay);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "le_to_fs_delay", state->le_to_fs_delay);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "is_two_ppc", state->is_two_ppc);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "backend_rst", state->backend_rst);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw18", state->raw18);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "force_raw8", state->force_raw8);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "raw16", state->raw16);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "be_gsp_acc_ovl", state->be_gsp_acc_ovl);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "be_srst", state->be_srst);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "be_is_two_ppc", state->be_is_two_ppc);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "be_comp_format0", state->be_comp_format0);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "be_comp_format1", state->be_comp_format1);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "be_comp_format2", state->be_comp_format2);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "be_comp_format3", state->be_comp_format3);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "be_sel", state->be_sel);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "be_raw16_config", state->be_raw16_config);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "be_raw18_config", state->be_raw18_config);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "be_force_raw8", state->be_force_raw8);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "be_irq_status", state->be_irq_status);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "be_irq_clear", state->be_irq_clear);
-
-       /* mipi port state */
-       for (i = 0; i < N_MIPI_PORT_ID; i++) {
-               ia_css_debug_dtrace(2, "\tMIPI Port %d State:\n", i);
-
-               debug_print_rx_mipi_port_state(&state->mipi_port_state[i]);
-       }
-       /* end of mipi port state */
-
-       /* rx channel state */
-       for (i = 0; i < N_RX_CHANNEL_ID; i++) {
-               ia_css_debug_dtrace(2, "\tRX Channel %d State:\n", i);
-
-               debug_print_rx_channel_state(&state->rx_channel_state[i]);
-       }
-       /* end of rx channel state */
-
-       return;
-}
-#endif
-
-void ia_css_debug_dump_rx_state(void)
-{
-#if !defined(ISP2401)
-       receiver_state_t state;
-
-       receiver_get_state(RX0_ID, &state);
-       debug_print_rx_state(&state);
-#endif
-}
-
 void ia_css_debug_dump_sp_sw_debug_info(void)
 {
 #if SP_DEBUG != SP_DEBUG_NONE
@@ -1878,319 +834,6 @@ void ia_css_debug_dump_sp_sw_debug_info(void)
        return;
 }
 
-#if !defined(ISP2401)
-static void debug_print_isys_capture_unit_state(capture_unit_state_t *state)
-{
-       assert(state);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Packet_Length", state->Packet_Length);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Received_Length", state->Received_Length);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Received_Short_Packets",
-                           state->Received_Short_Packets);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Received_Long_Packets",
-                           state->Received_Long_Packets);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Last_Command", state->Last_Command);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Next_Command", state->Next_Command);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Last_Acknowledge", state->Last_Acknowledge);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Next_Acknowledge", state->Next_Acknowledge);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "FSM_State_Info", state->FSM_State_Info);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "StartMode", state->StartMode);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Start_Addr", state->Start_Addr);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Mem_Region_Size", state->Mem_Region_Size);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Num_Mem_Regions", state->Num_Mem_Regions);
-       return;
-}
-
-static void debug_print_isys_acquisition_unit_state(
-    acquisition_unit_state_t *state)
-{
-       assert(state);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Received_Short_Packets",
-                           state->Received_Short_Packets);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Received_Long_Packets",
-                           state->Received_Long_Packets);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Last_Command", state->Last_Command);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Next_Command", state->Next_Command);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Last_Acknowledge", state->Last_Acknowledge);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Next_Acknowledge", state->Next_Acknowledge);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "FSM_State_Info", state->FSM_State_Info);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Int_Cntr_Info", state->Int_Cntr_Info);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Start_Addr", state->Start_Addr);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Mem_Region_Size", state->Mem_Region_Size);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "Num_Mem_Regions", state->Num_Mem_Regions);
-}
-
-static void debug_print_isys_ctrl_unit_state(ctrl_unit_state_t *state)
-{
-       assert(state);
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_cmd", state->last_cmd);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_cmd", state->next_cmd);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "last_ack", state->last_ack);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n", "next_ack", state->next_ack);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "top_fsm_state", state->top_fsm_state);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captA_fsm_state", state->captA_fsm_state);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captB_fsm_state", state->captB_fsm_state);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captC_fsm_state", state->captC_fsm_state);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "acq_fsm_state", state->acq_fsm_state);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captA_start_addr", state->captA_start_addr);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captB_start_addr", state->captB_start_addr);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captC_start_addr", state->captC_start_addr);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captA_mem_region_size",
-                           state->captA_mem_region_size);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captB_mem_region_size",
-                           state->captB_mem_region_size);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captC_mem_region_size",
-                           state->captC_mem_region_size);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captA_num_mem_regions",
-                           state->captA_num_mem_regions);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captB_num_mem_regions",
-                           state->captB_num_mem_regions);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "captC_num_mem_regions",
-                           state->captC_num_mem_regions);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "acq_start_addr", state->acq_start_addr);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "acq_mem_region_size", state->acq_mem_region_size);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "acq_num_mem_regions", state->acq_num_mem_regions);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "capt_reserve_one_mem_region",
-                           state->capt_reserve_one_mem_region);
-
-       return;
-}
-
-static void debug_print_isys_state(input_system_state_t *state)
-{
-       int i;
-
-       assert(state);
-       ia_css_debug_dtrace(2, "InputSystem State:\n");
-
-       /* configuration */
-       ia_css_debug_dtrace(2, "\tConfiguration:\n");
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "str_multiCastA_sel", state->str_multicastA_sel);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "str_multicastB_sel", state->str_multicastB_sel);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "str_multicastC_sel", state->str_multicastC_sel);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "str_mux_sel", state->str_mux_sel);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "str_mon_status", state->str_mon_status);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "str_mon_irq_cond", state->str_mon_irq_cond);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "str_mon_irq_en", state->str_mon_irq_en);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "isys_srst", state->isys_srst);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "isys_slv_reg_srst", state->isys_slv_reg_srst);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "str_deint_portA_cnt", state->str_deint_portA_cnt);
-
-       ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                           "str_deint_portB_cnd", state->str_deint_portB_cnt);
-       /* end of configuration */
-
-       /* capture unit state */
-       for (i = 0; i < N_CAPTURE_UNIT_ID; i++) {
-               capture_unit_state_t *capture_unit_state;
-
-               ia_css_debug_dtrace(2, "\tCaptureUnit %d State:\n", i);
-
-               capture_unit_state = &state->capture_unit[i];
-               debug_print_isys_capture_unit_state(capture_unit_state);
-       }
-       /* end of capture unit state */
-
-       /* acquisition unit state */
-       for (i = 0; i < N_ACQUISITION_UNIT_ID; i++) {
-               acquisition_unit_state_t *acquisition_unit_state;
-
-               ia_css_debug_dtrace(2, "\tAcquisitionUnit %d State:\n", i);
-
-               acquisition_unit_state = &state->acquisition_unit[i];
-               debug_print_isys_acquisition_unit_state(acquisition_unit_state);
-       }
-       /* end of acquisition unit state */
-
-       /* control unit state */
-       for (i = 0; i < N_CTRL_UNIT_ID; i++) {
-               ia_css_debug_dtrace(2, "\tControlUnit %d State:\n", i);
-
-               debug_print_isys_ctrl_unit_state(&state->ctrl_unit_state[i]);
-       }
-       /* end of control unit state */
-}
-#endif
-
-void ia_css_debug_dump_isys_state(void)
-{
-       static input_system_state_t state;
-
-       input_system_get_state(INPUT_SYSTEM0_ID, &state);
-
-#ifndef ISP2401
-       debug_print_isys_state(&state);
-#else
-       input_system_dump_state(INPUT_SYSTEM0_ID, &state);
-#endif
-}
-
-void ia_css_debug_dump_debug_info(const char *context)
-{
-       if (!context)
-               context = "No Context provided";
-
-       ia_css_debug_dtrace(2, "CSS Debug Info dump [Context = %s]\n", context);
-       if (!IS_ISP2401)
-               ia_css_debug_dump_rx_state();
-
-#ifndef ISP2401
-       ia_css_debug_dump_if_state();
-#endif
-       ia_css_debug_dump_isp_state();
-       ia_css_debug_dump_isp_sp_fifo_state();
-       ia_css_debug_dump_isp_gdc_fifo_state();
-       ia_css_debug_dump_sp_state();
-       ia_css_debug_dump_perf_counters();
-
-#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
-       sh_css_dump_thread_wait_info();
-       sh_css_dump_pipe_stage_info();
-       sh_css_dump_pipe_stripe_info();
-#endif
-       ia_css_debug_dump_dma_isp_fifo_state();
-       ia_css_debug_dump_dma_sp_fifo_state();
-       ia_css_debug_dump_dma_state();
-
-       if (!IS_ISP2401) {
-               struct irq_controller_state state;
-
-               ia_css_debug_dump_isys_state();
-
-               irq_controller_get_state(IRQ2_ID, &state);
-
-               ia_css_debug_dtrace(2, "\t%-32s:\n",
-                                   "Input System IRQ Controller State");
-
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                                   "irq_edge", state.irq_edge);
-
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                                   "irq_mask", state.irq_mask);
-
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                                   "irq_status", state.irq_status);
-
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                                   "irq_enable", state.irq_enable);
-
-               ia_css_debug_dtrace(2, "\t\t%-32s: %d\n",
-                                   "irq_level_not_pulse",
-                                   state.irq_level_not_pulse);
-       } else {
-               ia_css_debug_dump_isys_state();
-       }
-
-       ia_css_debug_tagger_state();
-
-       return;
-}
-
 /* this function is for debug use, it can make SP go to sleep
   state after each frame, then user can dump the stable SP dmem.
   this function can be called after ia_css_start_sp()
@@ -2379,36 +1022,6 @@ void ia_css_debug_dump_isp_binary(void)
        }
 }
 
-void ia_css_debug_dump_perf_counters(void)
-{
-       const struct ia_css_fw_info *fw;
-       int i;
-       unsigned int HIVE_ADDR_ia_css_isys_sp_error_cnt;
-       /* N_MIPI_PORT_ID + 1: 3 Capture Units and 1 Acquire Unit. */
-       s32 ia_css_sp_input_system_error_cnt[N_MIPI_PORT_ID + 1];
-
-       if (IS_ISP2401)
-               return;
-
-       ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "Input System Error Counters:\n");
-
-       fw = &sh_css_sp_fw;
-       HIVE_ADDR_ia_css_isys_sp_error_cnt =
-           fw->info.sp.perf_counter_input_system_error;
-
-       (void)HIVE_ADDR_ia_css_isys_sp_error_cnt;
-
-       sp_dmem_load(SP0_ID,
-                    (unsigned int)sp_address_of(ia_css_isys_sp_error_cnt),
-                    &ia_css_sp_input_system_error_cnt,
-                    sizeof(ia_css_sp_input_system_error_cnt));
-
-       for (i = 0; i < N_MIPI_PORT_ID + 1; i++) {
-               ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "\tport[%d] = %d\n",
-                                   i, ia_css_sp_input_system_error_cnt[i]);
-       }
-}
-
 /*
  * @brief Initialize the debug mode.
  * Refer to "ia_css_debug.h" for more details.
@@ -2464,86 +1077,6 @@ static void __printf(1, 2) dtrace_dot(const char *fmt, ...)
        va_end(ap);
 }
 
-#ifdef HAS_WATCHDOG_SP_THREAD_DEBUG
-void sh_css_dump_thread_wait_info(void)
-{
-       const struct ia_css_fw_info *fw;
-       int i;
-       unsigned int HIVE_ADDR_sp_thread_wait;
-       s32 sp_thread_wait[MAX_THREAD_NUM];
-
-       ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "SEM WAITS:\n");
-
-       fw = &sh_css_sp_fw;
-       HIVE_ADDR_sp_thread_wait =
-           fw->info.sp.debug_wait;
-
-       (void)HIVE_ADDR_sp_thread_wait;
-
-       sp_dmem_load(SP0_ID,
-                    (unsigned int)sp_address_of(sp_thread_wait),
-                    &sp_thread_wait,
-                    sizeof(sp_thread_wait));
-       for (i = 0; i < MAX_THREAD_NUM; i++) {
-               ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
-                                   "\twait[%d] = 0x%X\n",
-                                   i, sp_thread_wait[i]);
-       }
-}
-
-void sh_css_dump_pipe_stage_info(void)
-{
-       const struct ia_css_fw_info *fw;
-       int i;
-       unsigned int HIVE_ADDR_sp_pipe_stage;
-       s32 sp_pipe_stage[MAX_THREAD_NUM];
-
-       ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STAGE:\n");
-
-       fw = &sh_css_sp_fw;
-       HIVE_ADDR_sp_pipe_stage =
-           fw->info.sp.debug_stage;
-
-       (void)HIVE_ADDR_sp_pipe_stage;
-
-       sp_dmem_load(SP0_ID,
-                    (unsigned int)sp_address_of(sp_pipe_stage),
-                    &sp_pipe_stage,
-                    sizeof(sp_pipe_stage));
-       for (i = 0; i < MAX_THREAD_NUM; i++) {
-               ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
-                                   "\tstage[%d] = %d\n",
-                                   i, sp_pipe_stage[i]);
-       }
-}
-
-void sh_css_dump_pipe_stripe_info(void)
-{
-       const struct ia_css_fw_info *fw;
-       int i;
-       unsigned int HIVE_ADDR_sp_pipe_stripe;
-       s32 sp_pipe_stripe[MAX_THREAD_NUM];
-
-       ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE, "PIPE STRIPE:\n");
-
-       fw = &sh_css_sp_fw;
-       HIVE_ADDR_sp_pipe_stripe =
-           fw->info.sp.debug_stripe;
-
-       (void)HIVE_ADDR_sp_pipe_stripe;
-
-       sp_dmem_load(SP0_ID,
-                    (unsigned int)sp_address_of(sp_pipe_stripe),
-                    &sp_pipe_stripe,
-                    sizeof(sp_pipe_stripe));
-       for (i = 0; i < MAX_THREAD_NUM; i++) {
-               ia_css_debug_dtrace(IA_CSS_DEBUG_VERBOSE,
-                                   "\tstripe[%d] = %d\n",
-                                   i, sp_pipe_stripe[i]);
-       }
-}
-#endif
-
 static void
 ia_css_debug_pipe_graph_dump_frame(
     const struct ia_css_frame *frame,
@@ -2673,7 +1206,7 @@ ia_css_debug_pipe_graph_dump_stage(
                char enable_info1[100];
                char enable_info2[100];
                char enable_info3[100];
-               char enable_info[200];
+               char enable_info[302];
                struct ia_css_binary_info *bi = stage->binary_info;
 
                /* Split it in 2 function-calls to keep the amount of
@@ -3360,32 +1893,6 @@ void ia_css_debug_dump_trace(void)
 #endif
 }
 
-/* Tagger state dump function. The tagger is only available when the CSS
- * contains an input system (2400 or 2401). */
-void ia_css_debug_tagger_state(void)
-{
-       unsigned int i;
-       unsigned int HIVE_ADDR_tagger_frames;
-       ia_css_tagger_buf_sp_elem_t tbuf_frames[MAX_CB_ELEMS_FOR_TAGGER];
-
-       HIVE_ADDR_tagger_frames = sh_css_sp_fw.info.sp.tagger_frames_addr;
-
-       /* This variable is not used in crun */
-       (void)HIVE_ADDR_tagger_frames;
-
-       /* 2400 and 2401 only have 1 SP, so the tagger lives on SP0 */
-       sp_dmem_load(SP0_ID,
-                    (unsigned int)sp_address_of(tagger_frames),
-                    tbuf_frames,
-                    sizeof(tbuf_frames));
-
-       ia_css_debug_dtrace(2, "Tagger Info:\n");
-       for (i = 0; i < MAX_CB_ELEMS_FOR_TAGGER; i++) {
-               ia_css_debug_dtrace(2, "\t tagger frame[%d]: exp_id=%d, marked=%d, locked=%d\n",
-                                   i, tbuf_frames[i].exp_id, tbuf_frames[i].mark, tbuf_frames[i].lock);
-       }
-}
-
 /* ISP2401 */
 void ia_css_debug_pc_dump(sp_ID_t id, unsigned int num_of_dumps)
 {
index 6d9f47629fbce7b014e603eaa96208cd6bd2bccf..86254888f676aad5687fd7869bef11197616c370 100644 (file)
@@ -16,7 +16,6 @@
 #include "system_global.h"
 #include <linux/kernel.h>
 
-#ifndef ISP2401
 
 #include "ia_css_ifmtr.h"
 #include <math_support.h>
@@ -550,4 +549,3 @@ static int ifmtr_input_start_line(
        return 0;
 }
 
-#endif
index 711a321e9a3f19940394e73f79dd24144ed531e3..d067b9fc43c7959c25614c11ccf70c0e255ecd24 100644 (file)
 #include <system_global.h>
 #include "ia_css_isys_comm.h"
 
-#ifdef ISP2401
 /**
  * Virtual Input System. (Input System 2401)
  */
 typedef isp2401_input_system_cfg_t     ia_css_isys_descr_t;
 /* end of Virtual Input System */
-#endif
+
 
 input_system_err_t ia_css_isys_init(void);
 void ia_css_isys_uninit(void);
 enum mipi_port_id ia_css_isys_port_to_mipi_port(
     enum mipi_port_id api_port);
 
-#if defined(ISP2401)
 
 /**
  * @brief Register one (virtual) stream. This is used to track when all
@@ -74,9 +72,7 @@ int ia_css_isys_convert_compressed_format(
     struct isp2401_input_system_cfg_s *cfg);
 unsigned int ia_css_csi2_calculate_input_system_alignment(
     enum atomisp_input_format fmt_type);
-#endif
 
-#if !defined(ISP2401)
 /* CSS Receiver */
 void ia_css_isys_rx_configure(
     const rx_cfg_t *config,
@@ -93,7 +89,6 @@ void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
                                   unsigned int irq_infos);
 unsigned int ia_css_isys_rx_translate_irq_infos(unsigned int bits);
 
-#endif /* #if !defined(ISP2401) */
 
 /* @brief Translate format and compression to format type.
  *
@@ -111,7 +106,6 @@ int ia_css_isys_convert_stream_format_to_mipi_format(
     mipi_predictor_t compression,
     unsigned int *fmt_type);
 
-#ifdef ISP2401
 /**
  * Virtual Input System. (Input System 2401)
  */
@@ -178,6 +172,5 @@ void ia_css_isys_stream2mmio_sid_rmgr_release(
     stream2mmio_sid_ID_t       *sid);
 
 /* end of Virtual Input System */
-#endif
 
 #endif                         /* __IA_CSS_ISYS_H__ */
index d80ef42c7a642b52d7aa6c5b2d7c36c36437a01a..784afc82c8d2f9ef1d01b83f9d4b713ad4d25aaa 100644 (file)
@@ -19,7 +19,6 @@
 #include <type_support.h>
 #include <input_system.h>
 
-#ifdef ISP2401
 #include <platform_support.h>          /* inline */
 #include <input_system_global.h>
 #include <ia_css_stream_public.h>      /* IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH */
@@ -50,5 +49,4 @@ static inline uint32_t ia_css_isys_generate_stream_id(
        return sp_thread_id * IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH + stream_id;
 }
 
-#endif  /* ISP2401*/
 #endif  /*_IA_CSS_ISYS_COMM_H */
index 3fc9fed1e516d0a67498c5579aea01b6cf46d276..881036c67baf626821f80ce6b01a44f1e44d19ca 100644 (file)
@@ -15,7 +15,6 @@
 
 #include "system_global.h"
 
-#ifdef ISP2401
 
 #include "assert_support.h"
 #include "platform_support.h"
@@ -165,4 +164,3 @@ int ia_css_isys_csi_rx_unregister_stream(
        }
        return retval;
 }
-#endif
index 261c6460e9705f8fc0483fa11c0d0dcfbff69dd4..4df0a9188ee66e7f89147c806b196d08e18dce7a 100644 (file)
@@ -15,7 +15,6 @@
 
 #include "system_global.h"
 
-#ifdef ISP2401
 
 #include "assert_support.h"
 #include "platform_support.h"
@@ -85,4 +84,3 @@ void ia_css_isys_dma_channel_rmgr_release(
                }
        }
 }
-#endif
index d0a43c44963cd2a53285a08431a0a3602bc8c010..18bfe10109892219e9c3e0b31b21f079628b75b4 100644 (file)
 #include "ia_css_isys.h"
 #include "platform_support.h"
 
-#ifdef ISP2401
 #include "isys_dma_public.h"   /* isys2401_dma_set_max_burst_size() */
 #include "isys_irq.h"
-#endif
 
-#if !defined(ISP2401)
-input_system_err_t ia_css_isys_init(void)
+static input_system_err_t ia_css_isys_2400_init(void)
 {
        backend_channel_cfg_t backend_ch0;
        backend_channel_cfg_t backend_ch1;
@@ -86,8 +83,8 @@ input_system_err_t ia_css_isys_init(void)
 
        return error;
 }
-#elif defined(ISP2401)
-input_system_err_t ia_css_isys_init(void)
+
+static input_system_err_t ia_css_isys_2401_init(void)
 {
        ia_css_isys_csi_rx_lut_rmgr_init();
        ia_css_isys_ibuf_rmgr_init();
@@ -104,19 +101,21 @@ input_system_err_t ia_css_isys_init(void)
 
        return INPUT_SYSTEM_ERR_NO_ERROR;
 }
-#endif
 
-#if !defined(ISP2401)
-void ia_css_isys_uninit(void)
+input_system_err_t ia_css_isys_init(void)
 {
+       if (IS_ISP2401)
+               return ia_css_isys_2401_init();
+
+       return ia_css_isys_2400_init();
 }
-#elif defined(ISP2401)
+
 void ia_css_isys_uninit(void)
 {
-       ia_css_isys_csi_rx_lut_rmgr_uninit();
-       ia_css_isys_ibuf_rmgr_uninit();
-       ia_css_isys_dma_channel_rmgr_uninit();
-       ia_css_isys_stream2mmio_sid_rmgr_uninit();
+       if (IS_ISP2401) {
+               ia_css_isys_csi_rx_lut_rmgr_uninit();
+               ia_css_isys_ibuf_rmgr_uninit();
+               ia_css_isys_dma_channel_rmgr_uninit();
+               ia_css_isys_stream2mmio_sid_rmgr_uninit();
+       }
 }
-#endif
-
index fb0cb183f701799880e9b46f12d849512cf339f0..b6be63746c3eed83d4d83429039fd2d9f084f059 100644 (file)
@@ -15,7 +15,6 @@
 
 #include "system_global.h"
 
-#ifdef ISP2401
 
 #include "assert_support.h"
 #include "platform_support.h"
@@ -87,4 +86,3 @@ void ia_css_isys_stream2mmio_sid_rmgr_release(
                }
        }
 }
-#endif
index af153c3fb86d357c446500c4ac16db667414019f..deb4130f710cd56fc8fbbc4e9a3f5164020bde67 100644 (file)
@@ -20,7 +20,6 @@
 #include "ia_css_irq.h"
 #include "sh_css_internal.h"
 
-#if !defined(ISP2401)
 void ia_css_isys_rx_enable_all_interrupts(enum mipi_port_id port)
 {
        hrt_data bits = receiver_port_reg_load(RX0_ID,
@@ -209,144 +208,158 @@ void ia_css_isys_rx_clear_irq_info(enum mipi_port_id port,
 
        return;
 }
-#endif /* #if !defined(ISP2401) */
 
-int ia_css_isys_convert_stream_format_to_mipi_format(
-    enum atomisp_input_format input_format,
-    mipi_predictor_t compression,
-    unsigned int *fmt_type)
+static int ia_css_isys_2400_set_fmt_type(enum atomisp_input_format input_format,
+                                        unsigned int *fmt_type)
 {
-       assert(fmt_type);
-       /*
-        * Custom (user defined) modes. Used for compressed
-        * MIPI transfers
-        *
-        * Checkpatch thinks the indent before "if" is suspect
-        * I think the only suspect part is the missing "else"
-        * because of the return.
-        */
-       if (compression != MIPI_PREDICTOR_NONE) {
-               switch (input_format) {
-               case ATOMISP_INPUT_FORMAT_RAW_6:
-                       *fmt_type = 6;
-                       break;
-               case ATOMISP_INPUT_FORMAT_RAW_7:
-                       *fmt_type = 7;
-                       break;
-               case ATOMISP_INPUT_FORMAT_RAW_8:
-                       *fmt_type = 8;
-                       break;
-               case ATOMISP_INPUT_FORMAT_RAW_10:
-                       *fmt_type = 10;
-                       break;
-               case ATOMISP_INPUT_FORMAT_RAW_12:
-                       *fmt_type = 12;
-                       break;
-               case ATOMISP_INPUT_FORMAT_RAW_14:
-                       *fmt_type = 14;
-                       break;
-               case ATOMISP_INPUT_FORMAT_RAW_16:
-                       *fmt_type = 16;
-                       break;
-               default:
-                       return -EINVAL;
-               }
-               return 0;
-       }
-       /*
-        * This mapping comes from the Arasan CSS function spec
-        * (CSS_func_spec1.08_ahb_sep29_08.pdf).
-        *
-        * MW: For some reason the mapping is not 1-to-1
-        */
        switch (input_format) {
        case ATOMISP_INPUT_FORMAT_RGB_888:
-               *fmt_type = MIPI_FORMAT_RGB888;
+               *fmt_type = MIPI_FORMAT_2400_RGB888;
                break;
        case ATOMISP_INPUT_FORMAT_RGB_555:
-               *fmt_type = MIPI_FORMAT_RGB555;
+               *fmt_type = MIPI_FORMAT_2400_RGB555;
                break;
        case ATOMISP_INPUT_FORMAT_RGB_444:
-               *fmt_type = MIPI_FORMAT_RGB444;
+               *fmt_type = MIPI_FORMAT_2400_RGB444;
                break;
        case ATOMISP_INPUT_FORMAT_RGB_565:
-               *fmt_type = MIPI_FORMAT_RGB565;
+               *fmt_type = MIPI_FORMAT_2400_RGB565;
                break;
        case ATOMISP_INPUT_FORMAT_RGB_666:
-               *fmt_type = MIPI_FORMAT_RGB666;
+               *fmt_type = MIPI_FORMAT_2400_RGB666;
                break;
        case ATOMISP_INPUT_FORMAT_RAW_8:
-               *fmt_type = MIPI_FORMAT_RAW8;
+               *fmt_type = MIPI_FORMAT_2400_RAW8;
                break;
        case ATOMISP_INPUT_FORMAT_RAW_10:
-               *fmt_type = MIPI_FORMAT_RAW10;
+               *fmt_type = MIPI_FORMAT_2400_RAW10;
                break;
        case ATOMISP_INPUT_FORMAT_RAW_6:
-               *fmt_type = MIPI_FORMAT_RAW6;
+               *fmt_type = MIPI_FORMAT_2400_RAW6;
                break;
        case ATOMISP_INPUT_FORMAT_RAW_7:
-               *fmt_type = MIPI_FORMAT_RAW7;
+               *fmt_type = MIPI_FORMAT_2400_RAW7;
                break;
        case ATOMISP_INPUT_FORMAT_RAW_12:
-               *fmt_type = MIPI_FORMAT_RAW12;
+               *fmt_type = MIPI_FORMAT_2400_RAW12;
                break;
        case ATOMISP_INPUT_FORMAT_RAW_14:
-               *fmt_type = MIPI_FORMAT_RAW14;
+               *fmt_type = MIPI_FORMAT_2400_RAW14;
                break;
        case ATOMISP_INPUT_FORMAT_YUV420_8:
-               *fmt_type = MIPI_FORMAT_YUV420_8;
+               *fmt_type = MIPI_FORMAT_2400_YUV420_8;
                break;
        case ATOMISP_INPUT_FORMAT_YUV420_10:
-               *fmt_type = MIPI_FORMAT_YUV420_10;
+               *fmt_type = MIPI_FORMAT_2400_YUV420_10;
                break;
        case ATOMISP_INPUT_FORMAT_YUV422_8:
-               *fmt_type = MIPI_FORMAT_YUV422_8;
+               *fmt_type = MIPI_FORMAT_2400_YUV422_8;
                break;
        case ATOMISP_INPUT_FORMAT_YUV422_10:
-               *fmt_type = MIPI_FORMAT_YUV422_10;
+               *fmt_type = MIPI_FORMAT_2400_YUV422_10;
                break;
        case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
-               *fmt_type = MIPI_FORMAT_YUV420_8_LEGACY;
+               *fmt_type = MIPI_FORMAT_2400_YUV420_8_LEGACY;
                break;
        case ATOMISP_INPUT_FORMAT_EMBEDDED:
-               *fmt_type = MIPI_FORMAT_EMBEDDED;
+               *fmt_type = MIPI_FORMAT_2400_EMBEDDED;
                break;
-#ifndef ISP2401
        case ATOMISP_INPUT_FORMAT_RAW_16:
                /* This is not specified by Arasan, so we use
                 * 17 for now.
                 */
-               *fmt_type = MIPI_FORMAT_RAW16;
+               *fmt_type = MIPI_FORMAT_2400_RAW16;
                break;
        case ATOMISP_INPUT_FORMAT_BINARY_8:
-               *fmt_type = MIPI_FORMAT_BINARY_8;
+               *fmt_type = MIPI_FORMAT_2400_CUSTOM0;
+               break;
+       case ATOMISP_INPUT_FORMAT_YUV420_16:
+       case ATOMISP_INPUT_FORMAT_YUV422_16:
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int ia_css_isys_2401_set_fmt_type(enum atomisp_input_format input_format,
+                                        unsigned int *fmt_type)
+{
+       switch (input_format) {
+       case ATOMISP_INPUT_FORMAT_RGB_888:
+               *fmt_type = MIPI_FORMAT_2401_RGB888;
+               break;
+       case ATOMISP_INPUT_FORMAT_RGB_555:
+               *fmt_type = MIPI_FORMAT_2401_RGB555;
+               break;
+       case ATOMISP_INPUT_FORMAT_RGB_444:
+               *fmt_type = MIPI_FORMAT_2401_RGB444;
+               break;
+       case ATOMISP_INPUT_FORMAT_RGB_565:
+               *fmt_type = MIPI_FORMAT_2401_RGB565;
+               break;
+       case ATOMISP_INPUT_FORMAT_RGB_666:
+               *fmt_type = MIPI_FORMAT_2401_RGB666;
+               break;
+       case ATOMISP_INPUT_FORMAT_RAW_8:
+               *fmt_type = MIPI_FORMAT_2401_RAW8;
+               break;
+       case ATOMISP_INPUT_FORMAT_RAW_10:
+               *fmt_type = MIPI_FORMAT_2401_RAW10;
+               break;
+       case ATOMISP_INPUT_FORMAT_RAW_6:
+               *fmt_type = MIPI_FORMAT_2401_RAW6;
+               break;
+       case ATOMISP_INPUT_FORMAT_RAW_7:
+               *fmt_type = MIPI_FORMAT_2401_RAW7;
+               break;
+       case ATOMISP_INPUT_FORMAT_RAW_12:
+               *fmt_type = MIPI_FORMAT_2401_RAW12;
+               break;
+       case ATOMISP_INPUT_FORMAT_RAW_14:
+               *fmt_type = MIPI_FORMAT_2401_RAW14;
+               break;
+       case ATOMISP_INPUT_FORMAT_YUV420_8:
+               *fmt_type = MIPI_FORMAT_2401_YUV420_8;
+               break;
+       case ATOMISP_INPUT_FORMAT_YUV420_10:
+               *fmt_type = MIPI_FORMAT_2401_YUV420_10;
+               break;
+       case ATOMISP_INPUT_FORMAT_YUV422_8:
+               *fmt_type = MIPI_FORMAT_2401_YUV422_8;
+               break;
+       case ATOMISP_INPUT_FORMAT_YUV422_10:
+               *fmt_type = MIPI_FORMAT_2401_YUV422_10;
+               break;
+       case ATOMISP_INPUT_FORMAT_YUV420_8_LEGACY:
+               *fmt_type = MIPI_FORMAT_2401_YUV420_8_LEGACY;
+               break;
+       case ATOMISP_INPUT_FORMAT_EMBEDDED:
+               *fmt_type = MIPI_FORMAT_2401_EMBEDDED;
                break;
-#else
        case ATOMISP_INPUT_FORMAT_USER_DEF1:
-               *fmt_type = MIPI_FORMAT_CUSTOM0;
+               *fmt_type = MIPI_FORMAT_2401_CUSTOM0;
                break;
        case ATOMISP_INPUT_FORMAT_USER_DEF2:
-               *fmt_type = MIPI_FORMAT_CUSTOM1;
+               *fmt_type = MIPI_FORMAT_2401_CUSTOM1;
                break;
        case ATOMISP_INPUT_FORMAT_USER_DEF3:
-               *fmt_type = MIPI_FORMAT_CUSTOM2;
+               *fmt_type = MIPI_FORMAT_2401_CUSTOM2;
                break;
        case ATOMISP_INPUT_FORMAT_USER_DEF4:
-               *fmt_type = MIPI_FORMAT_CUSTOM3;
+               *fmt_type = MIPI_FORMAT_2401_CUSTOM3;
                break;
        case ATOMISP_INPUT_FORMAT_USER_DEF5:
-               *fmt_type = MIPI_FORMAT_CUSTOM4;
+               *fmt_type = MIPI_FORMAT_2401_CUSTOM4;
                break;
        case ATOMISP_INPUT_FORMAT_USER_DEF6:
-               *fmt_type = MIPI_FORMAT_CUSTOM5;
+               *fmt_type = MIPI_FORMAT_2401_CUSTOM5;
                break;
        case ATOMISP_INPUT_FORMAT_USER_DEF7:
-               *fmt_type = MIPI_FORMAT_CUSTOM6;
+               *fmt_type = MIPI_FORMAT_2401_CUSTOM6;
                break;
        case ATOMISP_INPUT_FORMAT_USER_DEF8:
-               *fmt_type = MIPI_FORMAT_CUSTOM7;
+               *fmt_type = MIPI_FORMAT_2401_CUSTOM7;
                break;
-#endif
 
        case ATOMISP_INPUT_FORMAT_YUV420_16:
        case ATOMISP_INPUT_FORMAT_YUV422_16:
@@ -356,7 +369,60 @@ int ia_css_isys_convert_stream_format_to_mipi_format(
        return 0;
 }
 
-#if defined(ISP2401)
+int ia_css_isys_convert_stream_format_to_mipi_format(
+    enum atomisp_input_format input_format,
+    mipi_predictor_t compression,
+    unsigned int *fmt_type)
+{
+       assert(fmt_type);
+       /*
+        * Custom (user defined) modes. Used for compressed
+        * MIPI transfers
+        *
+        * Checkpatch thinks the indent before "if" is suspect
+        * I think the only suspect part is the missing "else"
+        * because of the return.
+        */
+       if (compression != MIPI_PREDICTOR_NONE) {
+               switch (input_format) {
+               case ATOMISP_INPUT_FORMAT_RAW_6:
+                       *fmt_type = 6;
+                       break;
+               case ATOMISP_INPUT_FORMAT_RAW_7:
+                       *fmt_type = 7;
+                       break;
+               case ATOMISP_INPUT_FORMAT_RAW_8:
+                       *fmt_type = 8;
+                       break;
+               case ATOMISP_INPUT_FORMAT_RAW_10:
+                       *fmt_type = 10;
+                       break;
+               case ATOMISP_INPUT_FORMAT_RAW_12:
+                       *fmt_type = 12;
+                       break;
+               case ATOMISP_INPUT_FORMAT_RAW_14:
+                       *fmt_type = 14;
+                       break;
+               case ATOMISP_INPUT_FORMAT_RAW_16:
+                       *fmt_type = 16;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               return 0;
+       }
+       /*
+        * This mapping comes from the Arasan CSS function spec
+        * (CSS_func_spec1.08_ahb_sep29_08.pdf).
+        *
+        * MW: For some reason the mapping is not 1-to-1
+        */
+       if (IS_ISP2401)
+               return ia_css_isys_2401_set_fmt_type(input_format, fmt_type);
+       else
+               return ia_css_isys_2400_set_fmt_type(input_format, fmt_type);
+}
+
 static mipi_predictor_t sh_css_csi2_compression_type_2_mipi_predictor(
     enum ia_css_csi2_compression_type type)
 {
@@ -473,9 +539,7 @@ unsigned int ia_css_csi2_calculate_input_system_alignment(
        return memory_alignment_in_bytes;
 }
 
-#endif
 
-#if !defined(ISP2401)
 static const mipi_lane_cfg_t MIPI_PORT_LANES[N_RX_MODE][N_MIPI_PORT_ID] = {
        {MIPI_4LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
        {MIPI_3LANE_CFG, MIPI_1LANE_CFG, MIPI_0LANE_CFG},
@@ -597,4 +661,3 @@ void ia_css_isys_rx_disable(void)
        }
        return;
 }
-#endif /* if !defined(ISP2401) */
index 8fc7746f86390eddf2378aa899a050297939d7f1..269a81190577822e84ec475dbc7c5d1b8b29b87d 100644 (file)
@@ -17,7 +17,6 @@
 
 #include "system_global.h"
 
-#ifdef ISP2401
 
 #include "ia_css_isys.h"
 #include "ia_css_debug.h"
@@ -689,7 +688,7 @@ static bool calculate_be_cfg(
                cfg->csi_mipi_cfg.comp_scheme = isys_cfg->csi_port_attr.comp_scheme;
                cfg->csi_mipi_cfg.comp_predictor = isys_cfg->csi_port_attr.comp_predictor;
                cfg->csi_mipi_cfg.comp_bit_idx = cfg->csi_mipi_cfg.data_type -
-                                                MIPI_FORMAT_CUSTOM0;
+                                                MIPI_FORMAT_2401_CUSTOM0;
        }
 
        return true;
@@ -856,14 +855,13 @@ static csi_mipi_packet_type_t get_csi_mipi_packet_type(
 
        packet_type = CSI_MIPI_PACKET_TYPE_RESERVED;
 
-       if (data_type >= 0 && data_type <= MIPI_FORMAT_SHORT8)
+       if (data_type >= 0 && data_type <= MIPI_FORMAT_2401_SHORT8)
                packet_type = CSI_MIPI_PACKET_TYPE_SHORT;
 
-       if (data_type > MIPI_FORMAT_SHORT8 && data_type <= N_MIPI_FORMAT)
+       if (data_type > MIPI_FORMAT_2401_SHORT8 && data_type <= N_MIPI_FORMAT_2401)
                packet_type = CSI_MIPI_PACKET_TYPE_LONG;
 
        return packet_type;
 }
 
 /* end of Private Methods */
-#endif
index 222c381ff3b9a32bee65336166b7b47bb656a69e..5f5dab7252aad49e77bd34e5f61563f181a8c5f6 100644 (file)
@@ -241,7 +241,6 @@ bool ia_css_pipeline_uses_params(struct ia_css_pipeline *pipeline);
  */
 bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val);
 
-#if defined(ISP2401)
 /**
  * @brief Get the pipeline io status
  *
@@ -250,7 +249,6 @@ bool ia_css_pipeline_get_sp_thread_id(unsigned int key, unsigned int *val);
  *     Pointer to pipe_io_status
  */
 struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void);
-#endif
 
 /**
  * @brief Map an SP thread to this pipeline
index e9e187649a657847d3a8303b82eb6c9dcd395642..3d8741e7d5ca792d484200b1905c17b76dd3bc07 100644 (file)
@@ -454,12 +454,10 @@ bool ia_css_pipeline_has_stopped(struct ia_css_pipeline *pipeline)
        return sp_group.pipe[thread_id].num_stages == 0;
 }
 
-#if defined(ISP2401)
 struct sh_css_sp_pipeline_io_status *ia_css_pipeline_get_pipe_io_status(void)
 {
        return(&sh_css_sp_group.pipe_io_status);
 }
-#endif
 
 bool ia_css_pipeline_is_mapped(unsigned int key)
 {
index 4b3fa6d93fe0ab61f366628baa252d389ea3dd54..f35c90809414cb6d5e235e16ebdca5dd3ecb047d 100644 (file)
@@ -56,9 +56,7 @@
 #include "assert_support.h"
 #include "math_support.h"
 #include "sw_event_global.h"                   /* Event IDs.*/
-#if !defined(ISP2401)
 #include "ia_css_ifmtr.h"
-#endif
 #include "input_system.h"
 #include "mmu_device.h"                /* mmu_set_page_table_base_index(), ... */
 #include "ia_css_mmu_private.h" /* sh_css_mmu_set_page_table_base_index() */
@@ -345,7 +343,6 @@ static struct sh_css_hmm_buffer_record
 *sh_css_hmm_buffer_record_validate(ia_css_ptr ddr_buffer_addr,
                                   enum ia_css_buffer_type type);
 
-#ifdef ISP2401
 static unsigned int get_crop_lines_for_bayer_order(const struct
        ia_css_stream_config *config);
 static unsigned int get_crop_columns_for_bayer_order(const struct
@@ -353,8 +350,6 @@ static unsigned int get_crop_columns_for_bayer_order(const struct
 static void get_pipe_extra_pixel(struct ia_css_pipe *pipe,
                                 unsigned int *extra_row, unsigned int *extra_column);
 
-#endif
-
 static void
 sh_css_pipe_free_shading_table(struct ia_css_pipe *pipe)
 {
@@ -472,9 +467,8 @@ ia_css_stream_input_format_bits_per_pixel(struct ia_css_stream *stream)
 /* TODO: move define to proper file in tools */
 #define GP_ISEL_TPG_MODE 0x90058
 
-#if !defined(ISP2401)
 static int
-sh_css_config_input_network(struct ia_css_stream *stream)
+sh_css_config_input_network_2400(struct ia_css_stream *stream)
 {
        unsigned int fmt_type;
        struct ia_css_pipe *pipe = stream->last_pipe;
@@ -528,7 +522,7 @@ sh_css_config_input_network(struct ia_css_stream *stream)
                            "sh_css_config_input_network() leave:\n");
        return 0;
 }
-#elif defined(ISP2401)
+
 static unsigned int csi2_protocol_calculate_max_subpixels_per_line(
     enum atomisp_input_format  format,
     unsigned int                       pixels_per_line)
@@ -824,9 +818,10 @@ static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
                    stream_cfg->source.port.num_lanes;
                isys_stream_descr->csi_port_attr.fmt_type = fmt_type;
                isys_stream_descr->csi_port_attr.ch_id = stream_cfg->channel_id;
-#ifdef ISP2401
-               isys_stream_descr->online = stream_cfg->online;
-#endif
+
+               if (IS_ISP2401)
+                       isys_stream_descr->online = stream_cfg->online;
+
                err |= ia_css_isys_convert_compressed_format(
                           &stream_cfg->source.port.compression,
                           isys_stream_descr);
@@ -849,15 +844,15 @@ static bool sh_css_translate_stream_cfg_to_input_system_input_port_attr(
                            stream_cfg->metadata_config.resolution.width;
                        isys_stream_descr->metadata.lines_per_frame =
                            stream_cfg->metadata_config.resolution.height;
-#ifdef ISP2401
+
                        /*
                         * For new input system, number of str2mmio requests must be even.
                         * So we round up number of metadata lines to be even.
                         */
-                       if (isys_stream_descr->metadata.lines_per_frame > 0)
+                       if (IS_ISP2401 && isys_stream_descr->metadata.lines_per_frame > 0)
                                isys_stream_descr->metadata.lines_per_frame +=
                                    (isys_stream_descr->metadata.lines_per_frame & 1);
-#endif
+
                        isys_stream_descr->metadata.align_req_in_bytes =
                            ia_css_csi2_calculate_input_system_alignment(
                                stream_cfg->metadata_config.data_type);
@@ -972,7 +967,7 @@ static bool sh_css_translate_binary_info_to_input_system_output_port_attr(
 }
 
 static int
-sh_css_config_input_network(struct ia_css_stream *stream)
+sh_css_config_input_network_2401(struct ia_css_stream *stream)
 {
        bool                                    rc;
        ia_css_isys_descr_t                     isys_stream_descr;
@@ -1181,7 +1176,6 @@ static inline int stream_unregister_with_csi_rx(
 {
        return stream_csi_rx_helper(stream, ia_css_isys_csi_rx_unregister_stream);
 }
-#endif
 
 
 static void
@@ -1194,14 +1188,11 @@ start_binary(struct ia_css_pipe *pipe,
        if (binary)
                sh_css_metrics_start_binary(&binary->metrics);
 
-
-#if !defined(ISP2401)
-       if (pipe->stream->reconfigure_css_rx) {
+       if (!IS_ISP2401 && pipe->stream->reconfigure_css_rx) {
                ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
                                         pipe->stream->config.mode);
                pipe->stream->reconfigure_css_rx = false;
        }
-#endif
 }
 
 /* start the copy function on the SP */
@@ -1214,22 +1205,18 @@ start_copy_on_sp(struct ia_css_pipe *pipe,
        if ((!pipe) || (!pipe->stream))
                return -EINVAL;
 
-#if !defined(ISP2401)
-       if (pipe->stream->reconfigure_css_rx)
+       if (!IS_ISP2401 && pipe->stream->reconfigure_css_rx)
                ia_css_isys_rx_disable();
-#endif
 
        if (pipe->stream->config.input_config.format != ATOMISP_INPUT_FORMAT_BINARY_8)
                return -EINVAL;
        sh_css_sp_start_binary_copy(ia_css_pipe_get_pipe_num(pipe), out_frame, pipe->stream->config.pixels_per_clock == 2);
 
-#if !defined(ISP2401)
-       if (pipe->stream->reconfigure_css_rx) {
+       if (!IS_ISP2401 && pipe->stream->reconfigure_css_rx) {
                ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
                                         pipe->stream->config.mode);
                pipe->stream->reconfigure_css_rx = false;
        }
-#endif
 
        return 0;
 }
@@ -1311,9 +1298,7 @@ sh_css_invalidate_shading_tables(struct ia_css_stream *stream)
 static void
 enable_interrupts(enum ia_css_irq_type irq_type)
 {
-#ifndef ISP2401
        enum mipi_port_id port;
-#endif
        bool enable_pulse = irq_type != IA_CSS_IRQ_TYPE_EDGE;
 
        IA_CSS_ENTER_PRIVATE("");
@@ -1334,10 +1319,10 @@ enable_interrupts(enum ia_css_irq_type irq_type)
            (enum virq_id)(IRQ_SW_CHANNEL1_ID + IRQ_SW_CHANNEL_OFFSET),
            true);
 
-#ifndef ISP2401
-       for (port = 0; port < N_MIPI_PORT_ID; port++)
-               ia_css_isys_rx_enable_all_interrupts(port);
-#endif
+       if (!IS_ISP2401) {
+               for (port = 0; port < N_MIPI_PORT_ID; port++)
+                       ia_css_isys_rx_enable_all_interrupts(port);
+       }
 
        IA_CSS_LEAVE_PRIVATE("");
 }
@@ -2173,10 +2158,10 @@ ia_css_uninit(void)
 
        ia_css_rmgr_uninit();
 
-#if !defined(ISP2401)
-       /* needed for reprogramming the inputformatter after power cycle of css */
-       ifmtr_set_if_blocking_mode_reset = true;
-#endif
+       if (!IS_ISP2401) {
+               /* needed for reprogramming the inputformatter after power cycle of css */
+               ifmtr_set_if_blocking_mode_reset = true;
+       }
 
        if (!fw_explicitly_loaded)
                ia_css_unload_firmware();
@@ -2957,7 +2942,6 @@ init_vf_frameinfo_defaults(struct ia_css_pipe *pipe,
        return err;
 }
 
-#ifdef ISP2401
 static unsigned int
 get_crop_lines_for_bayer_order(const struct ia_css_stream_config *config)
 {
@@ -3059,11 +3043,11 @@ ia_css_get_crop_offsets(
                             pipe->config.input_effective_res.height);
 
        input_res = &pipe->stream->config.input_config.input_res;
-#ifndef ISP2401
-       effective_res = &pipe->stream->config.input_config.effective_res;
-#else
-       effective_res = &pipe->config.input_effective_res;
-#endif
+
+       if (IS_ISP2401)
+               effective_res = &pipe->config.input_effective_res;
+       else
+               effective_res = &pipe->stream->config.input_config.effective_res;
 
        get_pipe_extra_pixel(pipe, &extra_row, &extra_col);
 
@@ -3101,7 +3085,6 @@ ia_css_get_crop_offsets(
 
        return;
 }
-#endif
 
 static int
 init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
@@ -3132,9 +3115,10 @@ init_in_frameinfo_memory_defaults(struct ia_css_pipe *pipe,
        ia_css_query_internal_queue_id(IA_CSS_BUFFER_TYPE_INPUT_FRAME, thread_id, &queue_id);
        in_frame->dynamic_queue_id = queue_id;
        in_frame->buf_type = IA_CSS_BUFFER_TYPE_INPUT_FRAME;
-#ifdef ISP2401
-       ia_css_get_crop_offsets(pipe, &in_frame->frame_info);
-#endif
+
+       if (IS_ISP2401)
+               ia_css_get_crop_offsets(pipe, &in_frame->frame_info);
+
        err = ia_css_frame_init_planes(in_frame);
 
        ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE_PRIVATE, "%s() bayer_order = %d\n",
@@ -4473,7 +4457,6 @@ ia_css_stream_get_buffer_depth(struct ia_css_stream *stream,
        return 0;
 }
 
-#if !defined(ISP2401)
 unsigned int
 sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx)
 {
@@ -4484,7 +4467,6 @@ sh_css_get_mipi_sizes_for_check(const unsigned int port, const unsigned int idx)
                            port, idx, my_css.mipi_sizes_for_check[port][idx]);
        return my_css.mipi_sizes_for_check[port][idx];
 }
-#endif
 
 static int sh_css_pipe_configure_output(
     struct ia_css_pipe *pipe,
@@ -7369,7 +7351,7 @@ static int capture_start(struct ia_css_pipe *pipe)
                }
        }
        /* old isys: need to send_mipi_frames() in all pipe modes */
-       if (!IS_ISP2401 || (IS_ISP2401 && pipe->config.mode != IA_CSS_PIPE_MODE_COPY)) {
+       if (!IS_ISP2401 || pipe->config.mode != IA_CSS_PIPE_MODE_COPY) {
                err = send_mipi_frames(pipe);
                if (err) {
                        IA_CSS_LEAVE_ERR_PRIVATE(err);
@@ -7382,19 +7364,18 @@ static int capture_start(struct ia_css_pipe *pipe)
 
        start_pipe(pipe, copy_ovrd, pipe->stream->config.mode);
 
-#if !defined(ISP2401)
        /*
         * old isys: for IA_CSS_PIPE_MODE_COPY pipe, isys rx has to be configured,
         * which is currently done in start_binary(); but COPY pipe contains no binary,
         * and does not call start_binary(); so we need to configure the rx here.
         */
-       if (pipe->config.mode == IA_CSS_PIPE_MODE_COPY &&
+       if (!IS_ISP2401 &&
+           pipe->config.mode == IA_CSS_PIPE_MODE_COPY &&
            pipe->stream->reconfigure_css_rx) {
                ia_css_isys_rx_configure(&pipe->stream->csi_rx_config,
                                         pipe->stream->config.mode);
                pipe->stream->reconfigure_css_rx = false;
        }
-#endif
 
        IA_CSS_LEAVE_ERR_PRIVATE(err);
        return err;
@@ -7616,20 +7597,15 @@ void ia_css_stream_request_flash(struct ia_css_stream *stream)
        ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
                            "ia_css_stream_request_flash() enter: void\n");
 
-#ifndef ISP2401
-       sh_css_write_host2sp_command(host2sp_cmd_start_flash);
-#else
-       if (sh_css_sp_is_running()) {
-               if (!sh_css_write_host2sp_command(host2sp_cmd_start_flash)) {
+       if (!IS_ISP2401 || sh_css_sp_is_running()) {
+               if (!sh_css_write_host2sp_command(host2sp_cmd_start_flash) && IS_ISP2401) {
                        IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
                        ia_css_debug_dump_sp_sw_debug_info();
-                       ia_css_debug_dump_debug_info(NULL);
                }
        } else {
                IA_CSS_LOG("SP is not running!");
        }
 
-#endif
        ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
                            "ia_css_stream_request_flash() leave: return_void\n");
 }
@@ -7974,7 +7950,6 @@ ia_css_pipe_override_frame_format(struct ia_css_pipe *pipe,
        return err;
 }
 
-#if !defined(ISP2401)
 /* Configuration of INPUT_SYSTEM_VERSION_2401 is done on SP */
 static int
 ia_css_stream_configure_rx(struct ia_css_stream *stream)
@@ -8017,7 +7992,6 @@ ia_css_stream_configure_rx(struct ia_css_stream *stream)
        stream->reconfigure_css_rx = true;
        return 0;
 }
-#endif
 
 static struct ia_css_pipe *
 find_pipe(struct ia_css_pipe *pipes[], unsigned int num_pipes,
@@ -8103,9 +8077,7 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
 
        /* check if mipi size specified */
        if (stream_config->mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)
-#ifdef ISP2401
-               if (!stream_config->online)
-#endif
+               if (!IS_ISP2401 || !stream_config->online)
                {
                        unsigned int port = (unsigned int)stream_config->source.port.port;
 
@@ -8206,32 +8178,31 @@ ia_css_stream_create(const struct ia_css_stream_config *stream_config,
        switch (curr_stream->config.mode) {
        case IA_CSS_INPUT_MODE_SENSOR:
        case IA_CSS_INPUT_MODE_BUFFERED_SENSOR:
-#if !defined(ISP2401)
-               ia_css_stream_configure_rx(curr_stream);
-#endif
+               if (!IS_ISP2401)
+                       ia_css_stream_configure_rx(curr_stream);
                break;
        case IA_CSS_INPUT_MODE_TPG:
-#if !defined(ISP2401)
-               IA_CSS_LOG("tpg_configuration: x_mask=%d, y_mask=%d, x_delta=%d, y_delta=%d, xy_mask=%d",
-                          curr_stream->config.source.tpg.x_mask,
-                          curr_stream->config.source.tpg.y_mask,
-                          curr_stream->config.source.tpg.x_delta,
-                          curr_stream->config.source.tpg.y_delta,
-                          curr_stream->config.source.tpg.xy_mask);
-
-               sh_css_sp_configure_tpg(
-                   curr_stream->config.source.tpg.x_mask,
-                   curr_stream->config.source.tpg.y_mask,
-                   curr_stream->config.source.tpg.x_delta,
-                   curr_stream->config.source.tpg.y_delta,
-                   curr_stream->config.source.tpg.xy_mask);
-#endif
+               if (!IS_ISP2401) {
+                       IA_CSS_LOG("tpg_configuration: x_mask=%d, y_mask=%d, x_delta=%d, y_delta=%d, xy_mask=%d",
+                                  curr_stream->config.source.tpg.x_mask,
+                                  curr_stream->config.source.tpg.y_mask,
+                                  curr_stream->config.source.tpg.x_delta,
+                                  curr_stream->config.source.tpg.y_delta,
+                                  curr_stream->config.source.tpg.xy_mask);
+
+                       sh_css_sp_configure_tpg(
+                           curr_stream->config.source.tpg.x_mask,
+                           curr_stream->config.source.tpg.y_mask,
+                           curr_stream->config.source.tpg.x_delta,
+                           curr_stream->config.source.tpg.y_delta,
+                           curr_stream->config.source.tpg.xy_mask);
+               }
                break;
        case IA_CSS_INPUT_MODE_PRBS:
-#if !defined(ISP2401)
-               IA_CSS_LOG("mode prbs");
-               sh_css_sp_configure_prbs(curr_stream->config.source.prbs.seed);
-#endif
+               if (!IS_ISP2401) {
+                       IA_CSS_LOG("mode prbs");
+                       sh_css_sp_configure_prbs(curr_stream->config.source.prbs.seed);
+               }
                break;
        case IA_CSS_INPUT_MODE_MEMORY:
                IA_CSS_LOG("mode memory");
@@ -8473,46 +8444,48 @@ ia_css_stream_destroy(struct ia_css_stream *stream)
 
        if ((stream->last_pipe) &&
            ia_css_pipeline_is_mapped(stream->last_pipe->pipe_num)) {
-#if defined(ISP2401)
-               for (i = 0; i < stream->num_pipes; i++) {
-                       struct ia_css_pipe *entry = stream->pipes[i];
-                       unsigned int sp_thread_id;
-                       struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;
-
-                       assert(entry);
-                       if (entry) {
-                               /* get the SP thread id */
-                               if (!ia_css_pipeline_get_sp_thread_id(
-                                       ia_css_pipe_get_pipe_num(entry), &sp_thread_id))
-                                       return -EINVAL;
-                               /* get the target input terminal */
-                               sp_pipeline_input_terminal =
-                               &sh_css_sp_group.pipe_io[sp_thread_id].input;
-
-                               for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) {
-                                       ia_css_isys_stream_h isys_stream =
-                                       &sp_pipeline_input_terminal->context.virtual_input_system_stream[i];
-                                       if (stream->config.isys_config[i].valid && isys_stream->valid)
-                                               ia_css_isys_stream_destroy(isys_stream);
-                               }
-                       }
-               }
-               if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+               if (IS_ISP2401) {
                        for (i = 0; i < stream->num_pipes; i++) {
                                struct ia_css_pipe *entry = stream->pipes[i];
-                               /*
-                                * free any mipi frames that are remaining:
-                                * some test stream create-destroy cycles do
-                                * not generate output frames
-                                * and the mipi buffer is not freed in the
-                                * deque function
-                                */
-                               if (entry)
-                                       free_mipi_frames(entry);
+                               unsigned int sp_thread_id;
+                               struct sh_css_sp_pipeline_terminal *sp_pipeline_input_terminal;
+
+                               assert(entry);
+                               if (entry) {
+                                       /* get the SP thread id */
+                                       if (!ia_css_pipeline_get_sp_thread_id(
+                                                       ia_css_pipe_get_pipe_num(entry), &sp_thread_id))
+                                               return -EINVAL;
+
+                                       /* get the target input terminal */
+                                       sp_pipeline_input_terminal =
+                                               &sh_css_sp_group.pipe_io[sp_thread_id].input;
+
+                                       for (i = 0; i < IA_CSS_STREAM_MAX_ISYS_STREAM_PER_CH; i++) {
+                                               ia_css_isys_stream_h isys_stream =
+                                                       &sp_pipeline_input_terminal->context.virtual_input_system_stream[i];
+                                               if (stream->config.isys_config[i].valid && isys_stream->valid)
+                                                       ia_css_isys_stream_destroy(isys_stream);
+                                       }
+                               }
+                       }
+
+                       if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+                               for (i = 0; i < stream->num_pipes; i++) {
+                                       struct ia_css_pipe *entry = stream->pipes[i];
+                                       /*
+                                        * free any mipi frames that are remaining:
+                                        * some test stream create-destroy cycles do
+                                        * not generate output frames
+                                        * and the mipi buffer is not freed in the
+                                        * deque function
+                                        */
+                                       if (entry)
+                                               free_mipi_frames(entry);
+                               }
                        }
+                       stream_unregister_with_csi_rx(stream);
                }
-               stream_unregister_with_csi_rx(stream);
-#endif
 
                for (i = 0; i < stream->num_pipes; i++) {
                        struct ia_css_pipe *curr_pipe = stream->pipes[i];
@@ -8605,15 +8578,13 @@ ia_css_stream_start(struct ia_css_stream *stream)
                return err;
        }
 
-#if defined(ISP2401)
-       if ((stream->config.mode == IA_CSS_INPUT_MODE_SENSOR) ||
-           (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR))
+       if (IS_ISP2401 &&
+           ((stream->config.mode == IA_CSS_INPUT_MODE_SENSOR) ||
+            (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR)))
                stream_register_with_csi_rx(stream);
-#endif
 
-#if !defined(ISP2401)
        /* Initialize mipi size checks */
-       if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+       if (!IS_ISP2401 && stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
                unsigned int idx;
                unsigned int port = (unsigned int)(stream->config.source.port.port);
 
@@ -8622,10 +8593,12 @@ ia_css_stream_start(struct ia_css_stream *stream)
                        sh_css_get_mipi_sizes_for_check(port, idx);
                }
        }
-#endif
 
        if (stream->config.mode != IA_CSS_INPUT_MODE_MEMORY) {
-               err = sh_css_config_input_network(stream);
+               if (IS_ISP2401)
+                       err = sh_css_config_input_network_2401(stream);
+               else
+                       err = sh_css_config_input_network_2400(stream);
                if (err)
                        return err;
        }
@@ -8646,16 +8619,14 @@ ia_css_stream_stop(struct ia_css_stream *stream)
        ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_stream_stop: stopping %d\n",
                            stream->last_pipe->mode);
 
-#if !defined(ISP2401)
        /* De-initialize mipi size checks */
-       if (stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
+       if (!IS_ISP2401 && stream->config.mode == IA_CSS_INPUT_MODE_BUFFERED_SENSOR) {
                unsigned int idx;
                unsigned int port = (unsigned int)(stream->config.source.port.port);
 
                for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT; idx++)
                        sh_css_sp_group.config.mipi_sizes_for_check[port][idx] = 0;
        }
-#endif
 
        err = ia_css_pipeline_request_stop(&stream->last_pipe->pipeline);
        if (err)
@@ -9035,7 +9006,6 @@ ia_css_stop_sp(void)
        if (!sh_css_write_host2sp_command(host2sp_cmd_terminate)) {
                IA_CSS_ERROR("Call to 'sh-css_write_host2sp_command()' failed");
                ia_css_debug_dump_sp_sw_debug_info();
-               ia_css_debug_dump_debug_info(NULL);
        }
 
        sh_css_sp_set_sp_running(false);
index d98f1323441eabec40225426e086b2929ed6404a..2349eb4d3767e7b0d0bec781bfb3882e98696e51 100644 (file)
@@ -22,9 +22,7 @@
 #include <platform_support.h>
 #include <linux/stdarg.h>
 
-#if !defined(ISP2401)
 #include "input_formatter.h"
-#endif
 #include "input_system.h"
 
 #include "ia_css_types.h"
 #define SH_CSS_MAX_IF_CONFIGS  3 /* Must match with IA_CSS_NR_OF_CONFIGS (not defined yet).*/
 #define SH_CSS_IF_CONFIG_NOT_NEEDED    0xFF
 
-/*
- * SH_CSS_MAX_SP_THREADS:
- *      sp threads visible to host with connected communication queues
- *      these threads are capable of running an image pipe
- * SH_CSS_MAX_SP_INTERNAL_THREADS:
- *      internal sp service threads, no communication queues to host
- *      these threads can't be used as image pipe
- */
-
-#if !defined(ISP2401)
-#define SH_CSS_SP_INTERNAL_METADATA_THREAD     1
-#else
-#define SH_CSS_SP_INTERNAL_METADATA_THREAD     0
-#endif
-
-#define SH_CSS_SP_INTERNAL_SERVICE_THREAD              1
-
 #define SH_CSS_MAX_SP_THREADS          5
 
-#define SH_CSS_MAX_SP_INTERNAL_THREADS (\
-        SH_CSS_SP_INTERNAL_SERVICE_THREAD +\
-        SH_CSS_SP_INTERNAL_METADATA_THREAD)
-
-#define SH_CSS_MAX_PIPELINES   SH_CSS_MAX_SP_THREADS
-
 /**
  * The C99 standard does not specify the exact object representation of structs;
  * the representation is compiler dependent.
@@ -357,14 +332,12 @@ struct sh_css_sp_debug_command {
        u32 dma_sw_reg;
 };
 
-#if !defined(ISP2401)
 /* SP input formatter configuration.*/
 struct sh_css_sp_input_formatter_set {
        u32                             stream_format;
        input_formatter_cfg_t   config_a;
        input_formatter_cfg_t   config_b;
 };
-#endif
 
 #define IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT (3)
 
@@ -377,7 +350,7 @@ struct sh_css_sp_config {
             frames are locked when their EOF event is successfully sent to the
             host (true) or when they are passed to the preview/video pipe
             (false). */
-#if !defined(ISP2401)
+
        struct {
                u8                                      a_changed;
                u8                                      b_changed;
@@ -385,15 +358,13 @@ struct sh_css_sp_config {
                struct sh_css_sp_input_formatter_set
                        set[SH_CSS_MAX_IF_CONFIGS]; /* CSI-2 port is used as index. */
        } input_formatter;
-#endif
-#if !defined(ISP2401)
+
        sync_generator_cfg_t    sync_gen;
        tpg_cfg_t               tpg;
        prbs_cfg_t              prbs;
        input_system_cfg_t      input_circuit;
        u8                      input_circuit_cfg_changed;
        u32             mipi_sizes_for_check[N_CSI_PORTS][IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT];
-#endif
        u8                 enable_isys_event_queue;
        u8                      disable_cont_vf;
 };
@@ -409,7 +380,6 @@ enum sh_css_stage_type {
 #define SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS_MASK \
        ((SH_CSS_PIPE_CONFIG_SAMPLE_PARAMS << SH_CSS_MAX_SP_THREADS) - 1)
 
-#if defined(ISP2401)
 struct sh_css_sp_pipeline_terminal {
        union {
                /* Input System 2401 */
@@ -442,7 +412,6 @@ struct sh_css_sp_pipeline_io_status {
        u32     running[N_INPUT_SYSTEM_CSI_PORT];       /** configured streams */
 };
 
-#endif
 enum sh_css_port_dir {
        SH_CSS_PORT_INPUT  = 0,
        SH_CSS_PORT_OUTPUT  = 1
@@ -641,10 +610,8 @@ struct sh_css_sp_stage {
 struct sh_css_sp_group {
        struct sh_css_sp_config         config;
        struct sh_css_sp_pipeline       pipe[SH_CSS_MAX_SP_THREADS];
-#if defined(ISP2401)
        struct sh_css_sp_pipeline_io    pipe_io[SH_CSS_MAX_SP_THREADS];
        struct sh_css_sp_pipeline_io_status     pipe_io_status;
-#endif
        struct sh_css_sp_debug_command  debug;
 };
 
@@ -922,13 +889,11 @@ sh_css_frame_info_set_width(struct ia_css_frame_info *info,
                            unsigned int width,
                            unsigned int aligned);
 
-#if !defined(ISP2401)
 
 unsigned int
 sh_css_get_mipi_sizes_for_check(const unsigned int port,
                                const unsigned int idx);
 
-#endif
 
 ia_css_ptr
 sh_css_store_sp_group_to_ddr(void);
@@ -971,11 +936,9 @@ sh_css_continuous_is_enabled(uint8_t pipe_num);
 struct ia_css_pipe *
 find_pipe_by_num(uint32_t pipe_num);
 
-#ifdef ISP2401
 void
 ia_css_get_crop_offsets(
     struct ia_css_pipe *pipe,
     struct ia_css_frame_info *in_frame);
-#endif
 
 #endif /* _SH_CSS_INTERNAL_H_ */
index ced21dedf7ac9dff6e07a781487d596388826545..b7c1e164ee2449952a88c39e145d5910ee699efd 100644 (file)
@@ -185,35 +185,6 @@ ia_css_mipi_frame_calculate_size(const unsigned int width,
        return err;
 }
 
-/*
- * Check if a source port or TPG/PRBS ID is valid
- */
-
-#if !defined(ISP2401)
-int
-ia_css_mipi_frame_enable_check_on_size(const enum mipi_port_id port,
-                                      const unsigned int       size_mem_words)
-{
-       u32 idx;
-
-       int err = -EBUSY;
-
-       OP___assert(port < N_CSI_PORTS);
-       OP___assert(size_mem_words != 0);
-
-       for (idx = 0; idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT &&
-            my_css.mipi_sizes_for_check[port][idx] != 0;
-            idx++) { /* do nothing */
-       }
-       if (idx < IA_CSS_MIPI_SIZE_CHECK_MAX_NOF_ENTRIES_PER_PORT) {
-               my_css.mipi_sizes_for_check[port][idx] = size_mem_words;
-               err = 0;
-       }
-
-       return err;
-}
-#endif
-
 void
 mipi_init(void)
 {
index 588f2adab058cd81299404fbb540a36fb9228c91..232744973ab887ee586ba7e9b5dfc88c9db7b34f 100644 (file)
@@ -3720,10 +3720,47 @@ struct ia_css_shading_table *ia_css_get_shading_table(struct ia_css_stream
 
 ia_css_ptr sh_css_store_sp_group_to_ddr(void)
 {
+       u8 *write_buf;
+       u8 *buf_ptr;
+
        IA_CSS_ENTER_LEAVE_PRIVATE("void");
+
+       write_buf = kzalloc(sizeof(u8) * 8192, GFP_KERNEL);
+       if (!write_buf)
+               return 0;
+
+       buf_ptr = write_buf;
+       if (IS_ISP2401) {
+               memcpy(buf_ptr, &sh_css_sp_group.config, 3);
+               buf_ptr += 3;
+               *buf_ptr++ = sh_css_sp_group.config.enable_isys_event_queue;
+               *buf_ptr++ = sh_css_sp_group.config.disable_cont_vf;
+               memset(buf_ptr, 0, 3);
+               buf_ptr += 3; /* Padding 3 bytes for struct sh_css_sp_config*/
+       } else {
+               memcpy(buf_ptr, &sh_css_sp_group.config, sizeof(sh_css_sp_group.config));
+               buf_ptr += sizeof(sh_css_sp_group.config);
+       }
+
+       memcpy(buf_ptr, &sh_css_sp_group.pipe, sizeof(sh_css_sp_group.pipe));
+       buf_ptr += sizeof(sh_css_sp_group.pipe);
+
+       if (IS_ISP2401) {
+               memcpy(buf_ptr, &sh_css_sp_group.pipe_io, sizeof(sh_css_sp_group.pipe_io));
+               buf_ptr += sizeof(sh_css_sp_group.pipe_io);
+               memcpy(buf_ptr, &sh_css_sp_group.pipe_io_status,
+                      sizeof(sh_css_sp_group.pipe_io_status));
+               buf_ptr += sizeof(sh_css_sp_group.pipe_io_status);
+       }
+
+       memcpy(buf_ptr, &sh_css_sp_group.debug, sizeof(sh_css_sp_group.debug));
+       buf_ptr += sizeof(sh_css_sp_group.debug);
+
        hmm_store(xmem_sp_group_ptrs,
-                  &sh_css_sp_group,
-                  sizeof(struct sh_css_sp_group));
+                 write_buf,
+                 buf_ptr - write_buf);
+
+       kfree(write_buf);
        return xmem_sp_group_ptrs;
 }
 
index f35c745c22c07d9eb8c5b302b820c738a195300b..cd7f5a3fecaa684b2c1076a7142d3bf373beab03 100644 (file)
@@ -17,9 +17,7 @@
 
 #include "sh_css_sp.h"
 
-#if !defined(ISP2401)
 #include "input_formatter.h"
-#endif
 
 #include "dma.h"       /* N_DMA_CHANNEL_ID */
 
@@ -228,11 +226,8 @@ sh_css_sp_start_binary_copy(unsigned int pipe_num,
        IA_CSS_LOG("pipe_id %d port_config %08x",
                   pipe->pipe_id, pipe->inout_port_config);
 
-#if !defined(ISP2401)
-       sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
-#else
-       (void)two_ppc;
-#endif
+       if (!IS_ISP2401)
+               sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
 
        sh_css_sp_stage.num = stage_num;
        sh_css_sp_stage.stage_type = SH_CSS_SP_STAGE_TYPE;
@@ -306,11 +301,8 @@ sh_css_sp_start_raw_copy(struct ia_css_frame *out_frame,
        IA_CSS_LOG("pipe_id %d port_config %08x",
                   pipe->pipe_id, pipe->inout_port_config);
 
-#if !defined(ISP2401)
-       sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
-#else
-       (void)two_ppc;
-#endif
+       if (!IS_ISP2401)
+               sh_css_sp_group.config.input_formatter.isp_2ppc = (uint8_t)two_ppc;
 
        sh_css_sp_stage.num = stage_num;
        sh_css_sp_stage.xmem_bin_addr = 0x0;
@@ -633,7 +625,6 @@ set_view_finder_buffer(const struct ia_css_frame *frame)
        return 0;
 }
 
-#if !defined(ISP2401)
 void sh_css_sp_set_if_configs(
     const input_formatter_cfg_t        *config_a,
     const input_formatter_cfg_t        *config_b,
@@ -655,9 +646,7 @@ void sh_css_sp_set_if_configs(
 
        return;
 }
-#endif
 
-#if !defined(ISP2401)
 void
 sh_css_sp_program_input_circuit(int fmt_type,
                                int ch_id,
@@ -674,9 +663,7 @@ sh_css_sp_program_input_circuit(int fmt_type,
        sh_css_sp_group.config.input_circuit_cfg_changed = true;
        sh_css_sp_stage.program_input_circuit = true;
 }
-#endif
 
-#if !defined(ISP2401)
 void
 sh_css_sp_configure_sync_gen(int width, int height,
                             int hblank_cycles,
@@ -707,7 +694,6 @@ sh_css_sp_configure_prbs(int seed)
 {
        sh_css_sp_group.config.prbs.seed = seed;
 }
-#endif
 
 void
 sh_css_sp_configure_enable_raw_pool_locking(bool lock_all)
@@ -757,22 +743,18 @@ sh_css_sp_init_group(bool two_ppc,
                     bool no_isp_sync,
                     uint8_t if_config_index)
 {
-#if !defined(ISP2401)
-       sh_css_sp_group.config.input_formatter.isp_2ppc = two_ppc;
-#else
-       (void)two_ppc;
-#endif
+       if (!IS_ISP2401)
+               sh_css_sp_group.config.input_formatter.isp_2ppc = two_ppc;
 
        sh_css_sp_group.config.no_isp_sync = (uint8_t)no_isp_sync;
        /* decide whether the frame is processed online or offline */
        if (if_config_index == SH_CSS_IF_CONFIG_NOT_NEEDED) return;
-#if !defined(ISP2401)
-       assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
-       sh_css_sp_group.config.input_formatter.set[if_config_index].stream_format =
-           input_format;
-#else
-       (void)input_format;
-#endif
+
+       if (!IS_ISP2401) {
+               assert(if_config_index < SH_CSS_MAX_IF_CONFIGS);
+               sh_css_sp_group.config.input_formatter.set[if_config_index].stream_format =
+                   input_format;
+       }
 }
 
 void
@@ -1031,18 +1013,16 @@ sh_css_sp_init_stage(struct ia_css_binary *binary,
        if (err)
                return err;
 
-#ifdef ISP2401
-       pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
-       if (!pipe)
-               return -EINVAL;
+       if (IS_ISP2401) {
+               pipe = find_pipe_by_num(sh_css_sp_group.pipe[thread_id].pipe_num);
+               if (!pipe)
+                       return -EINVAL;
 
-       if (args->in_frame)
-               ia_css_get_crop_offsets(pipe, &args->in_frame->frame_info);
-       else
-               ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
-#else
-       (void)pipe; /*avoid build warning*/
-#endif
+               if (args->in_frame)
+                       ia_css_get_crop_offsets(pipe, &args->in_frame->frame_info);
+               else
+                       ia_css_get_crop_offsets(pipe, &binary->in_frame_info);
+       }
 
        err = configure_isp_from_args(&sh_css_sp_group.pipe[thread_id],
                                      binary, args, two_ppc, sh_css_sp_stage.deinterleaved);
index f69a79b0b0dad0fe83b0d8b51b61b14ae18347f7..36b693bd916a5046a1d0025cdfd70dbcb1454aad 100644 (file)
@@ -18,9 +18,7 @@
 
 #include <system_global.h>
 #include <type_support.h>
-#if !defined(ISP2401)
 #include "input_formatter.h"
-#endif
 
 #include "ia_css_binary.h"
 #include "ia_css_types.h"
@@ -149,13 +147,11 @@ sh_css_sp_get_debug_state(struct sh_css_sp_debug_state *state);
 
 #endif
 
-#if !defined(ISP2401)
 void
 sh_css_sp_set_if_configs(
     const input_formatter_cfg_t        *config_a,
     const input_formatter_cfg_t        *config_b,
     const uint8_t              if_config_index);
-#endif
 
 void
 sh_css_sp_program_input_circuit(int fmt_type,
index f5d963904201fa7e4aabd5c4d48ee394f2c2d58b..8e26663cecb63d32b696135d19bcfeebcb06dc1e 100644 (file)
@@ -488,12 +488,8 @@ static const struct vb2_ops isc_vb2_ops = {
 static int isc_querycap(struct file *file, void *priv,
                         struct v4l2_capability *cap)
 {
-       struct isc_device *isc = video_drvdata(file);
-
        strscpy(cap->driver, "microchip-isc", sizeof(cap->driver));
        strscpy(cap->card, "Atmel Image Sensor Controller", sizeof(cap->card));
-       snprintf(cap->bus_info, sizeof(cap->bus_info),
-                "platform:%s", isc->v4l2_dev.name);
 
        return 0;
 }
index 76ad802d694e1132b64351df6fb91cf91739a895..34f574b0b5216c55fa9d6693fe58b0bbff94e759 100644 (file)
@@ -2425,16 +2425,16 @@ int imgu_css_cfg_acc(struct imgu_css *css, unsigned int pipe,
                                        acc->awb_fr.stripes[1].grid_cfg.width,
                                        b_w_log2);
                acc->awb_fr.stripes[1].grid_cfg.x_end = end;
-
-               /*
-                * To reduce complexity of debubbling and loading
-                * statistics fix grid_height_per_slice to 1 for both
-                * stripes.
-                */
-               for (i = 0; i < stripes; i++)
-                       acc->awb_fr.stripes[i].grid_cfg.height_per_slice = 1;
        }
 
+       /*
+        * To reduce complexity of debubbling and loading
+        * statistics fix grid_height_per_slice to 1 for both
+        * stripes.
+        */
+       for (i = 0; i < stripes; i++)
+               acc->awb_fr.stripes[i].grid_cfg.height_per_slice = 1;
+
        if (imgu_css_awb_fr_ops_calc(css, pipe, &acc->awb_fr))
                return -EINVAL;
 
@@ -2597,15 +2597,15 @@ int imgu_css_cfg_acc(struct imgu_css *css, unsigned int pipe,
                        imgu_css_grid_end(acc->af.stripes[1].grid_cfg.x_start,
                                          acc->af.stripes[1].grid_cfg.width,
                                          b_w_log2);
-
-               /*
-                * To reduce complexity of debubbling and loading statistics
-                * fix grid_height_per_slice to 1 for both stripes
-                */
-               for (i = 0; i < stripes; i++)
-                       acc->af.stripes[i].grid_cfg.height_per_slice = 1;
        }
 
+       /*
+        * To reduce complexity of debubbling and loading statistics
+        * fix grid_height_per_slice to 1 for both stripes
+        */
+       for (i = 0; i < stripes; i++)
+               acc->af.stripes[i].grid_cfg.height_per_slice = 1;
+
        if (imgu_css_af_ops_calc(css, pipe, &acc->af))
                return -EINVAL;
 
@@ -2677,15 +2677,15 @@ int imgu_css_cfg_acc(struct imgu_css *css, unsigned int pipe,
                        imgu_css_grid_end(acc->awb.stripes[1].grid.x_start,
                                          acc->awb.stripes[1].grid.width,
                                          b_w_log2);
-
-               /*
-                * To reduce complexity of debubbling and loading statistics
-                * fix grid_height_per_slice to 1 for both stripes
-                */
-               for (i = 0; i < stripes; i++)
-                       acc->awb.stripes[i].grid.height_per_slice = 1;
        }
 
+       /*
+        * To reduce complexity of debubbling and loading statistics
+        * fix grid_height_per_slice to 1 for both stripes
+        */
+       for (i = 0; i < stripes; i++)
+               acc->awb.stripes[i].grid.height_per_slice = 1;
+
        if (imgu_css_awb_ops_calc(css, pipe, &acc->awb))
                return -EINVAL;
 
index 0c453b37f8c4ba5fdbac86915477fa32469edc6b..18ca22c3018a27d0af9e8da90490eab14693936d 100644 (file)
@@ -762,7 +762,6 @@ static int __maybe_unused imgu_suspend(struct device *dev)
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct imgu_device *imgu = pci_get_drvdata(pci_dev);
 
-       dev_dbg(dev, "enter %s\n", __func__);
        imgu->suspend_in_stream = imgu_css_is_streaming(&imgu->css);
        if (!imgu->suspend_in_stream)
                goto out;
@@ -783,7 +782,6 @@ static int __maybe_unused imgu_suspend(struct device *dev)
        imgu_powerdown(imgu);
        pm_runtime_force_suspend(dev);
 out:
-       dev_dbg(dev, "leave %s\n", __func__);
        return 0;
 }
 
@@ -793,8 +791,6 @@ static int __maybe_unused imgu_resume(struct device *dev)
        int r = 0;
        unsigned int pipe;
 
-       dev_dbg(dev, "enter %s\n", __func__);
-
        if (!imgu->suspend_in_stream)
                goto out;
 
@@ -821,8 +817,6 @@ static int __maybe_unused imgu_resume(struct device *dev)
        }
 
 out:
-       dev_dbg(dev, "leave %s\n", __func__);
-
        return r;
 }
 
index 04ce0e7eb5578487afcb5f4e406134852d2495d9..d2844414de4f71825aa0cbcf61cba4595c72c6f6 100644 (file)
@@ -1260,7 +1260,7 @@ static int csi2_init_entities(struct iss_csi2_device *csi2, const char *subname)
        struct media_pad *pads = csi2->pads;
        struct media_entity *me = &sd->entity;
        int ret;
-       char name[V4L2_SUBDEV_NAME_SIZE];
+       char name[32];
 
        v4l2_subdev_init(sd, &csi2_ops);
        sd->internal_ops = &csi2_internal_ops;
index 8e248d4a0aecfca9c409330fb2564a32a28edfc5..f52df683604525fd17b82087ef7f867f2d9155a2 100644 (file)
@@ -708,7 +708,7 @@ static struct platform_driver cedrus_driver = {
        .remove_new     = cedrus_remove,
        .driver         = {
                .name           = CEDRUS_NAME,
-               .of_match_table = of_match_ptr(cedrus_dt_match),
+               .of_match_table = cedrus_dt_match,
                .pm             = &cedrus_dev_pm_ops,
        },
 };
index b696bf884cbd69ebafe9490ae63a5a7834ea3d07..32af0e96e762b490dd9560a8ab4530e0991f593a 100644 (file)
@@ -172,12 +172,12 @@ int cedrus_hw_suspend(struct device *device)
 {
        struct cedrus_dev *dev = dev_get_drvdata(device);
 
-       reset_control_assert(dev->rstc);
-
        clk_disable_unprepare(dev->ram_clk);
        clk_disable_unprepare(dev->mod_clk);
        clk_disable_unprepare(dev->ahb_clk);
 
+       reset_control_assert(dev->rstc);
+
        return 0;
 }
 
@@ -186,11 +186,18 @@ int cedrus_hw_resume(struct device *device)
        struct cedrus_dev *dev = dev_get_drvdata(device);
        int ret;
 
+       ret = reset_control_reset(dev->rstc);
+       if (ret) {
+               dev_err(dev->dev, "Failed to apply reset\n");
+
+               return ret;
+       }
+
        ret = clk_prepare_enable(dev->ahb_clk);
        if (ret) {
                dev_err(dev->dev, "Failed to enable AHB clock\n");
 
-               return ret;
+               goto err_rst;
        }
 
        ret = clk_prepare_enable(dev->mod_clk);
@@ -207,21 +214,14 @@ int cedrus_hw_resume(struct device *device)
                goto err_mod_clk;
        }
 
-       ret = reset_control_reset(dev->rstc);
-       if (ret) {
-               dev_err(dev->dev, "Failed to apply reset\n");
-
-               goto err_ram_clk;
-       }
-
        return 0;
 
-err_ram_clk:
-       clk_disable_unprepare(dev->ram_clk);
 err_mod_clk:
        clk_disable_unprepare(dev->mod_clk);
 err_ahb_clk:
        clk_disable_unprepare(dev->ahb_clk);
+err_rst:
+       reset_control_assert(dev->rstc);
 
        return ret;
 }
index e79657920dc8ab338fe75c30ceaa6a32fc741057..9aa72863c213e3bcfdd440ccf211ae3639bfbad7 100644 (file)
@@ -607,10 +607,10 @@ static int tegra_csi_channel_init(struct tegra_csi_channel *chan)
        v4l2_subdev_init(subdev, &tegra_csi_ops);
        subdev->dev = csi->dev;
        if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
-               snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s-%d", "tpg",
+               snprintf(subdev->name, sizeof(subdev->name), "%s-%d", "tpg",
                         chan->csi_port_nums[0]);
        else
-               snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s",
+               snprintf(subdev->name, sizeof(subdev->name), "%s",
                         kbasename(chan->of_node->full_name));
 
        v4l2_set_subdevdata(subdev, chan);
index 191ecd19a6a7c78b20fbc3f35b35bb1cfda5654c..e95cc7bb190e1f76987584308a007bd59050134b 100644 (file)
@@ -163,7 +163,7 @@ static int tegra_vip_channel_init(struct tegra_vip *vip)
        subdev = &vip->chan.subdev;
        v4l2_subdev_init(subdev, &tegra_vip_ops);
        subdev->dev = vip->dev;
-       snprintf(subdev->name, V4L2_SUBDEV_NAME_SIZE, "%s",
+       snprintf(subdev->name, sizeof(subdev->name), "%s",
                 kbasename(vip->chan.of_node->full_name));
 
        v4l2_set_subdevdata(subdev, &vip->chan);
index 925ee1d61afb20e06cf01640b48d7576db51b083..8ca061d3bbb9215d04a4381a84f07b09147f2c8a 100644 (file)
@@ -170,13 +170,6 @@ static const struct serial8250_config uart_config[] = {
                .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
                .flags          = UART_CAP_FIFO,
        },
-       [PORT_AR7] = {
-               .name           = "AR7",
-               .fifo_size      = 16,
-               .tx_loadsz      = 16,
-               .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00,
-               .flags          = UART_CAP_FIFO /* | UART_CAP_AFE */,
-       },
        [PORT_U6_16550A] = {
                .name           = "U6_16550A",
                .fifo_size      = 64,
index ca56242972b3af646b6808b33d7f7c4947c45b6c..84547d998bcf3b1a0305e489ff9cbf5b65c276eb 100644 (file)
@@ -31,11 +31,9 @@ struct mlx5_vdpa_mr {
        struct list_head head;
        unsigned long num_directs;
        unsigned long num_klms;
-       /* state of dvq mr */
-       bool initialized;
 
-       /* serialize mkey creation and destruction */
-       struct mutex mkey_mtx;
+       struct vhost_iotlb *iotlb;
+
        bool user_mr;
 };
 
@@ -74,11 +72,12 @@ struct mlx5_vdpa_wq_ent {
 enum {
        MLX5_VDPA_DATAVQ_GROUP,
        MLX5_VDPA_CVQ_GROUP,
+       MLX5_VDPA_DATAVQ_DESC_GROUP,
        MLX5_VDPA_NUMVQ_GROUPS
 };
 
 enum {
-       MLX5_VDPA_NUM_AS = MLX5_VDPA_NUMVQ_GROUPS
+       MLX5_VDPA_NUM_AS = 2
 };
 
 struct mlx5_vdpa_dev {
@@ -93,7 +92,9 @@ struct mlx5_vdpa_dev {
        u16 max_idx;
        u32 generation;
 
-       struct mlx5_vdpa_mr mr;
+       struct mlx5_vdpa_mr *mr[MLX5_VDPA_NUM_AS];
+       /* serialize mr access */
+       struct mutex mr_mtx;
        struct mlx5_control_vq cvq;
        struct workqueue_struct *wq;
        unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS];
@@ -114,12 +115,19 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev);
 int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
                          int inlen);
 int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
-int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-                            bool *change_map, unsigned int asid);
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-                       unsigned int asid);
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
-void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
+struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+                                        struct vhost_iotlb *iotlb);
+void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev);
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
+                         struct mlx5_vdpa_mr *mr);
+void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
+                        struct mlx5_vdpa_mr *mr,
+                        unsigned int asid);
+int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
+                               struct vhost_iotlb *iotlb,
+                               unsigned int asid);
+int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev);
+int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
 
 #define mlx5_vdpa_warn(__dev, format, ...)                                                         \
        dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__,     \
index 5a1971fcd87b109d33be10ae8bdb678428e5d2d8..2197c46e563a1f13414588260e3fda3195f1052c 100644 (file)
@@ -301,10 +301,13 @@ static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
        sg_free_table(&mr->sg_head);
 }
 
-static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm,
+static int add_direct_chain(struct mlx5_vdpa_dev *mvdev,
+                           struct mlx5_vdpa_mr *mr,
+                           u64 start,
+                           u64 size,
+                           u8 perm,
                            struct vhost_iotlb *iotlb)
 {
-       struct mlx5_vdpa_mr *mr = &mvdev->mr;
        struct mlx5_vdpa_direct_mr *dmr;
        struct mlx5_vdpa_direct_mr *n;
        LIST_HEAD(tmp);
@@ -354,9 +357,10 @@ err_alloc:
  * indirect memory key that provides access to the enitre address space given
  * by iotlb.
  */
-static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int create_user_mr(struct mlx5_vdpa_dev *mvdev,
+                         struct mlx5_vdpa_mr *mr,
+                         struct vhost_iotlb *iotlb)
 {
-       struct mlx5_vdpa_mr *mr = &mvdev->mr;
        struct mlx5_vdpa_direct_mr *dmr;
        struct mlx5_vdpa_direct_mr *n;
        struct vhost_iotlb_map *map;
@@ -384,7 +388,7 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb
                                                                       LOG_MAX_KLM_SIZE);
                                        mr->num_klms += nnuls;
                                }
-                               err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+                               err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
                                if (err)
                                        goto err_chain;
                        }
@@ -393,7 +397,7 @@ static int create_user_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb
                        pperm = map->perm;
                }
        }
-       err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb);
+       err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
        if (err)
                goto err_chain;
 
@@ -450,20 +454,23 @@ static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
        mlx5_vdpa_destroy_mkey(mvdev, mr->mkey);
 }
 
-static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
+static int dup_iotlb(struct vhost_iotlb *dst, struct vhost_iotlb *src)
 {
        struct vhost_iotlb_map *map;
        u64 start = 0, last = ULLONG_MAX;
        int err;
 
+       if (dst == src)
+               return -EINVAL;
+
        if (!src) {
-               err = vhost_iotlb_add_range(mvdev->cvq.iotlb, start, last, start, VHOST_ACCESS_RW);
+               err = vhost_iotlb_add_range(dst, start, last, start, VHOST_ACCESS_RW);
                return err;
        }
 
        for (map = vhost_iotlb_itree_first(src, start, last); map;
                map = vhost_iotlb_itree_next(map, start, last)) {
-               err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start, map->last,
+               err = vhost_iotlb_add_range(dst, map->start, map->last,
                                            map->addr, map->perm);
                if (err)
                        return err;
@@ -471,9 +478,9 @@ static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
        return 0;
 }
 
-static void prune_iotlb(struct mlx5_vdpa_dev *mvdev)
+static void prune_iotlb(struct vhost_iotlb *iotlb)
 {
-       vhost_iotlb_del_range(mvdev->cvq.iotlb, 0, ULLONG_MAX);
+       vhost_iotlb_del_range(iotlb, 0, ULLONG_MAX);
 }
 
 static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
@@ -489,133 +496,169 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
        }
 }
 
-static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
 {
-       if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
-               return;
+       if (mr->user_mr)
+               destroy_user_mr(mvdev, mr);
+       else
+               destroy_dma_mr(mvdev, mr);
 
-       prune_iotlb(mvdev);
+       vhost_iotlb_free(mr->iotlb);
 }
 
-static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev,
+                         struct mlx5_vdpa_mr *mr)
 {
-       struct mlx5_vdpa_mr *mr = &mvdev->mr;
-
-       if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
+       if (!mr)
                return;
 
-       if (!mr->initialized)
-               return;
+       mutex_lock(&mvdev->mr_mtx);
 
-       if (mr->user_mr)
-               destroy_user_mr(mvdev, mr);
-       else
-               destroy_dma_mr(mvdev, mr);
+       _mlx5_vdpa_destroy_mr(mvdev, mr);
+
+       for (int i = 0; i < MLX5_VDPA_NUM_AS; i++) {
+               if (mvdev->mr[i] == mr)
+                       mvdev->mr[i] = NULL;
+       }
 
-       mr->initialized = false;
+       mutex_unlock(&mvdev->mr_mtx);
+
+       kfree(mr);
 }
 
-void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
+                        struct mlx5_vdpa_mr *new_mr,
+                        unsigned int asid)
 {
-       struct mlx5_vdpa_mr *mr = &mvdev->mr;
+       struct mlx5_vdpa_mr *old_mr = mvdev->mr[asid];
 
-       mutex_lock(&mr->mkey_mtx);
+       mutex_lock(&mvdev->mr_mtx);
 
-       _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
-       _mlx5_vdpa_destroy_cvq_mr(mvdev, asid);
+       mvdev->mr[asid] = new_mr;
+       if (old_mr) {
+               _mlx5_vdpa_destroy_mr(mvdev, old_mr);
+               kfree(old_mr);
+       }
 
-       mutex_unlock(&mr->mkey_mtx);
-}
+       mutex_unlock(&mvdev->mr_mtx);
 
-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
-{
-       mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
-       mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
 }
 
-static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
-                                   struct vhost_iotlb *iotlb,
-                                   unsigned int asid)
+void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
 {
-       if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
-               return 0;
+       for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
+               mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[i]);
 
-       return dup_iotlb(mvdev, iotlb);
+       prune_iotlb(mvdev->cvq.iotlb);
 }
 
-static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
-                                   struct vhost_iotlb *iotlb,
-                                   unsigned int asid)
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+                               struct mlx5_vdpa_mr *mr,
+                               struct vhost_iotlb *iotlb)
 {
-       struct mlx5_vdpa_mr *mr = &mvdev->mr;
        int err;
 
-       if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
-               return 0;
-
-       if (mr->initialized)
-               return 0;
-
        if (iotlb)
-               err = create_user_mr(mvdev, iotlb);
+               err = create_user_mr(mvdev, mr, iotlb);
        else
                err = create_dma_mr(mvdev, mr);
 
        if (err)
                return err;
 
-       mr->initialized = true;
+       mr->iotlb = vhost_iotlb_alloc(0, 0);
+       if (!mr->iotlb) {
+               err = -ENOMEM;
+               goto err_mr;
+       }
+
+       err = dup_iotlb(mr->iotlb, iotlb);
+       if (err)
+               goto err_iotlb;
 
        return 0;
+
+err_iotlb:
+       vhost_iotlb_free(mr->iotlb);
+
+err_mr:
+       if (iotlb)
+               destroy_user_mr(mvdev, mr);
+       else
+               destroy_dma_mr(mvdev, mr);
+
+       return err;
 }
 
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
-                               struct vhost_iotlb *iotlb, unsigned int asid)
+struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+                                        struct vhost_iotlb *iotlb)
 {
+       struct mlx5_vdpa_mr *mr;
        int err;
 
-       err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
-       if (err)
-               return err;
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+       if (!mr)
+               return ERR_PTR(-ENOMEM);
+
+       mutex_lock(&mvdev->mr_mtx);
+       err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
+       mutex_unlock(&mvdev->mr_mtx);
 
-       err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
        if (err)
                goto out_err;
 
-       return 0;
+       return mr;
 
 out_err:
-       _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
-
-       return err;
+       kfree(mr);
+       return ERR_PTR(err);
 }
 
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-                       unsigned int asid)
+int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
+                               struct vhost_iotlb *iotlb,
+                               unsigned int asid)
 {
        int err;
 
-       mutex_lock(&mvdev->mr.mkey_mtx);
-       err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
-       mutex_unlock(&mvdev->mr.mkey_mtx);
+       if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
+               return 0;
+
+       spin_lock(&mvdev->cvq.iommu_lock);
+
+       prune_iotlb(mvdev->cvq.iotlb);
+       err = dup_iotlb(mvdev->cvq.iotlb, iotlb);
+
+       spin_unlock(&mvdev->cvq.iommu_lock);
+
        return err;
 }
 
-int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-                            bool *change_map, unsigned int asid)
+int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev)
 {
-       struct mlx5_vdpa_mr *mr = &mvdev->mr;
-       int err = 0;
+       struct mlx5_vdpa_mr *mr;
+
+       mr = mlx5_vdpa_create_mr(mvdev, NULL);
+       if (IS_ERR(mr))
+               return PTR_ERR(mr);
 
-       *change_map = false;
-       mutex_lock(&mr->mkey_mtx);
-       if (mr->initialized) {
-               mlx5_vdpa_info(mvdev, "memory map update\n");
-               *change_map = true;
+       mlx5_vdpa_update_mr(mvdev, mr, 0);
+
+       return mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, 0);
+}
+
+int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+{
+       if (asid >= MLX5_VDPA_NUM_AS)
+               return -EINVAL;
+
+       mlx5_vdpa_destroy_mr(mvdev, mvdev->mr[asid]);
+
+       if (asid == 0 && MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+               if (mlx5_vdpa_create_dma_mr(mvdev))
+                       mlx5_vdpa_warn(mvdev, "create DMA MR failed\n");
+       } else {
+               mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, asid);
        }
-       if (!*change_map)
-               err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
-       mutex_unlock(&mr->mkey_mtx);
 
-       return err;
+       return 0;
 }
index d5a59c9035fbed9af6d762467e8e517d4c8e3ecc..5c5a41b64bfcd67d707669d02468606fac382706 100644 (file)
@@ -256,7 +256,7 @@ int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
                mlx5_vdpa_warn(mvdev, "resources already allocated\n");
                return -EINVAL;
        }
-       mutex_init(&mvdev->mr.mkey_mtx);
+       mutex_init(&mvdev->mr_mtx);
        res->uar = mlx5_get_uars_page(mdev);
        if (IS_ERR(res->uar)) {
                err = PTR_ERR(res->uar);
@@ -301,7 +301,7 @@ err_pd:
 err_uctx:
        mlx5_put_uars_page(mdev, res->uar);
 err_uars:
-       mutex_destroy(&mvdev->mr.mkey_mtx);
+       mutex_destroy(&mvdev->mr_mtx);
        return err;
 }
 
@@ -318,6 +318,6 @@ void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
        dealloc_pd(mvdev, res->pdn, res->uid);
        destroy_uctx(mvdev, res->uid);
        mlx5_put_uars_page(mvdev->mdev, res->uar);
-       mutex_destroy(&mvdev->mr.mkey_mtx);
+       mutex_destroy(&mvdev->mr_mtx);
        res->valid = false;
 }
index 946488b8989f4b379b811ad05e401527fa9ea9b3..12ac3397f39b819aa766e6da8a90e906b4350988 100644 (file)
@@ -7,6 +7,7 @@
 #include <uapi/linux/virtio_net.h>
 #include <uapi/linux/virtio_ids.h>
 #include <uapi/linux/vdpa.h>
+#include <uapi/linux/vhost_types.h>
 #include <linux/virtio_config.h>
 #include <linux/auxiliary_bus.h>
 #include <linux/mlx5/cq.h>
@@ -861,6 +862,9 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
 {
        int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in);
        u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {};
+       struct mlx5_vdpa_dev *mvdev = &ndev->mvdev;
+       struct mlx5_vdpa_mr *vq_mr;
+       struct mlx5_vdpa_mr *vq_desc_mr;
        void *obj_context;
        u16 mlx_features;
        void *cmd_hdr;
@@ -913,7 +917,14 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
        MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
        MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
        MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
-       MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey);
+       vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]];
+       if (vq_mr)
+               MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey);
+
+       vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]];
+       if (vq_desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported))
+               MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey);
+
        MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
        MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
        MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
@@ -2301,6 +2312,16 @@ static u32 mlx5_vdpa_get_vq_group(struct vdpa_device *vdev, u16 idx)
        return MLX5_VDPA_DATAVQ_GROUP;
 }
 
+static u32 mlx5_vdpa_get_vq_desc_group(struct vdpa_device *vdev, u16 idx)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+       if (is_ctrl_vq_idx(mvdev, idx))
+               return MLX5_VDPA_CVQ_GROUP;
+
+       return MLX5_VDPA_DATAVQ_DESC_GROUP;
+}
+
 static u64 mlx_to_vritio_features(u16 dev_features)
 {
        u64 result = 0;
@@ -2539,6 +2560,11 @@ static void unregister_link_notifier(struct mlx5_vdpa_net *ndev)
                flush_workqueue(ndev->mvdev.wq);
 }
 
+static u64 mlx5_vdpa_get_backend_features(const struct vdpa_device *vdpa)
+{
+       return BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK);
+}
+
 static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
 {
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2673,7 +2699,8 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
 }
 
 static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
-                               struct vhost_iotlb *iotlb, unsigned int asid)
+                               struct mlx5_vdpa_mr *new_mr,
+                               unsigned int asid)
 {
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        int err;
@@ -2681,28 +2708,21 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
        suspend_vqs(ndev);
        err = save_channels_info(ndev);
        if (err)
-               goto err_mr;
+               return err;
 
        teardown_driver(ndev);
-       mlx5_vdpa_destroy_mr_asid(mvdev, asid);
-       err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
-       if (err)
-               goto err_mr;
+
+       mlx5_vdpa_update_mr(mvdev, new_mr, asid);
 
        if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended)
-               goto err_mr;
+               return 0;
 
        restore_channels_info(ndev);
        err = setup_driver(mvdev);
        if (err)
-               goto err_setup;
+               return err;
 
        return 0;
-
-err_setup:
-       mlx5_vdpa_destroy_mr_asid(mvdev, asid);
-err_mr:
-       return err;
 }
 
 /* reslock must be held for this function */
@@ -2841,7 +2861,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status)
 err_driver:
        unregister_link_notifier(ndev);
 err_setup:
-       mlx5_vdpa_destroy_mr(&ndev->mvdev);
+       mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
        ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED;
 err_clear:
        up_write(&ndev->reslock);
@@ -2856,7 +2876,7 @@ static void init_group_to_asid_map(struct mlx5_vdpa_dev *mvdev)
                mvdev->group2asid[i] = 0;
 }
 
-static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+static int mlx5_vdpa_compat_reset(struct vdpa_device *vdev, u32 flags)
 {
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
@@ -2868,7 +2888,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
        unregister_link_notifier(ndev);
        teardown_driver(ndev);
        clear_vqs_ready(ndev);
-       mlx5_vdpa_destroy_mr(&ndev->mvdev);
+       if (flags & VDPA_RESET_F_CLEAN_MAP)
+               mlx5_vdpa_destroy_mr_resources(&ndev->mvdev);
        ndev->mvdev.status = 0;
        ndev->mvdev.suspended = false;
        ndev->cur_num_vqs = 0;
@@ -2879,8 +2900,9 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
        init_group_to_asid_map(mvdev);
        ++mvdev->generation;
 
-       if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
-               if (mlx5_vdpa_create_mr(mvdev, NULL, 0))
+       if ((flags & VDPA_RESET_F_CLEAN_MAP) &&
+           MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
+               if (mlx5_vdpa_create_dma_mr(mvdev))
                        mlx5_vdpa_warn(mvdev, "create MR failed\n");
        }
        up_write(&ndev->reslock);
@@ -2888,6 +2910,11 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
        return 0;
 }
 
+static int mlx5_vdpa_reset(struct vdpa_device *vdev)
+{
+       return mlx5_vdpa_compat_reset(vdev, 0);
+}
+
 static size_t mlx5_vdpa_get_config_size(struct vdpa_device *vdev)
 {
        return sizeof(struct virtio_net_config);
@@ -2919,18 +2946,38 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
 static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
                        unsigned int asid)
 {
-       bool change_map;
+       struct mlx5_vdpa_mr *new_mr;
        int err;
 
-       err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
-       if (err) {
-               mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
-               return err;
+       if (asid >= MLX5_VDPA_NUM_AS)
+               return -EINVAL;
+
+       if (vhost_iotlb_itree_first(iotlb, 0, U64_MAX)) {
+               new_mr = mlx5_vdpa_create_mr(mvdev, iotlb);
+               if (IS_ERR(new_mr)) {
+                       err = PTR_ERR(new_mr);
+                       mlx5_vdpa_warn(mvdev, "create map failed(%d)\n", err);
+                       return err;
+               }
+       } else {
+               /* Empty iotlbs don't have an mr but will clear the previous mr. */
+               new_mr = NULL;
+       }
+
+       if (!mvdev->mr[asid]) {
+               mlx5_vdpa_update_mr(mvdev, new_mr, asid);
+       } else {
+               err = mlx5_vdpa_change_map(mvdev, new_mr, asid);
+               if (err) {
+                       mlx5_vdpa_warn(mvdev, "change map failed(%d)\n", err);
+                       goto out_err;
+               }
        }
 
-       if (change_map)
-               err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
+       return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid);
 
+out_err:
+       mlx5_vdpa_destroy_mr(mvdev, new_mr);
        return err;
 }
 
@@ -2947,6 +2994,18 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
        return err;
 }
 
+static int mlx5_vdpa_reset_map(struct vdpa_device *vdev, unsigned int asid)
+{
+       struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+       int err;
+
+       down_write(&ndev->reslock);
+       err = mlx5_vdpa_reset_mr(mvdev, asid);
+       up_write(&ndev->reslock);
+       return err;
+}
+
 static struct device *mlx5_get_vq_dma_dev(struct vdpa_device *vdev, u16 idx)
 {
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
@@ -2985,7 +3044,7 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
        ndev = to_mlx5_vdpa_ndev(mvdev);
 
        free_resources(ndev);
-       mlx5_vdpa_destroy_mr(mvdev);
+       mlx5_vdpa_destroy_mr_resources(mvdev);
        if (!is_zero_ether_addr(ndev->config.mac)) {
                pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev));
                mlx5_mpfs_del_mac(pfmdev, ndev->config.mac);
@@ -3169,12 +3228,19 @@ static int mlx5_set_group_asid(struct vdpa_device *vdev, u32 group,
                               unsigned int asid)
 {
        struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+       int err = 0;
 
        if (group >= MLX5_VDPA_NUMVQ_GROUPS)
                return -EINVAL;
 
        mvdev->group2asid[group] = asid;
-       return 0;
+
+       mutex_lock(&mvdev->mr_mtx);
+       if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid])
+               err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid);
+       mutex_unlock(&mvdev->mr_mtx);
+
+       return err;
 }
 
 static const struct vdpa_config_ops mlx5_vdpa_ops = {
@@ -3191,7 +3257,9 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
        .get_vq_irq = mlx5_get_vq_irq,
        .get_vq_align = mlx5_vdpa_get_vq_align,
        .get_vq_group = mlx5_vdpa_get_vq_group,
+       .get_vq_desc_group = mlx5_vdpa_get_vq_desc_group, /* Op disabled if not supported. */
        .get_device_features = mlx5_vdpa_get_device_features,
+       .get_backend_features = mlx5_vdpa_get_backend_features,
        .set_driver_features = mlx5_vdpa_set_driver_features,
        .get_driver_features = mlx5_vdpa_get_driver_features,
        .set_config_cb = mlx5_vdpa_set_config_cb,
@@ -3201,11 +3269,13 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
        .get_status = mlx5_vdpa_get_status,
        .set_status = mlx5_vdpa_set_status,
        .reset = mlx5_vdpa_reset,
+       .compat_reset = mlx5_vdpa_compat_reset,
        .get_config_size = mlx5_vdpa_get_config_size,
        .get_config = mlx5_vdpa_get_config,
        .set_config = mlx5_vdpa_set_config,
        .get_generation = mlx5_vdpa_get_generation,
        .set_map = mlx5_vdpa_set_map,
+       .reset_map = mlx5_vdpa_reset_map,
        .set_group_asid = mlx5_set_group_asid,
        .get_vq_dma_dev = mlx5_get_vq_dma_dev,
        .free = mlx5_vdpa_free,
@@ -3289,6 +3359,7 @@ struct mlx5_vdpa_mgmtdev {
        struct vdpa_mgmt_dev mgtdev;
        struct mlx5_adev *madev;
        struct mlx5_vdpa_net *ndev;
+       struct vdpa_config_ops vdpa_ops;
 };
 
 static int config_func_mtu(struct mlx5_core_dev *mdev, u16 mtu)
@@ -3402,7 +3473,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
                max_vqs = 2;
        }
 
-       ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
+       ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mgtdev->vdpa_ops,
                                 MLX5_VDPA_NUMVQ_GROUPS, MLX5_VDPA_NUM_AS, name, false);
        if (IS_ERR(ndev))
                return PTR_ERR(ndev);
@@ -3485,7 +3556,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
                goto err_mpfs;
 
        if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
-               err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
+               err = mlx5_vdpa_create_dma_mr(mvdev);
                if (err)
                        goto err_res;
        }
@@ -3515,7 +3586,7 @@ err_reg:
 err_res2:
        free_resources(ndev);
 err_mr:
-       mlx5_vdpa_destroy_mr(mvdev);
+       mlx5_vdpa_destroy_mr_resources(mvdev);
 err_res:
        mlx5_vdpa_free_resources(&ndev->mvdev);
 err_mpfs:
@@ -3575,6 +3646,10 @@ static int mlx5v_probe(struct auxiliary_device *adev,
                MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1;
        mgtdev->mgtdev.supported_features = get_supported_features(mdev);
        mgtdev->madev = madev;
+       mgtdev->vdpa_ops = mlx5_vdpa_ops;
+
+       if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, desc_group_mkey_supported))
+               mgtdev->vdpa_ops.get_vq_desc_group = NULL;
 
        err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
        if (err)
index 76d41058add9a8a5f45edb405ed822efba6de67f..be2925d0d28368e8959a9696f2d1c92ba4eb7fd8 100644 (file)
@@ -139,7 +139,7 @@ static void vdpasim_vq_reset(struct vdpasim *vdpasim,
        vq->vring.notify = NULL;
 }
 
-static void vdpasim_do_reset(struct vdpasim *vdpasim)
+static void vdpasim_do_reset(struct vdpasim *vdpasim, u32 flags)
 {
        int i;
 
@@ -151,11 +151,13 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim)
                                 &vdpasim->iommu_lock);
        }
 
-       for (i = 0; i < vdpasim->dev_attr.nas; i++) {
-               vhost_iotlb_reset(&vdpasim->iommu[i]);
-               vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
-                                     0, VHOST_MAP_RW);
-               vdpasim->iommu_pt[i] = true;
+       if (flags & VDPA_RESET_F_CLEAN_MAP) {
+               for (i = 0; i < vdpasim->dev_attr.nas; i++) {
+                       vhost_iotlb_reset(&vdpasim->iommu[i]);
+                       vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX,
+                                             0, VHOST_MAP_RW);
+                       vdpasim->iommu_pt[i] = true;
+               }
        }
 
        vdpasim->running = true;
@@ -259,8 +261,12 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
        if (!vdpasim->iommu_pt)
                goto err_iommu;
 
-       for (i = 0; i < vdpasim->dev_attr.nas; i++)
+       for (i = 0; i < vdpasim->dev_attr.nas; i++) {
                vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);
+               vhost_iotlb_add_range(&vdpasim->iommu[i], 0, ULONG_MAX, 0,
+                                     VHOST_MAP_RW);
+               vdpasim->iommu_pt[i] = true;
+       }
 
        for (i = 0; i < dev_attr->nvqs; i++)
                vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
@@ -480,18 +486,23 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
        mutex_unlock(&vdpasim->mutex);
 }
 
-static int vdpasim_reset(struct vdpa_device *vdpa)
+static int vdpasim_compat_reset(struct vdpa_device *vdpa, u32 flags)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
 
        mutex_lock(&vdpasim->mutex);
        vdpasim->status = 0;
-       vdpasim_do_reset(vdpasim);
+       vdpasim_do_reset(vdpasim, flags);
        mutex_unlock(&vdpasim->mutex);
 
        return 0;
 }
 
+static int vdpasim_reset(struct vdpa_device *vdpa)
+{
+       return vdpasim_compat_reset(vdpa, 0);
+}
+
 static int vdpasim_suspend(struct vdpa_device *vdpa)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -637,6 +648,25 @@ err:
        return ret;
 }
 
+static int vdpasim_reset_map(struct vdpa_device *vdpa, unsigned int asid)
+{
+       struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+       if (asid >= vdpasim->dev_attr.nas)
+               return -EINVAL;
+
+       spin_lock(&vdpasim->iommu_lock);
+       if (vdpasim->iommu_pt[asid])
+               goto out;
+       vhost_iotlb_reset(&vdpasim->iommu[asid]);
+       vhost_iotlb_add_range(&vdpasim->iommu[asid], 0, ULONG_MAX,
+                             0, VHOST_MAP_RW);
+       vdpasim->iommu_pt[asid] = true;
+out:
+       spin_unlock(&vdpasim->iommu_lock);
+       return 0;
+}
+
 static int vdpasim_bind_mm(struct vdpa_device *vdpa, struct mm_struct *mm)
 {
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -749,6 +779,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
        .get_status             = vdpasim_get_status,
        .set_status             = vdpasim_set_status,
        .reset                  = vdpasim_reset,
+       .compat_reset           = vdpasim_compat_reset,
        .suspend                = vdpasim_suspend,
        .resume                 = vdpasim_resume,
        .get_config_size        = vdpasim_get_config_size,
@@ -759,6 +790,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
        .set_group_asid         = vdpasim_set_group_asid,
        .dma_map                = vdpasim_dma_map,
        .dma_unmap              = vdpasim_dma_unmap,
+       .reset_map              = vdpasim_reset_map,
        .bind_mm                = vdpasim_bind_mm,
        .unbind_mm              = vdpasim_unbind_mm,
        .free                   = vdpasim_free,
@@ -787,6 +819,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
        .get_status             = vdpasim_get_status,
        .set_status             = vdpasim_set_status,
        .reset                  = vdpasim_reset,
+       .compat_reset           = vdpasim_compat_reset,
        .suspend                = vdpasim_suspend,
        .resume                 = vdpasim_resume,
        .get_config_size        = vdpasim_get_config_size,
@@ -796,6 +829,7 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
        .get_iova_range         = vdpasim_get_iova_range,
        .set_group_asid         = vdpasim_set_group_asid,
        .set_map                = vdpasim_set_map,
+       .reset_map              = vdpasim_reset_map,
        .bind_mm                = vdpasim_bind_mm,
        .unbind_mm              = vdpasim_unbind_mm,
        .free                   = vdpasim_free,
index df7869537ef146fdc62b6d29180e5e051c7c8d2c..0ddd4b8abecb30ed4a30bc1aaf5345e9b175208f 100644 (file)
@@ -134,7 +134,6 @@ static DEFINE_MUTEX(vduse_lock);
 static DEFINE_IDR(vduse_idr);
 
 static dev_t vduse_major;
-static struct class *vduse_class;
 static struct cdev vduse_ctrl_cdev;
 static struct cdev vduse_cdev;
 static struct workqueue_struct *vduse_irq_wq;
@@ -1528,6 +1527,16 @@ static const struct kobj_type vq_type = {
        .default_groups = vq_groups,
 };
 
+static char *vduse_devnode(const struct device *dev, umode_t *mode)
+{
+       return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
+}
+
+static const struct class vduse_class = {
+       .name = "vduse",
+       .devnode = vduse_devnode,
+};
+
 static void vduse_dev_deinit_vqs(struct vduse_dev *dev)
 {
        int i;
@@ -1638,7 +1647,7 @@ static int vduse_destroy_dev(char *name)
        mutex_unlock(&dev->lock);
 
        vduse_dev_reset(dev);
-       device_destroy(vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
+       device_destroy(&vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
        idr_remove(&vduse_idr, dev->minor);
        kvfree(dev->config);
        vduse_dev_deinit_vqs(dev);
@@ -1805,7 +1814,7 @@ static int vduse_create_dev(struct vduse_dev_config *config,
 
        dev->minor = ret;
        dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT;
-       dev->dev = device_create_with_groups(vduse_class, NULL,
+       dev->dev = device_create_with_groups(&vduse_class, NULL,
                                MKDEV(MAJOR(vduse_major), dev->minor),
                                dev, vduse_dev_groups, "%s", config->name);
        if (IS_ERR(dev->dev)) {
@@ -1821,7 +1830,7 @@ static int vduse_create_dev(struct vduse_dev_config *config,
 
        return 0;
 err_vqs:
-       device_destroy(vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
+       device_destroy(&vduse_class, MKDEV(MAJOR(vduse_major), dev->minor));
 err_dev:
        idr_remove(&vduse_idr, dev->minor);
 err_idr:
@@ -1934,11 +1943,6 @@ static const struct file_operations vduse_ctrl_fops = {
        .llseek         = noop_llseek,
 };
 
-static char *vduse_devnode(const struct device *dev, umode_t *mode)
-{
-       return kasprintf(GFP_KERNEL, "vduse/%s", dev_name(dev));
-}
-
 struct vduse_mgmt_dev {
        struct vdpa_mgmt_dev mgmt_dev;
        struct device dev;
@@ -2082,11 +2086,9 @@ static int vduse_init(void)
        int ret;
        struct device *dev;
 
-       vduse_class = class_create("vduse");
-       if (IS_ERR(vduse_class))
-               return PTR_ERR(vduse_class);
-
-       vduse_class->devnode = vduse_devnode;
+       ret = class_register(&vduse_class);
+       if (ret)
+               return ret;
 
        ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse");
        if (ret)
@@ -2099,7 +2101,7 @@ static int vduse_init(void)
        if (ret)
                goto err_ctrl_cdev;
 
-       dev = device_create(vduse_class, NULL, vduse_major, NULL, "control");
+       dev = device_create(&vduse_class, NULL, vduse_major, NULL, "control");
        if (IS_ERR(dev)) {
                ret = PTR_ERR(dev);
                goto err_device;
@@ -2141,13 +2143,13 @@ err_bound_wq:
 err_wq:
        cdev_del(&vduse_cdev);
 err_cdev:
-       device_destroy(vduse_class, vduse_major);
+       device_destroy(&vduse_class, vduse_major);
 err_device:
        cdev_del(&vduse_ctrl_cdev);
 err_ctrl_cdev:
        unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
 err_chardev_region:
-       class_destroy(vduse_class);
+       class_unregister(&vduse_class);
        return ret;
 }
 module_init(vduse_init);
@@ -2159,10 +2161,10 @@ static void vduse_exit(void)
        destroy_workqueue(vduse_irq_bound_wq);
        destroy_workqueue(vduse_irq_wq);
        cdev_del(&vduse_cdev);
-       device_destroy(vduse_class, vduse_major);
+       device_destroy(&vduse_class, vduse_major);
        cdev_del(&vduse_ctrl_cdev);
        unregister_chrdev_region(vduse_major, VDUSE_DEV_MAX);
-       class_destroy(vduse_class);
+       class_unregister(&vduse_class);
 }
 module_exit(vduse_exit);
 
index 4e3b2c25c7213e16083c2b43cd5e947686d292ea..282aac45c690995cbe88151ea12cc565bf84d61d 100644 (file)
@@ -1158,7 +1158,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        /*
                         * Set prot_iter to data_iter and truncate it to
                         * prot_bytes, and advance data_iter past any
-                        * preceeding prot_bytes that may be present.
+                        * preceding prot_bytes that may be present.
                         *
                         * Also fix up the exp_data_len to reflect only the
                         * actual data payload length.
index 78379ffd23363d99cd4c95daf048d1955fc185f3..30df5c58db73a846dc3c58ad821201694869a348 100644 (file)
@@ -131,6 +131,15 @@ static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
        return vhost_vdpa_alloc_as(v, asid);
 }
 
+static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid)
+{
+       struct vdpa_device *vdpa = v->vdpa;
+       const struct vdpa_config_ops *ops = vdpa->config;
+
+       if (ops->reset_map)
+               ops->reset_map(vdpa, asid);
+}
+
 static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
 {
        struct vhost_vdpa_as *as = asid_to_as(v, asid);
@@ -140,6 +149,14 @@ static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
 
        hlist_del(&as->hash_link);
        vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
+       /*
+        * Devices with vendor specific IOMMU may need to restore
+        * iotlb to the initial or default state, which cannot be
+        * cleaned up in the all-range unmap call above. Give them
+        * a chance to clean up or reset the map to the desired
+        * state.
+        */
+       vhost_vdpa_reset_map(v, asid);
        kfree(as);
 
        return 0;
@@ -210,13 +227,24 @@ static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
        irq_bypass_unregister_producer(&vq->call_ctx.producer);
 }
 
-static int vhost_vdpa_reset(struct vhost_vdpa *v)
+static int _compat_vdpa_reset(struct vhost_vdpa *v)
 {
        struct vdpa_device *vdpa = v->vdpa;
+       u32 flags = 0;
 
-       v->in_batch = 0;
+       if (v->vdev.vqs) {
+               flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
+                                                   VHOST_BACKEND_F_IOTLB_PERSIST) ?
+                        VDPA_RESET_F_CLEAN_MAP : 0;
+       }
 
-       return vdpa_reset(vdpa);
+       return vdpa_reset(vdpa, flags);
+}
+
+static int vhost_vdpa_reset(struct vhost_vdpa *v)
+{
+       v->in_batch = 0;
+       return _compat_vdpa_reset(v);
 }
 
 static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
@@ -295,7 +323,7 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
                        vhost_vdpa_unsetup_vq_irq(v, i);
 
        if (status == 0) {
-               ret = vdpa_reset(vdpa);
+               ret = _compat_vdpa_reset(v);
                if (ret)
                        return ret;
        } else
@@ -389,6 +417,14 @@ static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
        return ops->resume;
 }
 
+static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
+{
+       struct vdpa_device *vdpa = v->vdpa;
+       const struct vdpa_config_ops *ops = vdpa->config;
+
+       return ops->get_vq_desc_group;
+}
+
 static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
 {
        struct vdpa_device *vdpa = v->vdpa;
@@ -414,6 +450,15 @@ static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
                return ops->get_backend_features(vdpa);
 }
 
+static bool vhost_vdpa_has_persistent_map(const struct vhost_vdpa *v)
+{
+       struct vdpa_device *vdpa = v->vdpa;
+       const struct vdpa_config_ops *ops = vdpa->config;
+
+       return (!ops->set_map && !ops->dma_map) || ops->reset_map ||
+              vhost_vdpa_get_backend_features(v) & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
+}
+
 static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
 {
        struct vdpa_device *vdpa = v->vdpa;
@@ -605,6 +650,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
                else if (copy_to_user(argp, &s, sizeof(s)))
                        return -EFAULT;
                return 0;
+       case VHOST_VDPA_GET_VRING_DESC_GROUP:
+               if (!vhost_vdpa_has_desc_group(v))
+                       return -EOPNOTSUPP;
+               s.index = idx;
+               s.num = ops->get_vq_desc_group(vdpa, idx);
+               if (s.num >= vdpa->ngroups)
+                       return -EIO;
+               else if (copy_to_user(argp, &s, sizeof(s)))
+                       return -EFAULT;
+               return 0;
        case VHOST_VDPA_SET_GROUP_ASID:
                if (copy_from_user(&s, argp, sizeof(s)))
                        return -EFAULT;
@@ -690,6 +745,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
                if (copy_from_user(&features, featurep, sizeof(features)))
                        return -EFAULT;
                if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
+                                BIT_ULL(VHOST_BACKEND_F_DESC_ASID) |
+                                BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST) |
                                 BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
                                 BIT_ULL(VHOST_BACKEND_F_RESUME) |
                                 BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
@@ -700,6 +757,15 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
                if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
                     !vhost_vdpa_can_resume(v))
                        return -EOPNOTSUPP;
+               if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
+                   !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)))
+                       return -EINVAL;
+               if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
+                    !vhost_vdpa_has_desc_group(v))
+                       return -EOPNOTSUPP;
+               if ((features & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST)) &&
+                    !vhost_vdpa_has_persistent_map(v))
+                       return -EOPNOTSUPP;
                vhost_set_backend_features(&v->vdev, features);
                return 0;
        }
@@ -753,6 +819,10 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
                        features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
                if (vhost_vdpa_can_resume(v))
                        features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
+               if (vhost_vdpa_has_desc_group(v))
+                       features |= BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
+               if (vhost_vdpa_has_persistent_map(v))
+                       features |= BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
                features |= vhost_vdpa_get_backend_features(v);
                if (copy_to_user(featurep, &features, sizeof(features)))
                        r = -EFAULT;
@@ -1285,6 +1355,7 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
        vhost_vdpa_free_domain(v);
        vhost_dev_cleanup(&v->vdev);
        kfree(v->vdev.vqs);
+       v->vdev.vqs = NULL;
 }
 
 static int vhost_vdpa_open(struct inode *inode, struct file *filep)
index b18c6b4f129a66cacc6041f05b52ef7bae2179ac..305f396c764c95b9335b58bf99e2deba098c8944 100644 (file)
@@ -3752,7 +3752,7 @@ release:
 }
 
 
-static int __exit amifb_remove(struct platform_device *pdev)
+static void __exit amifb_remove(struct platform_device *pdev)
 {
        struct fb_info *info = platform_get_drvdata(pdev);
 
@@ -3765,11 +3765,16 @@ static int __exit amifb_remove(struct platform_device *pdev)
        chipfree();
        framebuffer_release(info);
        amifb_video_off();
-       return 0;
 }
 
-static struct platform_driver amifb_driver = {
-       .remove = __exit_p(amifb_remove),
+/*
+ * amifb_remove() lives in .exit.text. For drivers registered via
+ * module_platform_driver_probe() this is ok because they cannot get unbound at
+ * runtime. The driver needs to be marked with __refdata, otherwise modpost
+ * triggers a section mismatch warning.
+ */
+static struct platform_driver amifb_driver __refdata = {
+       .remove_new = __exit_p(amifb_remove),
        .driver   = {
                .name   = "amiga-video",
        },
index a908db2334098610501eb0e2adf359369e17cf78..9e391e5eaf9dae4d1acbbb6e64e9da749f699cd1 100644 (file)
@@ -220,7 +220,7 @@ static inline void atmel_lcdfb_power_control(struct atmel_lcdfb_info *sinfo, int
        }
 }
 
-static const struct fb_fix_screeninfo atmel_lcdfb_fix __initconst = {
+static const struct fb_fix_screeninfo atmel_lcdfb_fix = {
        .type           = FB_TYPE_PACKED_PIXELS,
        .visual         = FB_VISUAL_TRUECOLOR,
        .xpanstep       = 0,
@@ -841,7 +841,7 @@ static void atmel_lcdfb_task(struct work_struct *work)
        atmel_lcdfb_reset(sinfo);
 }
 
-static int __init atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo)
+static int atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo)
 {
        struct fb_info *info = sinfo->info;
        int ret = 0;
@@ -1017,7 +1017,7 @@ put_display_node:
        return ret;
 }
 
-static int __init atmel_lcdfb_probe(struct platform_device *pdev)
+static int atmel_lcdfb_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct fb_info *info;
@@ -1223,14 +1223,14 @@ out:
        return ret;
 }
 
-static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
+static void atmel_lcdfb_remove(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct fb_info *info = dev_get_drvdata(dev);
        struct atmel_lcdfb_info *sinfo;
 
        if (!info || !info->par)
-               return 0;
+               return;
        sinfo = info->par;
 
        cancel_work_sync(&sinfo->task);
@@ -1252,8 +1252,6 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
        }
 
        framebuffer_release(info);
-
-       return 0;
 }
 
 #ifdef CONFIG_PM
@@ -1301,7 +1299,8 @@ static int atmel_lcdfb_resume(struct platform_device *pdev)
 #endif
 
 static struct platform_driver atmel_lcdfb_driver = {
-       .remove         = __exit_p(atmel_lcdfb_remove),
+       .probe          = atmel_lcdfb_probe,
+       .remove_new     = atmel_lcdfb_remove,
        .suspend        = atmel_lcdfb_suspend,
        .resume         = atmel_lcdfb_resume,
        .driver         = {
@@ -1309,8 +1308,7 @@ static struct platform_driver atmel_lcdfb_driver = {
                .of_match_table = atmel_lcdfb_dt_ids,
        },
 };
-
-module_platform_driver_probe(atmel_lcdfb_driver, atmel_lcdfb_probe);
+module_platform_driver(atmel_lcdfb_driver);
 
 MODULE_DESCRIPTION("AT91 LCD Controller framebuffer driver");
 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
index 7fbd9f069ac2ed8c62c463a07b8d8c73d2a11849..0bced82fa4940d08066b43fb60e26c810a44dce7 100644 (file)
@@ -490,7 +490,7 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
  * Workaround for failed writing desc register of planes.
  * Needed with MPC5121 DIU rev 2.0 silicon.
  */
-void wr_reg_wa(u32 *reg, u32 val)
+static void wr_reg_wa(u32 *reg, u32 val)
 {
        do {
                out_be32(reg, val);
index bf59daf862fc7184fe7194ffdbfa26c96334e512..a80939fe2ee6d1b1ea09f3e14122f0546fbfefa4 100644 (file)
@@ -1013,6 +1013,8 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
        } else if (IS_ENABLED(CONFIG_SYSFB)) {
                base = screen_info.lfb_base;
                size = screen_info.lfb_size;
+       } else {
+               goto err1;
        }
 
        /*
index e7e03e920729dce8c8878de75b1d3743c606085e..660499260f4651547fe5af38bf26999b4e794fda 100644 (file)
@@ -1421,7 +1421,6 @@ static int init_imstt(struct fb_info *info)
        if ((info->var.xres * info->var.yres) * (info->var.bits_per_pixel >> 3) > info->fix.smem_len
            || !(compute_imstt_regvals(par, info->var.xres, info->var.yres))) {
                printk("imsttfb: %ux%ux%u not supported\n", info->var.xres, info->var.yres, info->var.bits_per_pixel);
-               framebuffer_release(info);
                return -ENODEV;
        }
 
@@ -1453,14 +1452,11 @@ static int init_imstt(struct fb_info *info)
                      FBINFO_HWACCEL_FILLRECT |
                      FBINFO_HWACCEL_YPAN;
 
-       if (fb_alloc_cmap(&info->cmap, 0, 0)) {
-               framebuffer_release(info);
+       if (fb_alloc_cmap(&info->cmap, 0, 0))
                return -ENODEV;
-       }
 
        if (register_framebuffer(info) < 0) {
                fb_dealloc_cmap(&info->cmap);
-               framebuffer_release(info);
                return -ENODEV;
        }
 
@@ -1500,8 +1496,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        if (!request_mem_region(addr, size, "imsttfb")) {
                printk(KERN_ERR "imsttfb: Can't reserve memory region\n");
-               framebuffer_release(info);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto release_info;
        }
 
        switch (pdev->device) {
@@ -1518,36 +1514,39 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                        printk(KERN_INFO "imsttfb: Device 0x%x unknown, "
                                         "contact maintainer.\n", pdev->device);
                        ret = -ENODEV;
-                       goto error;
+                       goto release_mem_region;
        }
 
        info->fix.smem_start = addr;
        info->screen_base = (__u8 *)ioremap(addr, par->ramdac == IBM ?
                                            0x400000 : 0x800000);
        if (!info->screen_base)
-               goto error;
+               goto release_mem_region;
        info->fix.mmio_start = addr + 0x800000;
        par->dc_regs = ioremap(addr + 0x800000, 0x1000);
        if (!par->dc_regs)
-               goto error;
+               goto unmap_screen_base;
        par->cmap_regs_phys = addr + 0x840000;
        par->cmap_regs = (__u8 *)ioremap(addr + 0x840000, 0x1000);
        if (!par->cmap_regs)
-               goto error;
+               goto unmap_dc_regs;
        info->pseudo_palette = par->palette;
        ret = init_imstt(info);
        if (ret)
-               goto error;
+               goto unmap_cmap_regs;
 
        pci_set_drvdata(pdev, info);
-       return ret;
+       return 0;
 
-error:
-       if (par->dc_regs)
-               iounmap(par->dc_regs);
-       if (info->screen_base)
-               iounmap(info->screen_base);
+unmap_cmap_regs:
+       iounmap(par->cmap_regs);
+unmap_dc_regs:
+       iounmap(par->dc_regs);
+unmap_screen_base:
+       iounmap(info->screen_base);
+release_mem_region:
        release_mem_region(addr, size);
+release_info:
        framebuffer_release(info);
        return ret;
 }
index dcb1b81d35db57cc15dba8040c92875b86e02e6b..b421b46d88efcad3cd6a623cc049573f6ccd8f35 100644 (file)
@@ -423,11 +423,9 @@ static void offb_init_fb(struct platform_device *parent, const char *name,
        fix = &info->fix;
        var = &info->var;
 
-       if (name) {
-               strcpy(fix->id, "OFfb ");
-               strncat(fix->id, name, sizeof(fix->id) - sizeof("OFfb "));
-               fix->id[sizeof(fix->id) - 1] = '\0';
-       } else
+       if (name)
+               snprintf(fix->id, sizeof(fix->id), "OFfb %s", name);
+       else
                snprintf(fix->id, sizeof(fix->id), "OFfb %pOFn", dp);
 
 
index 42c96f1cfc93c435ccf94154da2110bae0d4f082..694cf6318782bc8e60fe6dfd4445894099ba74ff 100644 (file)
@@ -1643,17 +1643,16 @@ static int omapfb_do_probe(struct platform_device *pdev,
                r = -ENOMEM;
                goto cleanup;
        }
-       fbdev->int_irq = platform_get_irq(pdev, 0);
-       if (fbdev->int_irq < 0) {
-               r = -ENXIO;
+
+       r = platform_get_irq(pdev, 0);
+       if (r < 0)
                goto cleanup;
-       }
+       fbdev->int_irq = r;
 
-       fbdev->ext_irq = platform_get_irq(pdev, 1);
-       if (fbdev->ext_irq < 0) {
-               r = -ENXIO;
+       r = platform_get_irq(pdev, 1);
+       if (r < 0)
                goto cleanup;
-       }
+       fbdev->ext_irq = r;
 
        init_state++;
 
@@ -1857,20 +1856,13 @@ static int __init omapfb_setup(char *options)
                if (!strncmp(this_opt, "accel", 5))
                        def_accel = 1;
                else if (!strncmp(this_opt, "vram:", 5)) {
+                       unsigned long long vram;
                        char *suffix;
-                       unsigned long vram;
-                       vram = (simple_strtoul(this_opt + 5, &suffix, 0));
+
+                       vram = memparse(this_opt + 5, &suffix);
                        switch (suffix[0]) {
                        case '\0':
                                break;
-                       case 'm':
-                       case 'M':
-                               vram *= 1024;
-                               fallthrough;
-                       case 'k':
-                       case 'K':
-                               vram *= 1024;
-                               break;
                        default:
                                pr_debug("omapfb: invalid vram suffix %c\n",
                                         suffix[0]);
index 0daaf9f89bab5cf638aab7594c2854a05558e8e3..c6786726a1af1da9cd50b940c9f0502a0512b317 100644 (file)
@@ -221,7 +221,7 @@ err_reg:
        return r;
 }
 
-static int __exit tvc_remove(struct platform_device *pdev)
+static void tvc_remove(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
        struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -233,8 +233,6 @@ static int __exit tvc_remove(struct platform_device *pdev)
        tvc_disconnect(dssdev);
 
        omap_dss_put_device(in);
-
-       return 0;
 }
 
 static const struct of_device_id tvc_of_match[] = {
@@ -247,11 +245,10 @@ MODULE_DEVICE_TABLE(of, tvc_of_match);
 
 static struct platform_driver tvc_connector_driver = {
        .probe  = tvc_probe,
-       .remove = __exit_p(tvc_remove),
+       .remove_new = tvc_remove,
        .driver = {
                .name   = "connector-analog-tv",
                .of_match_table = tvc_of_match,
-               .suppress_bind_attrs = true,
        },
 };
 
index c8ad3ef42bd31997df67d790783d40ff9ca17609..0cc9294f89b497eb963888a831fa91044e300fad 100644 (file)
@@ -303,7 +303,7 @@ err_reg:
        return r;
 }
 
-static int __exit dvic_remove(struct platform_device *pdev)
+static void dvic_remove(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
        struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -317,8 +317,6 @@ static int __exit dvic_remove(struct platform_device *pdev)
        omap_dss_put_device(in);
 
        i2c_put_adapter(ddata->i2c_adapter);
-
-       return 0;
 }
 
 static const struct of_device_id dvic_of_match[] = {
@@ -330,11 +328,10 @@ MODULE_DEVICE_TABLE(of, dvic_of_match);
 
 static struct platform_driver dvi_connector_driver = {
        .probe  = dvic_probe,
-       .remove = __exit_p(dvic_remove),
+       .remove_new = dvic_remove,
        .driver = {
                .name   = "connector-dvi",
                .of_match_table = dvic_of_match,
-               .suppress_bind_attrs = true,
        },
 };
 
index 8f9ff9fb4ca4c309574d4400f70d199cad23b73c..b862a32670aeff30c79214bf21ad5d0a58ce1419 100644 (file)
@@ -249,7 +249,7 @@ err_reg:
        return r;
 }
 
-static int __exit hdmic_remove(struct platform_device *pdev)
+static void hdmic_remove(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
        struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -261,8 +261,6 @@ static int __exit hdmic_remove(struct platform_device *pdev)
        hdmic_disconnect(dssdev);
 
        omap_dss_put_device(in);
-
-       return 0;
 }
 
 static const struct of_device_id hdmic_of_match[] = {
@@ -274,11 +272,10 @@ MODULE_DEVICE_TABLE(of, hdmic_of_match);
 
 static struct platform_driver hdmi_connector_driver = {
        .probe  = hdmic_probe,
-       .remove = __exit_p(hdmic_remove),
+       .remove_new = hdmic_remove,
        .driver = {
                .name   = "connector-hdmi",
                .of_match_table = hdmic_of_match,
-               .suppress_bind_attrs = true,
        },
 };
 
index dd29dc5c77ec8f999fb5b1ad42abb5e88a9a57e5..f0d3eb581166bd3a66a5f4fcb8053a9c9abf9752 100644 (file)
@@ -231,7 +231,7 @@ err_reg:
        return r;
 }
 
-static int __exit opa362_remove(struct platform_device *pdev)
+static void opa362_remove(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
        struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -248,8 +248,6 @@ static int __exit opa362_remove(struct platform_device *pdev)
                opa362_disconnect(dssdev, dssdev->dst);
 
        omap_dss_put_device(in);
-
-       return 0;
 }
 
 static const struct of_device_id opa362_of_match[] = {
@@ -260,11 +258,10 @@ MODULE_DEVICE_TABLE(of, opa362_of_match);
 
 static struct platform_driver opa362_driver = {
        .probe  = opa362_probe,
-       .remove = __exit_p(opa362_remove),
+       .remove_new = opa362_remove,
        .driver = {
                .name   = "amplifier-opa362",
                .of_match_table = opa362_of_match,
-               .suppress_bind_attrs = true,
        },
 };
 
index 7bac420169a69f1e88551ad4ae10895da5cba178..c8aca4592949cb3b1ea61a1cf40fc6debd3ec32a 100644 (file)
@@ -217,7 +217,7 @@ err_reg:
        return r;
 }
 
-static int __exit tfp410_remove(struct platform_device *pdev)
+static void tfp410_remove(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
        struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -234,8 +234,6 @@ static int __exit tfp410_remove(struct platform_device *pdev)
                tfp410_disconnect(dssdev, dssdev->dst);
 
        omap_dss_put_device(in);
-
-       return 0;
 }
 
 static const struct of_device_id tfp410_of_match[] = {
@@ -247,11 +245,10 @@ MODULE_DEVICE_TABLE(of, tfp410_of_match);
 
 static struct platform_driver tfp410_driver = {
        .probe  = tfp410_probe,
-       .remove = __exit_p(tfp410_remove),
+       .remove_new = tfp410_remove,
        .driver = {
                .name   = "tfp410",
                .of_match_table = tfp410_of_match,
-               .suppress_bind_attrs = true,
        },
 };
 
index 67f0c9250e9e4cc57b3e6d3cf8edab9a7d266d14..eb3926d0361b9e928365739fe2e3adda8c423b3d 100644 (file)
@@ -283,7 +283,7 @@ err_gpio:
        return r;
 }
 
-static int __exit tpd_remove(struct platform_device *pdev)
+static void tpd_remove(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
        struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -300,8 +300,6 @@ static int __exit tpd_remove(struct platform_device *pdev)
                tpd_disconnect(dssdev, dssdev->dst);
 
        omap_dss_put_device(in);
-
-       return 0;
 }
 
 static const struct of_device_id tpd_of_match[] = {
@@ -313,11 +311,10 @@ MODULE_DEVICE_TABLE(of, tpd_of_match);
 
 static struct platform_driver tpd_driver = {
        .probe  = tpd_probe,
-       .remove = __exit_p(tpd_remove),
+       .remove_new = tpd_remove,
        .driver = {
                .name   = "tpd12s015",
                .of_match_table = tpd_of_match,
-               .suppress_bind_attrs = true,
        },
 };
 
index 9790053c5877c0ec83146adb9a27ec518bc9536b..937f9091274f03d3e9855a77ca228c4527fa0b01 100644 (file)
@@ -211,7 +211,7 @@ err_reg:
        return r;
 }
 
-static int __exit panel_dpi_remove(struct platform_device *pdev)
+static void panel_dpi_remove(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
        struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -223,8 +223,6 @@ static int __exit panel_dpi_remove(struct platform_device *pdev)
        panel_dpi_disconnect(dssdev);
 
        omap_dss_put_device(in);
-
-       return 0;
 }
 
 static const struct of_device_id panel_dpi_of_match[] = {
@@ -236,11 +234,10 @@ MODULE_DEVICE_TABLE(of, panel_dpi_of_match);
 
 static struct platform_driver panel_dpi_driver = {
        .probe = panel_dpi_probe,
-       .remove = __exit_p(panel_dpi_remove),
+       .remove_new = panel_dpi_remove,
        .driver = {
                .name = "panel-dpi",
                .of_match_table = panel_dpi_of_match,
-               .suppress_bind_attrs = true,
        },
 };
 
index 77fce1223a640046846863cbc643d7879f346cfc..adb8881bac285c07c66d574c3c0b93cbfc1079b5 100644 (file)
@@ -1241,7 +1241,7 @@ err_reg:
        return r;
 }
 
-static int __exit dsicm_remove(struct platform_device *pdev)
+static void dsicm_remove(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
        struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -1269,8 +1269,6 @@ static int __exit dsicm_remove(struct platform_device *pdev)
 
        /* reset, to be sure that the panel is in a valid state */
        dsicm_hw_reset(ddata);
-
-       return 0;
 }
 
 static const struct of_device_id dsicm_of_match[] = {
@@ -1282,11 +1280,10 @@ MODULE_DEVICE_TABLE(of, dsicm_of_match);
 
 static struct platform_driver dsicm_driver = {
        .probe = dsicm_probe,
-       .remove = __exit_p(dsicm_remove),
+       .remove_new = dsicm_remove,
        .driver = {
                .name = "panel-dsi-cm",
                .of_match_table = dsicm_of_match,
-               .suppress_bind_attrs = true,
        },
 };
 
index cc30758300e25b0d211bf9723a960c477de8d68b..e37268cf8dcaec4536e11369343966beb5538aa7 100644 (file)
@@ -292,7 +292,7 @@ err_reg:
        return r;
 }
 
-static int __exit sharp_ls_remove(struct platform_device *pdev)
+static void sharp_ls_remove(struct platform_device *pdev)
 {
        struct panel_drv_data *ddata = platform_get_drvdata(pdev);
        struct omap_dss_device *dssdev = &ddata->dssdev;
@@ -304,8 +304,6 @@ static int __exit sharp_ls_remove(struct platform_device *pdev)
        sharp_ls_disconnect(dssdev);
 
        omap_dss_put_device(in);
-
-       return 0;
 }
 
 static const struct of_device_id sharp_ls_of_match[] = {
@@ -317,11 +315,10 @@ MODULE_DEVICE_TABLE(of, sharp_ls_of_match);
 
 static struct platform_driver sharp_ls_driver = {
        .probe = sharp_ls_probe,
-       .remove = __exit_p(sharp_ls_remove),
+       .remove_new = sharp_ls_remove,
        .driver = {
                .name = "panel-sharp-ls037v7dw01",
                .of_match_table = sharp_ls_of_match,
-               .suppress_bind_attrs = true,
        },
 };
 
index ee0dd4c6a6466aa00371d47be431c9996684e5a0..568e6e1eca628ea0fd15c28544a412ac871ef57e 100644 (file)
@@ -368,17 +368,10 @@ static int __init vrfb_probe(struct platform_device *pdev)
        return 0;
 }
 
-static void __exit vrfb_remove(struct platform_device *pdev)
-{
-       vrfb_loaded = false;
-}
-
 static struct platform_driver vrfb_driver = {
        .driver.name    = "omapvrfb",
-       .remove         = __exit_p(vrfb_remove),
 };
-
-module_platform_driver_probe(vrfb_driver, vrfb_probe);
+builtin_platform_driver_probe(vrfb_driver, vrfb_probe);
 
 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
 MODULE_DESCRIPTION("OMAP VRFB");
index 58868f8880d655b771755002ee1804655078609c..a52b1ba43a48744a5e7609080a48609ea8a3cd1a 100644 (file)
@@ -574,7 +574,7 @@ static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg)
                break;
 
        case VIAFB_SET_GAMMA_LUT:
-               viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32));
+               viafb_gamma_table = memdup_array_user(argp, 256, sizeof(u32));
                if (IS_ERR(viafb_gamma_table))
                        return PTR_ERR(viafb_gamma_table);
                viafb_set_gamma_table(viafb_bpp, viafb_gamma_table);
index f79ab13a5c28b0cd7a71d41ba8d3177914946351..40129b6f0eca41ced6c6213776571a5ae8374ebe 100644 (file)
@@ -48,10 +48,6 @@ source "drivers/virt/nitro_enclaves/Kconfig"
 
 source "drivers/virt/acrn/Kconfig"
 
-source "drivers/virt/coco/efi_secret/Kconfig"
-
-source "drivers/virt/coco/sev-guest/Kconfig"
-
-source "drivers/virt/coco/tdx-guest/Kconfig"
+source "drivers/virt/coco/Kconfig"
 
 endif
index e9aa6fc96fab7242a9963d9a834722af24e4b6a2..f29901bd782058d3552cdec2c2128ad47ce6fe27 100644 (file)
@@ -9,6 +9,4 @@ obj-y                           += vboxguest/
 
 obj-$(CONFIG_NITRO_ENCLAVES)   += nitro_enclaves/
 obj-$(CONFIG_ACRN_HSM)         += acrn/
-obj-$(CONFIG_EFI_SECRET)       += coco/efi_secret/
-obj-$(CONFIG_SEV_GUEST)                += coco/sev-guest/
-obj-$(CONFIG_INTEL_TDX_GUEST)  += coco/tdx-guest/
+obj-y                          += coco/
diff --git a/drivers/virt/coco/Kconfig b/drivers/virt/coco/Kconfig
new file mode 100644 (file)
index 0000000..87d142c
--- /dev/null
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Confidential computing related collateral
+#
+
+config TSM_REPORTS
+       select CONFIGFS_FS
+       tristate
+
+source "drivers/virt/coco/efi_secret/Kconfig"
+
+source "drivers/virt/coco/sev-guest/Kconfig"
+
+source "drivers/virt/coco/tdx-guest/Kconfig"
diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile
new file mode 100644 (file)
index 0000000..18c1aba
--- /dev/null
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Confidential computing related collateral
+#
+obj-$(CONFIG_TSM_REPORTS)      += tsm.o
+obj-$(CONFIG_EFI_SECRET)       += efi_secret/
+obj-$(CONFIG_SEV_GUEST)                += sev-guest/
+obj-$(CONFIG_INTEL_TDX_GUEST)  += tdx-guest/
index da2d7ca531f0fb7cdcc7fbd9a1d660bb72ac950d..1cffc72c41cb1461020dff76db8b84ffb7527795 100644 (file)
@@ -5,6 +5,7 @@ config SEV_GUEST
        select CRYPTO
        select CRYPTO_AEAD2
        select CRYPTO_GCM
+       select TSM_REPORTS
        help
          SEV-SNP firmware provides the guest a mechanism to communicate with
          the PSP without risk from a malicious hypervisor who wishes to read,
index 97dbe715e96adfab478a255354f731c1dfdd55ee..bc564adcf499526aacdbc4fbc35e276f4cbdfee4 100644 (file)
 #include <linux/miscdevice.h>
 #include <linux/set_memory.h>
 #include <linux/fs.h>
+#include <linux/tsm.h>
 #include <crypto/aead.h>
 #include <linux/scatterlist.h>
 #include <linux/psp-sev.h>
+#include <linux/sockptr.h>
+#include <linux/cleanup.h>
+#include <linux/uuid.h>
 #include <uapi/linux/sev-guest.h>
 #include <uapi/linux/psp-sev.h>
 
@@ -57,6 +61,11 @@ struct snp_guest_dev {
 
        struct snp_secrets_page_layout *layout;
        struct snp_req_data input;
+       union {
+               struct snp_report_req report;
+               struct snp_derived_key_req derived_key;
+               struct snp_ext_report_req ext_report;
+       } req;
        u32 *os_area_msg_seqno;
        u8 *vmpck;
 };
@@ -470,11 +479,16 @@ static int handle_guest_request(struct snp_guest_dev *snp_dev, u64 exit_code,
        return 0;
 }
 
+struct snp_req_resp {
+       sockptr_t req_data;
+       sockptr_t resp_data;
+};
+
 static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
 {
        struct snp_guest_crypto *crypto = snp_dev->crypto;
+       struct snp_report_req *req = &snp_dev->req.report;
        struct snp_report_resp *resp;
-       struct snp_report_req req;
        int rc, resp_len;
 
        lockdep_assert_held(&snp_cmd_mutex);
@@ -482,7 +496,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
        if (!arg->req_data || !arg->resp_data)
                return -EINVAL;
 
-       if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
+       if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
                return -EFAULT;
 
        /*
@@ -496,7 +510,7 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
                return -ENOMEM;
 
        rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
-                                 SNP_MSG_REPORT_REQ, &req, sizeof(req), resp->data,
+                                 SNP_MSG_REPORT_REQ, req, sizeof(*req), resp->data,
                                  resp_len);
        if (rc)
                goto e_free;
@@ -511,9 +525,9 @@ e_free:
 
 static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
 {
+       struct snp_derived_key_req *req = &snp_dev->req.derived_key;
        struct snp_guest_crypto *crypto = snp_dev->crypto;
        struct snp_derived_key_resp resp = {0};
-       struct snp_derived_key_req req;
        int rc, resp_len;
        /* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
        u8 buf[64 + 16];
@@ -532,11 +546,11 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
        if (sizeof(buf) < resp_len)
                return -ENOMEM;
 
-       if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
+       if (copy_from_user(req, (void __user *)arg->req_data, sizeof(*req)))
                return -EFAULT;
 
        rc = handle_guest_request(snp_dev, SVM_VMGEXIT_GUEST_REQUEST, arg,
-                                 SNP_MSG_KEY_REQ, &req, sizeof(req), buf, resp_len);
+                                 SNP_MSG_KEY_REQ, req, sizeof(*req), buf, resp_len);
        if (rc)
                return rc;
 
@@ -550,31 +564,39 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
        return rc;
 }
 
-static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
+static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
+                         struct snp_req_resp *io)
+
 {
+       struct snp_ext_report_req *req = &snp_dev->req.ext_report;
        struct snp_guest_crypto *crypto = snp_dev->crypto;
-       struct snp_ext_report_req req;
        struct snp_report_resp *resp;
        int ret, npages = 0, resp_len;
+       sockptr_t certs_address;
 
        lockdep_assert_held(&snp_cmd_mutex);
 
-       if (!arg->req_data || !arg->resp_data)
+       if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
                return -EINVAL;
 
-       if (copy_from_user(&req, (void __user *)arg->req_data, sizeof(req)))
+       if (copy_from_sockptr(req, io->req_data, sizeof(*req)))
                return -EFAULT;
 
-       /* userspace does not want certificate data */
-       if (!req.certs_len || !req.certs_address)
+       /* caller does not want certificate data */
+       if (!req->certs_len || !req->certs_address)
                goto cmd;
 
-       if (req.certs_len > SEV_FW_BLOB_MAX_SIZE ||
-           !IS_ALIGNED(req.certs_len, PAGE_SIZE))
+       if (req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
+           !IS_ALIGNED(req->certs_len, PAGE_SIZE))
                return -EINVAL;
 
-       if (!access_ok((const void __user *)req.certs_address, req.certs_len))
-               return -EFAULT;
+       if (sockptr_is_kernel(io->resp_data)) {
+               certs_address = KERNEL_SOCKPTR((void *)req->certs_address);
+       } else {
+               certs_address = USER_SOCKPTR((void __user *)req->certs_address);
+               if (!access_ok(certs_address.user, req->certs_len))
+                       return -EFAULT;
+       }
 
        /*
         * Initialize the intermediate buffer with all zeros. This buffer
@@ -582,8 +604,8 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
         * the host. If host does not supply any certs in it, then copy
         * zeros to indicate that certificate data was not provided.
         */
-       memset(snp_dev->certs_data, 0, req.certs_len);
-       npages = req.certs_len >> PAGE_SHIFT;
+       memset(snp_dev->certs_data, 0, req->certs_len);
+       npages = req->certs_len >> PAGE_SHIFT;
 cmd:
        /*
         * The intermediate response buffer is used while decrypting the
@@ -597,28 +619,26 @@ cmd:
 
        snp_dev->input.data_npages = npages;
        ret = handle_guest_request(snp_dev, SVM_VMGEXIT_EXT_GUEST_REQUEST, arg,
-                                  SNP_MSG_REPORT_REQ, &req.data,
-                                  sizeof(req.data), resp->data, resp_len);
+                                  SNP_MSG_REPORT_REQ, &req->data,
+                                  sizeof(req->data), resp->data, resp_len);
 
        /* If certs length is invalid then copy the returned length */
        if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
-               req.certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
+               req->certs_len = snp_dev->input.data_npages << PAGE_SHIFT;
 
-               if (copy_to_user((void __user *)arg->req_data, &req, sizeof(req)))
+               if (copy_to_sockptr(io->req_data, req, sizeof(*req)))
                        ret = -EFAULT;
        }
 
        if (ret)
                goto e_free;
 
-       if (npages &&
-           copy_to_user((void __user *)req.certs_address, snp_dev->certs_data,
-                        req.certs_len)) {
+       if (npages && copy_to_sockptr(certs_address, snp_dev->certs_data, req->certs_len)) {
                ret = -EFAULT;
                goto e_free;
        }
 
-       if (copy_to_user((void __user *)arg->resp_data, resp, sizeof(*resp)))
+       if (copy_to_sockptr(io->resp_data, resp, sizeof(*resp)))
                ret = -EFAULT;
 
 e_free:
@@ -631,6 +651,7 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long
        struct snp_guest_dev *snp_dev = to_snp_dev(file);
        void __user *argp = (void __user *)arg;
        struct snp_guest_request_ioctl input;
+       struct snp_req_resp io;
        int ret = -ENOTTY;
 
        if (copy_from_user(&input, argp, sizeof(input)))
@@ -659,7 +680,14 @@ static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long
                ret = get_derived_key(snp_dev, &input);
                break;
        case SNP_GET_EXT_REPORT:
-               ret = get_ext_report(snp_dev, &input);
+               /*
+                * As get_ext_report() may be called from the ioctl() path and a
+                * kernel internal path (configfs-tsm), decorate the passed
+                * buffers as user pointers.
+                */
+               io.req_data = USER_SOCKPTR((void __user *)input.req_data);
+               io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
+               ret = get_ext_report(snp_dev, &input, &io);
                break;
        default:
                break;
@@ -743,6 +771,130 @@ static u8 *get_vmpck(int id, struct snp_secrets_page_layout *layout, u32 **seqno
        return key;
 }
 
+struct snp_msg_report_resp_hdr {
+       u32 status;
+       u32 report_size;
+       u8 rsvd[24];
+};
+
+struct snp_msg_cert_entry {
+       guid_t guid;
+       u32 offset;
+       u32 length;
+};
+
+static int sev_report_new(struct tsm_report *report, void *data)
+{
+       struct snp_msg_cert_entry *cert_table;
+       struct tsm_desc *desc = &report->desc;
+       struct snp_guest_dev *snp_dev = data;
+       struct snp_msg_report_resp_hdr hdr;
+       const u32 report_size = SZ_4K;
+       const u32 ext_size = SEV_FW_BLOB_MAX_SIZE;
+       u32 certs_size, i, size = report_size + ext_size;
+       int ret;
+
+       if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE)
+               return -EINVAL;
+
+       void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       guard(mutex)(&snp_cmd_mutex);
+
+       /* Check if the VMPCK is not empty */
+       if (is_vmpck_empty(snp_dev)) {
+               dev_err_ratelimited(snp_dev->dev, "VMPCK is disabled\n");
+               return -ENOTTY;
+       }
+
+       cert_table = buf + report_size;
+       struct snp_ext_report_req ext_req = {
+               .data = { .vmpl = desc->privlevel },
+               .certs_address = (__u64)cert_table,
+               .certs_len = ext_size,
+       };
+       memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len);
+
+       struct snp_guest_request_ioctl input = {
+               .msg_version = 1,
+               .req_data = (__u64)&ext_req,
+               .resp_data = (__u64)buf,
+               .exitinfo2 = 0xff,
+       };
+       struct snp_req_resp io = {
+               .req_data = KERNEL_SOCKPTR(&ext_req),
+               .resp_data = KERNEL_SOCKPTR(buf),
+       };
+
+       ret = get_ext_report(snp_dev, &input, &io);
+       if (ret)
+               return ret;
+
+       memcpy(&hdr, buf, sizeof(hdr));
+       if (hdr.status == SEV_RET_INVALID_PARAM)
+               return -EINVAL;
+       if (hdr.status == SEV_RET_INVALID_KEY)
+               return -EINVAL;
+       if (hdr.status)
+               return -ENXIO;
+       if ((hdr.report_size + sizeof(hdr)) > report_size)
+               return -ENOMEM;
+
+       void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL);
+       if (!rbuf)
+               return -ENOMEM;
+
+       memcpy(rbuf, buf + sizeof(hdr), hdr.report_size);
+       report->outblob = no_free_ptr(rbuf);
+       report->outblob_len = hdr.report_size;
+
+       certs_size = 0;
+       for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) {
+               struct snp_msg_cert_entry *ent = &cert_table[i];
+
+               if (guid_is_null(&ent->guid) && !ent->offset && !ent->length)
+                       break;
+               certs_size = max(certs_size, ent->offset + ent->length);
+       }
+
+       /* Suspicious that the response populated entries without populating size */
+       if (!certs_size && i)
+               dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n");
+
+       /* No certs to report */
+       if (!certs_size)
+               return 0;
+
+       /* Suspicious that the certificate blob size contract was
+        * violated */
+       if (certs_size > ext_size) {
+               dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n");
+               certs_size = ext_size;
+       }
+
+       void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL);
+       if (!cbuf)
+               return -ENOMEM;
+
+       memcpy(cbuf, cert_table, certs_size);
+       report->auxblob = no_free_ptr(cbuf);
+       report->auxblob_len = certs_size;
+
+       return 0;
+}
+
+static const struct tsm_ops sev_tsm_ops = {
+       .name = KBUILD_MODNAME,
+       .report_new = sev_report_new,
+};
+
+static void unregister_sev_tsm(void *data)
+{
+       tsm_unregister(&sev_tsm_ops);
+}
+
 static int __init sev_guest_probe(struct platform_device *pdev)
 {
        struct snp_secrets_page_layout *layout;
@@ -816,6 +968,14 @@ static int __init sev_guest_probe(struct platform_device *pdev)
        snp_dev->input.resp_gpa = __pa(snp_dev->response);
        snp_dev->input.data_gpa = __pa(snp_dev->certs_data);
 
+       ret = tsm_register(&sev_tsm_ops, snp_dev, &tsm_report_extra_type);
+       if (ret)
+               goto e_free_cert_data;
+
+       ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL);
+       if (ret)
+               goto e_free_cert_data;
+
        ret =  misc_register(misc);
        if (ret)
                goto e_free_cert_data;
index 14246fc2fb02b661e1f1aabc1f419563e3f58c38..22dd59e194315a754472b510465af35a9f8c3b27 100644 (file)
@@ -1,6 +1,7 @@
 config TDX_GUEST_DRIVER
        tristate "TDX Guest driver"
        depends on INTEL_TDX_GUEST
+       select TSM_REPORTS
        help
          The driver provides userspace interface to communicate with
          the TDX module to request the TDX guest details like attestation
index 5e44a0fa69bd8ab958abbc7aca6f9fe492d59f8e..1253bf76b57031884420a35d808483a9cd7267be 100644 (file)
 #include <linux/mod_devicetable.h>
 #include <linux/string.h>
 #include <linux/uaccess.h>
+#include <linux/set_memory.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/tsm.h>
+#include <linux/sizes.h>
 
 #include <uapi/linux/tdx-guest.h>
 
 #include <asm/cpu_device_id.h>
 #include <asm/tdx.h>
 
+/*
+ * Intel's SGX QE implementation generally uses Quote size less
+ * than 8K (2K Quote data + ~5K of certificate blob).
+ */
+#define GET_QUOTE_BUF_SIZE             SZ_8K
+
+#define GET_QUOTE_CMD_VER              1
+
+/* TDX GetQuote status codes */
+#define GET_QUOTE_SUCCESS              0
+#define GET_QUOTE_IN_FLIGHT            0xffffffffffffffff
+
+/** struct tdx_quote_buf - Format of Quote request buffer.
+ * @version: Quote format version, filled by TD.
+ * @status: Status code of Quote request, filled by VMM.
+ * @in_len: Length of TDREPORT, filled by TD.
+ * @out_len: Length of Quote data, filled by VMM.
+ * @data: Quote data on output or TDREPORT on input.
+ *
+ * More details of Quote request buffer can be found in TDX
+ * Guest-Host Communication Interface (GHCI) for Intel TDX 1.0,
+ * section titled "TDG.VP.VMCALL<GetQuote>"
+ */
+struct tdx_quote_buf {
+       u64 version;
+       u64 status;
+       u32 in_len;
+       u32 out_len;
+       u8 data[];
+};
+
+/* Quote data buffer */
+static void *quote_data;
+
+/* Lock to streamline quote requests */
+static DEFINE_MUTEX(quote_lock);
+
+/*
+ * GetQuote request timeout in seconds. Expect that 30 seconds
+ * is enough time for QE to respond to any Quote requests.
+ */
+static u32 getquote_timeout = 30;
+
 static long tdx_get_report0(struct tdx_report_req __user *req)
 {
        u8 *reportdata, *tdreport;
@@ -53,6 +101,154 @@ out:
        return ret;
 }
 
+static void free_quote_buf(void *buf)
+{
+       size_t len = PAGE_ALIGN(GET_QUOTE_BUF_SIZE);
+       unsigned int count = len >> PAGE_SHIFT;
+
+       if (set_memory_encrypted((unsigned long)buf, count)) {
+               pr_err("Failed to restore encryption mask for Quote buffer, leak it\n");
+               return;
+       }
+
+       free_pages_exact(buf, len);
+}
+
+static void *alloc_quote_buf(void)
+{
+       size_t len = PAGE_ALIGN(GET_QUOTE_BUF_SIZE);
+       unsigned int count = len >> PAGE_SHIFT;
+       void *addr;
+
+       addr = alloc_pages_exact(len, GFP_KERNEL | __GFP_ZERO);
+       if (!addr)
+               return NULL;
+
+       if (set_memory_decrypted((unsigned long)addr, count)) {
+               free_pages_exact(addr, len);
+               return NULL;
+       }
+
+       return addr;
+}
+
+/*
+ * wait_for_quote_completion() - Wait for Quote request completion
+ * @quote_buf: Address of Quote buffer.
+ * @timeout: Timeout in seconds to wait for the Quote generation.
+ *
+ * As per TDX GHCI v1.0 specification, sec titled "TDG.VP.VMCALL<GetQuote>",
+ * the status field in the Quote buffer will be set to GET_QUOTE_IN_FLIGHT
+ * while VMM processes the GetQuote request, and will change it to success
+ * or error code after processing is complete. So wait till the status
+ * changes from GET_QUOTE_IN_FLIGHT or the request being timed out.
+ */
+static int wait_for_quote_completion(struct tdx_quote_buf *quote_buf, u32 timeout)
+{
+       int i = 0;
+
+       /*
+        * Quote requests usually take a few seconds to complete, so waking up
+        * once per second to recheck the status is fine for this use case.
+        */
+       while (quote_buf->status == GET_QUOTE_IN_FLIGHT && i++ < timeout) {
+               if (msleep_interruptible(MSEC_PER_SEC))
+                       return -EINTR;
+       }
+
+       return (i == timeout) ? -ETIMEDOUT : 0;
+}
+
+static int tdx_report_new(struct tsm_report *report, void *data)
+{
+       u8 *buf, *reportdata = NULL, *tdreport = NULL;
+       struct tdx_quote_buf *quote_buf = quote_data;
+       struct tsm_desc *desc = &report->desc;
+       int ret;
+       u64 err;
+
+       /* TODO: switch to guard(mutex_intr) */
+       if (mutex_lock_interruptible(&quote_lock))
+               return -EINTR;
+
+       /*
+        * If the previous request timed out or was interrupted, and the
+        * Quote buf status is still in GET_QUOTE_IN_FLIGHT (owned by
+        * VMM), don't permit any new request.
+        */
+       if (quote_buf->status == GET_QUOTE_IN_FLIGHT) {
+               ret = -EBUSY;
+               goto done;
+       }
+
+       if (desc->inblob_len != TDX_REPORTDATA_LEN) {
+               ret = -EINVAL;
+               goto done;
+       }
+
+       reportdata = kmalloc(TDX_REPORTDATA_LEN, GFP_KERNEL);
+       if (!reportdata) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       tdreport = kzalloc(TDX_REPORT_LEN, GFP_KERNEL);
+       if (!tdreport) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       memcpy(reportdata, desc->inblob, desc->inblob_len);
+
+       /* Generate TDREPORT0 using "TDG.MR.REPORT" TDCALL */
+       ret = tdx_mcall_get_report0(reportdata, tdreport);
+       if (ret) {
+               pr_err("GetReport call failed\n");
+               goto done;
+       }
+
+       memset(quote_data, 0, GET_QUOTE_BUF_SIZE);
+
+       /* Update Quote buffer header */
+       quote_buf->version = GET_QUOTE_CMD_VER;
+       quote_buf->in_len = TDX_REPORT_LEN;
+
+       memcpy(quote_buf->data, tdreport, TDX_REPORT_LEN);
+
+       err = tdx_hcall_get_quote(quote_data, GET_QUOTE_BUF_SIZE);
+       if (err) {
+               pr_err("GetQuote hypercall failed, status:%llx\n", err);
+               ret = -EIO;
+               goto done;
+       }
+
+       ret = wait_for_quote_completion(quote_buf, getquote_timeout);
+       if (ret) {
+               pr_err("GetQuote request timedout\n");
+               goto done;
+       }
+
+       buf = kvmemdup(quote_buf->data, quote_buf->out_len, GFP_KERNEL);
+       if (!buf) {
+               ret = -ENOMEM;
+               goto done;
+       }
+
+       report->outblob = buf;
+       report->outblob_len = quote_buf->out_len;
+
+       /*
+        * TODO: parse the PEM-formatted cert chain out of the quote buffer when
+        * provided
+        */
+done:
+       mutex_unlock(&quote_lock);
+       kfree(reportdata);
+       kfree(tdreport);
+
+       return ret;
+}
+
 static long tdx_guest_ioctl(struct file *file, unsigned int cmd,
                            unsigned long arg)
 {
@@ -82,17 +278,48 @@ static const struct x86_cpu_id tdx_guest_ids[] = {
 };
 MODULE_DEVICE_TABLE(x86cpu, tdx_guest_ids);
 
+static const struct tsm_ops tdx_tsm_ops = {
+       .name = KBUILD_MODNAME,
+       .report_new = tdx_report_new,
+};
+
 static int __init tdx_guest_init(void)
 {
+       int ret;
+
        if (!x86_match_cpu(tdx_guest_ids))
                return -ENODEV;
 
-       return misc_register(&tdx_misc_dev);
+       ret = misc_register(&tdx_misc_dev);
+       if (ret)
+               return ret;
+
+       quote_data = alloc_quote_buf();
+       if (!quote_data) {
+               pr_err("Failed to allocate Quote buffer\n");
+               ret = -ENOMEM;
+               goto free_misc;
+       }
+
+       ret = tsm_register(&tdx_tsm_ops, NULL, NULL);
+       if (ret)
+               goto free_quote;
+
+       return 0;
+
+free_quote:
+       free_quote_buf(quote_data);
+free_misc:
+       misc_deregister(&tdx_misc_dev);
+
+       return ret;
 }
 module_init(tdx_guest_init);
 
 static void __exit tdx_guest_exit(void)
 {
+       tsm_unregister(&tdx_tsm_ops);
+       free_quote_buf(quote_data);
        misc_deregister(&tdx_misc_dev);
 }
 module_exit(tdx_guest_exit);
diff --git a/drivers/virt/coco/tsm.c b/drivers/virt/coco/tsm.c
new file mode 100644 (file)
index 0000000..d1c2db8
--- /dev/null
@@ -0,0 +1,425 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/tsm.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/rwsem.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/cleanup.h>
+#include <linux/configfs.h>
+
+static struct tsm_provider {
+       const struct tsm_ops *ops;
+       const struct config_item_type *type;
+       void *data;
+} provider;
+static DECLARE_RWSEM(tsm_rwsem);
+
+/**
+ * DOC: Trusted Security Module (TSM) Attestation Report Interface
+ *
+ * The TSM report interface is a common provider of blobs that facilitate
+ * attestation of a TVM (confidential computing guest) by an attestation
+ * service. A TSM report combines a user-defined blob (likely a public-key with
+ * a nonce for a key-exchange protocol) with a signed attestation report. That
+ * combined blob is then used to obtain secrets provided by an agent that can
+ * validate the attestation report. The expectation is that this interface is
+ * invoked infrequently, however configfs allows for multiple agents to
+ * own their own report generation instances to generate reports as
+ * often as needed.
+ *
+ * The attestation report format is TSM provider specific, when / if a standard
+ * materializes that can be published instead of the vendor layout. Until then
+ * the 'provider' attribute indicates the format of 'outblob', and optionally
+ * 'auxblob'.
+ */
+
+struct tsm_report_state {
+       struct tsm_report report;
+       unsigned long write_generation;
+       unsigned long read_generation;
+       struct config_item cfg;
+};
+
+enum tsm_data_select {
+       TSM_REPORT,
+       TSM_CERTS,
+};
+
+static struct tsm_report *to_tsm_report(struct config_item *cfg)
+{
+       struct tsm_report_state *state =
+               container_of(cfg, struct tsm_report_state, cfg);
+
+       return &state->report;
+}
+
+static struct tsm_report_state *to_state(struct tsm_report *report)
+{
+       return container_of(report, struct tsm_report_state, report);
+}
+
+static int try_advance_write_generation(struct tsm_report *report)
+{
+       struct tsm_report_state *state = to_state(report);
+
+       lockdep_assert_held_write(&tsm_rwsem);
+
+       /*
+        * Malicious or broken userspace has written enough times for
+        * read_generation == write_generation by modular arithmetic without an
+        * interim read. Stop accepting updates until the current report
+        * configuration is read.
+        */
+       if (state->write_generation == state->read_generation - 1)
+               return -EBUSY;
+       state->write_generation++;
+       return 0;
+}
+
+static ssize_t tsm_report_privlevel_store(struct config_item *cfg,
+                                         const char *buf, size_t len)
+{
+       struct tsm_report *report = to_tsm_report(cfg);
+       unsigned int val;
+       int rc;
+
+       rc = kstrtouint(buf, 0, &val);
+       if (rc)
+               return rc;
+
+       /*
+        * The valid privilege levels that a TSM might accept, if it accepts a
+        * privilege level setting at all, are a max of TSM_PRIVLEVEL_MAX (see
+        * SEV-SNP GHCB) and a minimum of a TSM selected floor value no less
+        * than 0.
+        */
+       if (provider.ops->privlevel_floor > val || val > TSM_PRIVLEVEL_MAX)
+               return -EINVAL;
+
+       guard(rwsem_write)(&tsm_rwsem);
+       rc = try_advance_write_generation(report);
+       if (rc)
+               return rc;
+       report->desc.privlevel = val;
+
+       return len;
+}
+CONFIGFS_ATTR_WO(tsm_report_, privlevel);
+
+static ssize_t tsm_report_privlevel_floor_show(struct config_item *cfg,
+                                              char *buf)
+{
+       guard(rwsem_read)(&tsm_rwsem);
+       return sysfs_emit(buf, "%u\n", provider.ops->privlevel_floor);
+}
+CONFIGFS_ATTR_RO(tsm_report_, privlevel_floor);
+
+static ssize_t tsm_report_inblob_write(struct config_item *cfg,
+                                      const void *buf, size_t count)
+{
+       struct tsm_report *report = to_tsm_report(cfg);
+       int rc;
+
+       guard(rwsem_write)(&tsm_rwsem);
+       rc = try_advance_write_generation(report);
+       if (rc)
+               return rc;
+
+       report->desc.inblob_len = count;
+       memcpy(report->desc.inblob, buf, count);
+       return count;
+}
+CONFIGFS_BIN_ATTR_WO(tsm_report_, inblob, NULL, TSM_INBLOB_MAX);
+
+static ssize_t tsm_report_generation_show(struct config_item *cfg, char *buf)
+{
+       struct tsm_report *report = to_tsm_report(cfg);
+       struct tsm_report_state *state = to_state(report);
+
+       guard(rwsem_read)(&tsm_rwsem);
+       return sysfs_emit(buf, "%lu\n", state->write_generation);
+}
+CONFIGFS_ATTR_RO(tsm_report_, generation);
+
+static ssize_t tsm_report_provider_show(struct config_item *cfg, char *buf)
+{
+       guard(rwsem_read)(&tsm_rwsem);
+       return sysfs_emit(buf, "%s\n", provider.ops->name);
+}
+CONFIGFS_ATTR_RO(tsm_report_, provider);
+
+static ssize_t __read_report(struct tsm_report *report, void *buf, size_t count,
+                            enum tsm_data_select select)
+{
+       loff_t offset = 0;
+       ssize_t len;
+       u8 *out;
+
+       if (select == TSM_REPORT) {
+               out = report->outblob;
+               len = report->outblob_len;
+       } else {
+               out = report->auxblob;
+               len = report->auxblob_len;
+       }
+
+       /*
+        * Recall that a NULL @buf is configfs requesting the size of
+        * the buffer.
+        */
+       if (!buf)
+               return len;
+       return memory_read_from_buffer(buf, count, &offset, out, len);
+}
+
+static ssize_t read_cached_report(struct tsm_report *report, void *buf,
+                                 size_t count, enum tsm_data_select select)
+{
+       struct tsm_report_state *state = to_state(report);
+
+       guard(rwsem_read)(&tsm_rwsem);
+       if (!report->desc.inblob_len)
+               return -EINVAL;
+
+       /*
+        * A given TSM backend always fills in ->outblob regardless of
+        * whether the report includes an auxblob or not.
+        */
+       if (!report->outblob ||
+           state->read_generation != state->write_generation)
+               return -EWOULDBLOCK;
+
+       return __read_report(report, buf, count, select);
+}
+
+static ssize_t tsm_report_read(struct tsm_report *report, void *buf,
+                              size_t count, enum tsm_data_select select)
+{
+       struct tsm_report_state *state = to_state(report);
+       const struct tsm_ops *ops;
+       ssize_t rc;
+
+       /* try to read from the existing report if present and valid... */
+       rc = read_cached_report(report, buf, count, select);
+       if (rc >= 0 || rc != -EWOULDBLOCK)
+               return rc;
+
+       /* slow path, report may need to be regenerated... */
+       guard(rwsem_write)(&tsm_rwsem);
+       ops = provider.ops;
+       if (!ops)
+               return -ENOTTY;
+       if (!report->desc.inblob_len)
+               return -EINVAL;
+
+       /* did another thread already generate this report? */
+       if (report->outblob &&
+           state->read_generation == state->write_generation)
+               goto out;
+
+       kvfree(report->outblob);
+       kvfree(report->auxblob);
+       report->outblob = NULL;
+       report->auxblob = NULL;
+       rc = ops->report_new(report, provider.data);
+       if (rc < 0)
+               return rc;
+       state->read_generation = state->write_generation;
+out:
+       return __read_report(report, buf, count, select);
+}
+
+static ssize_t tsm_report_outblob_read(struct config_item *cfg, void *buf,
+                                      size_t count)
+{
+       struct tsm_report *report = to_tsm_report(cfg);
+
+       return tsm_report_read(report, buf, count, TSM_REPORT);
+}
+CONFIGFS_BIN_ATTR_RO(tsm_report_, outblob, NULL, TSM_OUTBLOB_MAX);
+
+static ssize_t tsm_report_auxblob_read(struct config_item *cfg, void *buf,
+                                      size_t count)
+{
+       struct tsm_report *report = to_tsm_report(cfg);
+
+       return tsm_report_read(report, buf, count, TSM_CERTS);
+}
+CONFIGFS_BIN_ATTR_RO(tsm_report_, auxblob, NULL, TSM_OUTBLOB_MAX);
+
+#define TSM_DEFAULT_ATTRS() \
+       &tsm_report_attr_generation, \
+       &tsm_report_attr_provider
+
+static struct configfs_attribute *tsm_report_attrs[] = {
+       TSM_DEFAULT_ATTRS(),
+       NULL,
+};
+
+static struct configfs_attribute *tsm_report_extra_attrs[] = {
+       TSM_DEFAULT_ATTRS(),
+       &tsm_report_attr_privlevel,
+       &tsm_report_attr_privlevel_floor,
+       NULL,
+};
+
+#define TSM_DEFAULT_BIN_ATTRS() \
+       &tsm_report_attr_inblob, \
+       &tsm_report_attr_outblob
+
+static struct configfs_bin_attribute *tsm_report_bin_attrs[] = {
+       TSM_DEFAULT_BIN_ATTRS(),
+       NULL,
+};
+
+static struct configfs_bin_attribute *tsm_report_bin_extra_attrs[] = {
+       TSM_DEFAULT_BIN_ATTRS(),
+       &tsm_report_attr_auxblob,
+       NULL,
+};
+
+static void tsm_report_item_release(struct config_item *cfg)
+{
+       struct tsm_report *report = to_tsm_report(cfg);
+       struct tsm_report_state *state = to_state(report);
+
+       kvfree(report->auxblob);
+       kvfree(report->outblob);
+       kfree(state);
+}
+
+static struct configfs_item_operations tsm_report_item_ops = {
+       .release = tsm_report_item_release,
+};
+
+const struct config_item_type tsm_report_default_type = {
+       .ct_owner = THIS_MODULE,
+       .ct_bin_attrs = tsm_report_bin_attrs,
+       .ct_attrs = tsm_report_attrs,
+       .ct_item_ops = &tsm_report_item_ops,
+};
+EXPORT_SYMBOL_GPL(tsm_report_default_type);
+
+const struct config_item_type tsm_report_extra_type = {
+       .ct_owner = THIS_MODULE,
+       .ct_bin_attrs = tsm_report_bin_extra_attrs,
+       .ct_attrs = tsm_report_extra_attrs,
+       .ct_item_ops = &tsm_report_item_ops,
+};
+EXPORT_SYMBOL_GPL(tsm_report_extra_type);
+
+static struct config_item *tsm_report_make_item(struct config_group *group,
+                                               const char *name)
+{
+       struct tsm_report_state *state;
+
+       guard(rwsem_read)(&tsm_rwsem);
+       if (!provider.ops)
+               return ERR_PTR(-ENXIO);
+
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               return ERR_PTR(-ENOMEM);
+
+       config_item_init_type_name(&state->cfg, name, provider.type);
+       return &state->cfg;
+}
+
+static struct configfs_group_operations tsm_report_group_ops = {
+       .make_item = tsm_report_make_item,
+};
+
+static const struct config_item_type tsm_reports_type = {
+       .ct_owner = THIS_MODULE,
+       .ct_group_ops = &tsm_report_group_ops,
+};
+
+static const struct config_item_type tsm_root_group_type = {
+       .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem tsm_configfs = {
+       .su_group = {
+               .cg_item = {
+                       .ci_namebuf = "tsm",
+                       .ci_type = &tsm_root_group_type,
+               },
+       },
+       .su_mutex = __MUTEX_INITIALIZER(tsm_configfs.su_mutex),
+};
+
+int tsm_register(const struct tsm_ops *ops, void *priv,
+                const struct config_item_type *type)
+{
+       const struct tsm_ops *conflict;
+
+       if (!type)
+               type = &tsm_report_default_type;
+       if (!(type == &tsm_report_default_type || type == &tsm_report_extra_type))
+               return -EINVAL;
+
+       guard(rwsem_write)(&tsm_rwsem);
+       conflict = provider.ops;
+       if (conflict) {
+               pr_err("\"%s\" ops already registered\n", conflict->name);
+               return -EBUSY;
+       }
+
+       provider.ops = ops;
+       provider.data = priv;
+       provider.type = type;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(tsm_register);
+
+int tsm_unregister(const struct tsm_ops *ops)
+{
+       guard(rwsem_write)(&tsm_rwsem);
+       if (ops != provider.ops)
+               return -EBUSY;
+       provider.ops = NULL;
+       provider.data = NULL;
+       provider.type = NULL;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(tsm_unregister);
+
+static struct config_group *tsm_report_group;
+
+static int __init tsm_init(void)
+{
+       struct config_group *root = &tsm_configfs.su_group;
+       struct config_group *tsm;
+       int rc;
+
+       config_group_init(root);
+       rc = configfs_register_subsystem(&tsm_configfs);
+       if (rc)
+               return rc;
+
+       tsm = configfs_register_default_group(root, "report",
+                                             &tsm_reports_type);
+       if (IS_ERR(tsm)) {
+               configfs_unregister_subsystem(&tsm_configfs);
+               return PTR_ERR(tsm);
+       }
+       tsm_report_group = tsm;
+
+       return 0;
+}
+module_init(tsm_init);
+
+static void __exit tsm_exit(void)
+{
+       configfs_unregister_default_group(tsm_report_group);
+       configfs_unregister_subsystem(&tsm_configfs);
+}
+module_exit(tsm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Provide Trusted Security Module attestation reports via configfs");
index 44dcb9e7b55ec044fb78bb32e9f0aefde8ecc5a4..1fe93e93f5bcc60d6736c3e27d90a485bc21a68c 100644 (file)
@@ -745,7 +745,7 @@ static void report_free_page_func(struct work_struct *work)
  *  2) update the host about the old page removed from vb->pages list;
  *
  * This function preforms the balloon page migration task.
- * Called through balloon_mapping->a_ops->migratepage
+ * Called through movable_operations->migrate_page
  */
 static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
                struct page *newpage, struct page *page, enum migrate_mode mode)
index d6bb68ba84e54b120236b40f173965efb1c9aff9..ee6a386d250b168bdd59153a62ddceb361c0af93 100644 (file)
@@ -39,6 +39,39 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
                __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
 }
 
+static int __vp_check_common_size_one_feature(struct virtio_device *vdev, u32 fbit,
+                                           u32 offset, const char *fname)
+{
+       struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+       if (!__virtio_test_bit(vdev, fbit))
+               return 0;
+
+       if (likely(vp_dev->mdev.common_len >= offset))
+               return 0;
+
+       dev_err(&vdev->dev,
+               "virtio: common cfg size(%zu) does not match the feature %s\n",
+               vp_dev->mdev.common_len, fname);
+
+       return -EINVAL;
+}
+
+#define vp_check_common_size_one_feature(vdev, fbit, field) \
+       __vp_check_common_size_one_feature(vdev, fbit, \
+               offsetofend(struct virtio_pci_modern_common_cfg, field), #fbit)
+
+static int vp_check_common_size(struct virtio_device *vdev)
+{
+       if (vp_check_common_size_one_feature(vdev, VIRTIO_F_NOTIF_CONFIG_DATA, queue_notify_data))
+               return -EINVAL;
+
+       if (vp_check_common_size_one_feature(vdev, VIRTIO_F_RING_RESET, queue_reset))
+               return -EINVAL;
+
+       return 0;
+}
+
 /* virtio config->finalize_features() implementation */
 static int vp_finalize_features(struct virtio_device *vdev)
 {
@@ -57,6 +90,9 @@ static int vp_finalize_features(struct virtio_device *vdev)
                return -EINVAL;
        }
 
+       if (vp_check_common_size(vdev))
+               return -EINVAL;
+
        vp_modern_set_features(&vp_dev->mdev, vdev->features);
 
        return 0;
index 9cb601e16688dccc8373fef26a604e1ea5766765..e2a1fe7bb66cc9c4da102f1559da648173c3376f 100644 (file)
@@ -203,6 +203,10 @@ static inline void check_offsets(void)
                     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
                     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NDATA !=
+                    offsetof(struct virtio_pci_modern_common_cfg, queue_notify_data));
+       BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_RESET !=
+                    offsetof(struct virtio_pci_modern_common_cfg, queue_reset));
 }
 
 /*
@@ -292,7 +296,7 @@ int vp_modern_probe(struct virtio_pci_modern_device *mdev)
        mdev->common = vp_modern_map_capability(mdev, common,
                                      sizeof(struct virtio_pci_common_cfg), 4,
                                      0, sizeof(struct virtio_pci_modern_common_cfg),
-                                     NULL, NULL);
+                                     &mdev->common_len, NULL);
        if (!mdev->common)
                goto err_map_common;
        mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
index 06ce6d8c2e00473bea0df046f296b3ad17838f65..8d63e5923d245b2eef2c7a51e9456b96b9636ca4 100644 (file)
@@ -100,7 +100,7 @@ static void virtio_vdpa_reset(struct virtio_device *vdev)
 {
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
 
-       vdpa_reset(vdpa);
+       vdpa_reset(vdpa, 0);
 }
 
 static bool virtio_vdpa_notify(struct virtqueue *vq)
diff --git a/drivers/vlynq/Kconfig b/drivers/vlynq/Kconfig
deleted file mode 100644 (file)
index e7f9492..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-menu "TI VLYNQ"
-       depends on AR7
-
-config VLYNQ
-       bool "TI VLYNQ bus support"
-       help
-         Support for Texas Instruments(R) VLYNQ bus.
-         The VLYNQ bus is a high-speed, serial and packetized
-         data bus which allows external peripherals of a SoC
-         to appear into the system's main memory.
-
-         If unsure, say N
-
-config VLYNQ_DEBUG
-       bool "VLYNQ bus debug"
-       depends on VLYNQ && DEBUG_KERNEL
-       help
-         Turn on VLYNQ bus debugging.
-
-endmenu
diff --git a/drivers/vlynq/Makefile b/drivers/vlynq/Makefile
deleted file mode 100644 (file)
index d9ce5b2..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-#
-# Makefile for kernel vlynq drivers
-#
-
-obj-$(CONFIG_VLYNQ) += vlynq.o
diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c
deleted file mode 100644 (file)
index 4af6615..0000000
+++ /dev/null
@@ -1,799 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
- *
- * Parts of the VLYNQ specification can be found here:
- * http://www.ti.com/litv/pdf/sprue36a
- */
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-
-#include <linux/vlynq.h>
-
-#define VLYNQ_CTRL_PM_ENABLE           0x80000000
-#define VLYNQ_CTRL_CLOCK_INT           0x00008000
-#define VLYNQ_CTRL_CLOCK_DIV(x)                (((x) & 7) << 16)
-#define VLYNQ_CTRL_INT_LOCAL           0x00004000
-#define VLYNQ_CTRL_INT_ENABLE          0x00002000
-#define VLYNQ_CTRL_INT_VECTOR(x)       (((x) & 0x1f) << 8)
-#define VLYNQ_CTRL_INT2CFG             0x00000080
-#define VLYNQ_CTRL_RESET               0x00000001
-
-#define VLYNQ_CTRL_CLOCK_MASK          (0x7 << 16)
-
-#define VLYNQ_INT_OFFSET               0x00000014
-#define VLYNQ_REMOTE_OFFSET            0x00000080
-
-#define VLYNQ_STATUS_LINK              0x00000001
-#define VLYNQ_STATUS_LERROR            0x00000080
-#define VLYNQ_STATUS_RERROR            0x00000100
-
-#define VINT_ENABLE                    0x00000100
-#define VINT_TYPE_EDGE                 0x00000080
-#define VINT_LEVEL_LOW                 0x00000040
-#define VINT_VECTOR(x)                 ((x) & 0x1f)
-#define VINT_OFFSET(irq)               (8 * ((irq) % 4))
-
-#define VLYNQ_AUTONEGO_V2              0x00010000
-
-struct vlynq_regs {
-       u32 revision;
-       u32 control;
-       u32 status;
-       u32 int_prio;
-       u32 int_status;
-       u32 int_pending;
-       u32 int_ptr;
-       u32 tx_offset;
-       struct vlynq_mapping rx_mapping[4];
-       u32 chip;
-       u32 autonego;
-       u32 unused[6];
-       u32 int_device[8];
-};
-
-#ifdef CONFIG_VLYNQ_DEBUG
-static void vlynq_dump_regs(struct vlynq_device *dev)
-{
-       int i;
-
-       printk(KERN_DEBUG "VLYNQ local=%p remote=%p\n",
-                       dev->local, dev->remote);
-       for (i = 0; i < 32; i++) {
-               printk(KERN_DEBUG "VLYNQ: local %d: %08x\n",
-                       i + 1, ((u32 *)dev->local)[i]);
-               printk(KERN_DEBUG "VLYNQ: remote %d: %08x\n",
-                       i + 1, ((u32 *)dev->remote)[i]);
-       }
-}
-
-static void vlynq_dump_mem(u32 *base, int count)
-{
-       int i;
-
-       for (i = 0; i < (count + 3) / 4; i++) {
-               if (i % 4 == 0)
-                       printk(KERN_DEBUG "\nMEM[0x%04x]:", i * 4);
-               printk(KERN_DEBUG " 0x%08x", *(base + i));
-       }
-       printk(KERN_DEBUG "\n");
-}
-#endif
-
-/* Check the VLYNQ link status with a given device */
-static int vlynq_linked(struct vlynq_device *dev)
-{
-       int i;
-
-       for (i = 0; i < 100; i++)
-               if (readl(&dev->local->status) & VLYNQ_STATUS_LINK)
-                       return 1;
-               else
-                       cpu_relax();
-
-       return 0;
-}
-
-static void vlynq_reset(struct vlynq_device *dev)
-{
-       writel(readl(&dev->local->control) | VLYNQ_CTRL_RESET,
-                       &dev->local->control);
-
-       /* Wait for the devices to finish resetting */
-       msleep(5);
-
-       /* Remove reset bit */
-       writel(readl(&dev->local->control) & ~VLYNQ_CTRL_RESET,
-                       &dev->local->control);
-
-       /* Give some time for the devices to settle */
-       msleep(5);
-}
-
-static void vlynq_irq_unmask(struct irq_data *d)
-{
-       struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
-       int virq;
-       u32 val;
-
-       BUG_ON(!dev);
-       virq = d->irq - dev->irq_start;
-       val = readl(&dev->remote->int_device[virq >> 2]);
-       val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq);
-       writel(val, &dev->remote->int_device[virq >> 2]);
-}
-
-static void vlynq_irq_mask(struct irq_data *d)
-{
-       struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
-       int virq;
-       u32 val;
-
-       BUG_ON(!dev);
-       virq = d->irq - dev->irq_start;
-       val = readl(&dev->remote->int_device[virq >> 2]);
-       val &= ~(VINT_ENABLE << VINT_OFFSET(virq));
-       writel(val, &dev->remote->int_device[virq >> 2]);
-}
-
-static int vlynq_irq_type(struct irq_data *d, unsigned int flow_type)
-{
-       struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
-       int virq;
-       u32 val;
-
-       BUG_ON(!dev);
-       virq = d->irq - dev->irq_start;
-       val = readl(&dev->remote->int_device[virq >> 2]);
-       switch (flow_type & IRQ_TYPE_SENSE_MASK) {
-       case IRQ_TYPE_EDGE_RISING:
-       case IRQ_TYPE_EDGE_FALLING:
-       case IRQ_TYPE_EDGE_BOTH:
-               val |= VINT_TYPE_EDGE << VINT_OFFSET(virq);
-               val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
-               break;
-       case IRQ_TYPE_LEVEL_HIGH:
-               val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
-               val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq));
-               break;
-       case IRQ_TYPE_LEVEL_LOW:
-               val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq));
-               val |= VINT_LEVEL_LOW << VINT_OFFSET(virq);
-               break;
-       default:
-               return -EINVAL;
-       }
-       writel(val, &dev->remote->int_device[virq >> 2]);
-       return 0;
-}
-
-static void vlynq_local_ack(struct irq_data *d)
-{
-       struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
-       u32 status = readl(&dev->local->status);
-
-       pr_debug("%s: local status: 0x%08x\n",
-                      dev_name(&dev->dev), status);
-       writel(status, &dev->local->status);
-}
-
-static void vlynq_remote_ack(struct irq_data *d)
-{
-       struct vlynq_device *dev = irq_data_get_irq_chip_data(d);
-       u32 status = readl(&dev->remote->status);
-
-       pr_debug("%s: remote status: 0x%08x\n",
-                      dev_name(&dev->dev), status);
-       writel(status, &dev->remote->status);
-}
-
-static irqreturn_t vlynq_irq(int irq, void *dev_id)
-{
-       struct vlynq_device *dev = dev_id;
-       u32 status;
-       int virq = 0;
-
-       status = readl(&dev->local->int_status);
-       writel(status, &dev->local->int_status);
-
-       if (unlikely(!status))
-               spurious_interrupt();
-
-       while (status) {
-               if (status & 1)
-                       do_IRQ(dev->irq_start + virq);
-               status >>= 1;
-               virq++;
-       }
-
-       return IRQ_HANDLED;
-}
-
-static struct irq_chip vlynq_irq_chip = {
-       .name = "vlynq",
-       .irq_unmask = vlynq_irq_unmask,
-       .irq_mask = vlynq_irq_mask,
-       .irq_set_type = vlynq_irq_type,
-};
-
-static struct irq_chip vlynq_local_chip = {
-       .name = "vlynq local error",
-       .irq_unmask = vlynq_irq_unmask,
-       .irq_mask = vlynq_irq_mask,
-       .irq_ack = vlynq_local_ack,
-};
-
-static struct irq_chip vlynq_remote_chip = {
-       .name = "vlynq local error",
-       .irq_unmask = vlynq_irq_unmask,
-       .irq_mask = vlynq_irq_mask,
-       .irq_ack = vlynq_remote_ack,
-};
-
-static int vlynq_setup_irq(struct vlynq_device *dev)
-{
-       u32 val;
-       int i, virq;
-
-       if (dev->local_irq == dev->remote_irq) {
-               printk(KERN_ERR
-                      "%s: local vlynq irq should be different from remote\n",
-                      dev_name(&dev->dev));
-               return -EINVAL;
-       }
-
-       /* Clear local and remote error bits */
-       writel(readl(&dev->local->status), &dev->local->status);
-       writel(readl(&dev->remote->status), &dev->remote->status);
-
-       /* Now setup interrupts */
-       val = VLYNQ_CTRL_INT_VECTOR(dev->local_irq);
-       val |= VLYNQ_CTRL_INT_ENABLE | VLYNQ_CTRL_INT_LOCAL |
-               VLYNQ_CTRL_INT2CFG;
-       val |= readl(&dev->local->control);
-       writel(VLYNQ_INT_OFFSET, &dev->local->int_ptr);
-       writel(val, &dev->local->control);
-
-       val = VLYNQ_CTRL_INT_VECTOR(dev->remote_irq);
-       val |= VLYNQ_CTRL_INT_ENABLE;
-       val |= readl(&dev->remote->control);
-       writel(VLYNQ_INT_OFFSET, &dev->remote->int_ptr);
-       writel(val, &dev->remote->int_ptr);
-       writel(val, &dev->remote->control);
-
-       for (i = dev->irq_start; i <= dev->irq_end; i++) {
-               virq = i - dev->irq_start;
-               if (virq == dev->local_irq) {
-                       irq_set_chip_and_handler(i, &vlynq_local_chip,
-                                                handle_level_irq);
-                       irq_set_chip_data(i, dev);
-               } else if (virq == dev->remote_irq) {
-                       irq_set_chip_and_handler(i, &vlynq_remote_chip,
-                                                handle_level_irq);
-                       irq_set_chip_data(i, dev);
-               } else {
-                       irq_set_chip_and_handler(i, &vlynq_irq_chip,
-                                                handle_simple_irq);
-                       irq_set_chip_data(i, dev);
-                       writel(0, &dev->remote->int_device[virq >> 2]);
-               }
-       }
-
-       if (request_irq(dev->irq, vlynq_irq, IRQF_SHARED, "vlynq", dev)) {
-               printk(KERN_ERR "%s: request_irq failed\n",
-                                       dev_name(&dev->dev));
-               return -EAGAIN;
-       }
-
-       return 0;
-}
-
-static void vlynq_device_release(struct device *dev)
-{
-       struct vlynq_device *vdev = to_vlynq_device(dev);
-       kfree(vdev);
-}
-
-static int vlynq_device_match(struct device *dev,
-                             struct device_driver *drv)
-{
-       struct vlynq_device *vdev = to_vlynq_device(dev);
-       struct vlynq_driver *vdrv = to_vlynq_driver(drv);
-       struct vlynq_device_id *ids = vdrv->id_table;
-
-       while (ids->id) {
-               if (ids->id == vdev->dev_id) {
-                       vdev->divisor = ids->divisor;
-                       vlynq_set_drvdata(vdev, ids);
-                       printk(KERN_INFO "Driver found for VLYNQ "
-                               "device: %08x\n", vdev->dev_id);
-                       return 1;
-               }
-               printk(KERN_DEBUG "Not using the %08x VLYNQ device's driver"
-                       " for VLYNQ device: %08x\n", ids->id, vdev->dev_id);
-               ids++;
-       }
-       return 0;
-}
-
-static int vlynq_device_probe(struct device *dev)
-{
-       struct vlynq_device *vdev = to_vlynq_device(dev);
-       struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
-       struct vlynq_device_id *id = vlynq_get_drvdata(vdev);
-       int result = -ENODEV;
-
-       if (drv->probe)
-               result = drv->probe(vdev, id);
-       if (result)
-               put_device(dev);
-       return result;
-}
-
-static void vlynq_device_remove(struct device *dev)
-{
-       struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
-
-       if (drv->remove)
-               drv->remove(to_vlynq_device(dev));
-}
-
-int __vlynq_register_driver(struct vlynq_driver *driver, struct module *owner)
-{
-       driver->driver.name = driver->name;
-       driver->driver.bus = &vlynq_bus_type;
-       return driver_register(&driver->driver);
-}
-EXPORT_SYMBOL(__vlynq_register_driver);
-
-void vlynq_unregister_driver(struct vlynq_driver *driver)
-{
-       driver_unregister(&driver->driver);
-}
-EXPORT_SYMBOL(vlynq_unregister_driver);
-
-/*
- * A VLYNQ remote device can clock the VLYNQ bus master
- * using a dedicated clock line. In that case, both the
- * remove device and the bus master should have the same
- * serial clock dividers configured. Iterate through the
- * 8 possible dividers until we actually link with the
- * device.
- */
-static int __vlynq_try_remote(struct vlynq_device *dev)
-{
-       int i;
-
-       vlynq_reset(dev);
-       for (i = dev->dev_id ? vlynq_rdiv2 : vlynq_rdiv8; dev->dev_id ?
-                       i <= vlynq_rdiv8 : i >= vlynq_rdiv2;
-               dev->dev_id ? i++ : i--) {
-
-               if (!vlynq_linked(dev))
-                       break;
-
-               writel((readl(&dev->remote->control) &
-                               ~VLYNQ_CTRL_CLOCK_MASK) |
-                               VLYNQ_CTRL_CLOCK_INT |
-                               VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
-                               &dev->remote->control);
-               writel((readl(&dev->local->control)
-                               & ~(VLYNQ_CTRL_CLOCK_INT |
-                               VLYNQ_CTRL_CLOCK_MASK)) |
-                               VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1),
-                               &dev->local->control);
-
-               if (vlynq_linked(dev)) {
-                       printk(KERN_DEBUG
-                               "%s: using remote clock divisor %d\n",
-                               dev_name(&dev->dev), i - vlynq_rdiv1 + 1);
-                       dev->divisor = i;
-                       return 0;
-               } else {
-                       vlynq_reset(dev);
-               }
-       }
-
-       return -ENODEV;
-}
-
-/*
- * A VLYNQ remote device can be clocked by the VLYNQ bus
- * master using a dedicated clock line. In that case, only
- * the bus master configures the serial clock divider.
- * Iterate through the 8 possible dividers until we
- * actually get a link with the device.
- */
-static int __vlynq_try_local(struct vlynq_device *dev)
-{
-       int i;
-
-       vlynq_reset(dev);
-
-       for (i = dev->dev_id ? vlynq_ldiv2 : vlynq_ldiv8; dev->dev_id ?
-                       i <= vlynq_ldiv8 : i >= vlynq_ldiv2;
-               dev->dev_id ? i++ : i--) {
-
-               writel((readl(&dev->local->control) &
-                               ~VLYNQ_CTRL_CLOCK_MASK) |
-                               VLYNQ_CTRL_CLOCK_INT |
-                               VLYNQ_CTRL_CLOCK_DIV(i - vlynq_ldiv1),
-                               &dev->local->control);
-
-               if (vlynq_linked(dev)) {
-                       printk(KERN_DEBUG
-                               "%s: using local clock divisor %d\n",
-                               dev_name(&dev->dev), i - vlynq_ldiv1 + 1);
-                       dev->divisor = i;
-                       return 0;
-               } else {
-                       vlynq_reset(dev);
-               }
-       }
-
-       return -ENODEV;
-}
-
-/*
- * When using external clocking method, serial clock
- * is supplied by an external oscillator, therefore we
- * should mask the local clock bit in the clock control
- * register for both the bus master and the remote device.
- */
-static int __vlynq_try_external(struct vlynq_device *dev)
-{
-       vlynq_reset(dev);
-       if (!vlynq_linked(dev))
-               return -ENODEV;
-
-       writel((readl(&dev->remote->control) &
-                       ~VLYNQ_CTRL_CLOCK_INT),
-                       &dev->remote->control);
-
-       writel((readl(&dev->local->control) &
-                       ~VLYNQ_CTRL_CLOCK_INT),
-                       &dev->local->control);
-
-       if (vlynq_linked(dev)) {
-               printk(KERN_DEBUG "%s: using external clock\n",
-                       dev_name(&dev->dev));
-                       dev->divisor = vlynq_div_external;
-               return 0;
-       }
-
-       return -ENODEV;
-}
-
-static int __vlynq_enable_device(struct vlynq_device *dev)
-{
-       int result;
-       struct plat_vlynq_ops *ops = dev->dev.platform_data;
-
-       result = ops->on(dev);
-       if (result)
-               return result;
-
-       switch (dev->divisor) {
-       case vlynq_div_external:
-       case vlynq_div_auto:
-               /* When the device is brought from reset it should have clock
-                * generation negotiated by hardware.
-                * Check which device is generating clocks and perform setup
-                * accordingly */
-               if (vlynq_linked(dev) && readl(&dev->remote->control) &
-                  VLYNQ_CTRL_CLOCK_INT) {
-                       if (!__vlynq_try_remote(dev) ||
-                               !__vlynq_try_local(dev)  ||
-                               !__vlynq_try_external(dev))
-                               return 0;
-               } else {
-                       if (!__vlynq_try_external(dev) ||
-                               !__vlynq_try_local(dev)    ||
-                               !__vlynq_try_remote(dev))
-                               return 0;
-               }
-               break;
-       case vlynq_ldiv1:
-       case vlynq_ldiv2:
-       case vlynq_ldiv3:
-       case vlynq_ldiv4:
-       case vlynq_ldiv5:
-       case vlynq_ldiv6:
-       case vlynq_ldiv7:
-       case vlynq_ldiv8:
-               writel(VLYNQ_CTRL_CLOCK_INT |
-                       VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
-                       vlynq_ldiv1), &dev->local->control);
-               writel(0, &dev->remote->control);
-               if (vlynq_linked(dev)) {
-                       printk(KERN_DEBUG
-                               "%s: using local clock divisor %d\n",
-                               dev_name(&dev->dev),
-                               dev->divisor - vlynq_ldiv1 + 1);
-                       return 0;
-               }
-               break;
-       case vlynq_rdiv1:
-       case vlynq_rdiv2:
-       case vlynq_rdiv3:
-       case vlynq_rdiv4:
-       case vlynq_rdiv5:
-       case vlynq_rdiv6:
-       case vlynq_rdiv7:
-       case vlynq_rdiv8:
-               writel(0, &dev->local->control);
-               writel(VLYNQ_CTRL_CLOCK_INT |
-                       VLYNQ_CTRL_CLOCK_DIV(dev->divisor -
-                       vlynq_rdiv1), &dev->remote->control);
-               if (vlynq_linked(dev)) {
-                       printk(KERN_DEBUG
-                               "%s: using remote clock divisor %d\n",
-                               dev_name(&dev->dev),
-                               dev->divisor - vlynq_rdiv1 + 1);
-                       return 0;
-               }
-               break;
-       }
-
-       ops->off(dev);
-       return -ENODEV;
-}
-
-int vlynq_enable_device(struct vlynq_device *dev)
-{
-       struct plat_vlynq_ops *ops = dev->dev.platform_data;
-       int result = -ENODEV;
-
-       result = __vlynq_enable_device(dev);
-       if (result)
-               return result;
-
-       result = vlynq_setup_irq(dev);
-       if (result)
-               ops->off(dev);
-
-       dev->enabled = !result;
-       return result;
-}
-EXPORT_SYMBOL(vlynq_enable_device);
-
-
-void vlynq_disable_device(struct vlynq_device *dev)
-{
-       struct plat_vlynq_ops *ops = dev->dev.platform_data;
-
-       dev->enabled = 0;
-       free_irq(dev->irq, dev);
-       ops->off(dev);
-}
-EXPORT_SYMBOL(vlynq_disable_device);
-
-int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
-                           struct vlynq_mapping *mapping)
-{
-       int i;
-
-       if (!dev->enabled)
-               return -ENXIO;
-
-       writel(tx_offset, &dev->local->tx_offset);
-       for (i = 0; i < 4; i++) {
-               writel(mapping[i].offset, &dev->local->rx_mapping[i].offset);
-               writel(mapping[i].size, &dev->local->rx_mapping[i].size);
-       }
-       return 0;
-}
-EXPORT_SYMBOL(vlynq_set_local_mapping);
-
-int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
-                            struct vlynq_mapping *mapping)
-{
-       int i;
-
-       if (!dev->enabled)
-               return -ENXIO;
-
-       writel(tx_offset, &dev->remote->tx_offset);
-       for (i = 0; i < 4; i++) {
-               writel(mapping[i].offset, &dev->remote->rx_mapping[i].offset);
-               writel(mapping[i].size, &dev->remote->rx_mapping[i].size);
-       }
-       return 0;
-}
-EXPORT_SYMBOL(vlynq_set_remote_mapping);
-
-int vlynq_set_local_irq(struct vlynq_device *dev, int virq)
-{
-       int irq = dev->irq_start + virq;
-       if (dev->enabled)
-               return -EBUSY;
-
-       if ((irq < dev->irq_start) || (irq > dev->irq_end))
-               return -EINVAL;
-
-       if (virq == dev->remote_irq)
-               return -EINVAL;
-
-       dev->local_irq = virq;
-
-       return 0;
-}
-EXPORT_SYMBOL(vlynq_set_local_irq);
-
-int vlynq_set_remote_irq(struct vlynq_device *dev, int virq)
-{
-       int irq = dev->irq_start + virq;
-       if (dev->enabled)
-               return -EBUSY;
-
-       if ((irq < dev->irq_start) || (irq > dev->irq_end))
-               return -EINVAL;
-
-       if (virq == dev->local_irq)
-               return -EINVAL;
-
-       dev->remote_irq = virq;
-
-       return 0;
-}
-EXPORT_SYMBOL(vlynq_set_remote_irq);
-
-static int vlynq_probe(struct platform_device *pdev)
-{
-       struct vlynq_device *dev;
-       struct resource *regs_res, *mem_res, *irq_res;
-       int len, result;
-
-       regs_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
-       if (!regs_res)
-               return -ENODEV;
-
-       mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem");
-       if (!mem_res)
-               return -ENODEV;
-
-       irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "devirq");
-       if (!irq_res)
-               return -ENODEV;
-
-       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-       if (!dev) {
-               printk(KERN_ERR
-                      "vlynq: failed to allocate device structure\n");
-               return -ENOMEM;
-       }
-
-       dev->id = pdev->id;
-       dev->dev.bus = &vlynq_bus_type;
-       dev->dev.parent = &pdev->dev;
-       dev_set_name(&dev->dev, "vlynq%d", dev->id);
-       dev->dev.platform_data = pdev->dev.platform_data;
-       dev->dev.release = vlynq_device_release;
-
-       dev->regs_start = regs_res->start;
-       dev->regs_end = regs_res->end;
-       dev->mem_start = mem_res->start;
-       dev->mem_end = mem_res->end;
-
-       len = resource_size(regs_res);
-       if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) {
-               printk(KERN_ERR "%s: Can't request vlynq registers\n",
-                      dev_name(&dev->dev));
-               result = -ENXIO;
-               goto fail_request;
-       }
-
-       dev->local = ioremap(regs_res->start, len);
-       if (!dev->local) {
-               printk(KERN_ERR "%s: Can't remap vlynq registers\n",
-                      dev_name(&dev->dev));
-               result = -ENXIO;
-               goto fail_remap;
-       }
-
-       dev->remote = (struct vlynq_regs *)((void *)dev->local +
-                                           VLYNQ_REMOTE_OFFSET);
-
-       dev->irq = platform_get_irq_byname(pdev, "irq");
-       dev->irq_start = irq_res->start;
-       dev->irq_end = irq_res->end;
-       dev->local_irq = dev->irq_end - dev->irq_start;
-       dev->remote_irq = dev->local_irq - 1;
-
-       if (device_register(&dev->dev))
-               goto fail_register;
-       platform_set_drvdata(pdev, dev);
-
-       printk(KERN_INFO "%s: regs 0x%p, irq %d, mem 0x%p\n",
-              dev_name(&dev->dev), (void *)dev->regs_start, dev->irq,
-              (void *)dev->mem_start);
-
-       dev->dev_id = 0;
-       dev->divisor = vlynq_div_auto;
-       result = __vlynq_enable_device(dev);
-       if (result == 0) {
-               dev->dev_id = readl(&dev->remote->chip);
-               ((struct plat_vlynq_ops *)(dev->dev.platform_data))->off(dev);
-       }
-       if (dev->dev_id)
-               printk(KERN_INFO "Found a VLYNQ device: %08x\n", dev->dev_id);
-
-       return 0;
-
-fail_register:
-       iounmap(dev->local);
-fail_remap:
-fail_request:
-       release_mem_region(regs_res->start, len);
-       kfree(dev);
-       return result;
-}
-
-static int vlynq_remove(struct platform_device *pdev)
-{
-       struct vlynq_device *dev = platform_get_drvdata(pdev);
-
-       device_unregister(&dev->dev);
-       iounmap(dev->local);
-       release_mem_region(dev->regs_start,
-                          dev->regs_end - dev->regs_start + 1);
-
-       kfree(dev);
-
-       return 0;
-}
-
-static struct platform_driver vlynq_platform_driver = {
-       .driver.name = "vlynq",
-       .probe = vlynq_probe,
-       .remove = vlynq_remove,
-};
-
-struct bus_type vlynq_bus_type = {
-       .name = "vlynq",
-       .match = vlynq_device_match,
-       .probe = vlynq_device_probe,
-       .remove = vlynq_device_remove,
-};
-EXPORT_SYMBOL(vlynq_bus_type);
-
-static int vlynq_init(void)
-{
-       int res = 0;
-
-       res = bus_register(&vlynq_bus_type);
-       if (res)
-               goto fail_bus;
-
-       res = platform_driver_register(&vlynq_platform_driver);
-       if (res)
-               goto fail_platform;
-
-       return 0;
-
-fail_platform:
-       bus_unregister(&vlynq_bus_type);
-fail_bus:
-       return res;
-}
-
-static void vlynq_exit(void)
-{
-       platform_driver_unregister(&vlynq_platform_driver);
-       bus_unregister(&vlynq_bus_type);
-}
-
-module_init(vlynq_init);
-module_exit(vlynq_exit);
index 8cb6fa45d599a2fc6d8a5607b68c59fedc3dad98..7d22051b15a29946f47ffb0d3c0beb6ecf20e766 100644 (file)
@@ -1768,12 +1768,6 @@ config SIBYTE_WDOG
          To compile this driver as a loadable module, choose M here.
          The module will be called sb_wdog.
 
-config AR7_WDT
-       tristate "TI AR7 Watchdog Timer"
-       depends on AR7 || (MIPS && 32BIT && COMPILE_TEST)
-       help
-         Hardware driver for the TI AR7 Watchdog Timer.
-
 config TXX9_WDT
        tristate "Toshiba TXx9 Watchdog Timer"
        depends on CPU_TX49XX || (MIPS && COMPILE_TEST)
index 7eab9de311cb93a3579615ed91dbe2eecaa816dc..7cbc34514ec115cdc10cc5fd5d22c2404c8d49fe 100644 (file)
@@ -168,7 +168,6 @@ obj-$(CONFIG_INDYDOG) += indydog.o
 obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o
 obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
 obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
-obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
 obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
 obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o
 octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o
index eddeb0fede896d88e8c1ea2283cd305f076d5df8..d4f739932f0be8394c827e9beed3c1bbc4aace0d 100644 (file)
@@ -173,6 +173,8 @@ static int apple_wdt_probe(struct platform_device *pdev)
        if (!wdt->clk_rate)
                return -EINVAL;
 
+       platform_set_drvdata(pdev, wdt);
+
        wdt->wdd.ops = &apple_wdt_ops;
        wdt->wdd.info = &apple_wdt_info;
        wdt->wdd.max_timeout = U32_MAX / wdt->clk_rate;
@@ -190,6 +192,28 @@ static int apple_wdt_probe(struct platform_device *pdev)
        return devm_watchdog_register_device(dev, &wdt->wdd);
 }
 
+static int apple_wdt_resume(struct device *dev)
+{
+       struct apple_wdt *wdt = dev_get_drvdata(dev);
+
+       if (watchdog_active(&wdt->wdd) || watchdog_hw_running(&wdt->wdd))
+               apple_wdt_start(&wdt->wdd);
+
+       return 0;
+}
+
+static int apple_wdt_suspend(struct device *dev)
+{
+       struct apple_wdt *wdt = dev_get_drvdata(dev);
+
+       if (watchdog_active(&wdt->wdd) || watchdog_hw_running(&wdt->wdd))
+               apple_wdt_stop(&wdt->wdd);
+
+       return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(apple_wdt_pm_ops, apple_wdt_suspend, apple_wdt_resume);
+
 static const struct of_device_id apple_wdt_of_match[] = {
        { .compatible = "apple,wdt" },
        {},
@@ -200,6 +224,7 @@ static struct platform_driver apple_wdt_driver = {
        .driver = {
                .name = "apple-watchdog",
                .of_match_table = apple_wdt_of_match,
+               .pm = pm_sleep_ptr(&apple_wdt_pm_ops),
        },
        .probe = apple_wdt_probe,
 };
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
deleted file mode 100644 (file)
index cdcaeb0..0000000
+++ /dev/null
@@ -1,315 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * drivers/watchdog/ar7_wdt.c
- *
- * Copyright (C) 2007 Nicolas Thill <nico@openwrt.org>
- * Copyright (c) 2005 Enrik Berkhan <Enrik.Berkhan@akk.org>
- *
- * Some code taken from:
- * National Semiconductor SCx200 Watchdog support
- * Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/errno.h>
-#include <linux/miscdevice.h>
-#include <linux/platform_device.h>
-#include <linux/watchdog.h>
-#include <linux/fs.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-#include <linux/clk.h>
-
-#include <asm/addrspace.h>
-#include <asm/mach-ar7/ar7.h>
-
-#define LONGNAME "TI AR7 Watchdog Timer"
-
-MODULE_AUTHOR("Nicolas Thill <nico@openwrt.org>");
-MODULE_DESCRIPTION(LONGNAME);
-MODULE_LICENSE("GPL");
-
-static int margin = 60;
-module_param(margin, int, 0);
-MODULE_PARM_DESC(margin, "Watchdog margin in seconds");
-
-static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
-
-#define READ_REG(x) readl((void __iomem *)&(x))
-#define WRITE_REG(x, v) writel((v), (void __iomem *)&(x))
-
-struct ar7_wdt {
-       u32 kick_lock;
-       u32 kick;
-       u32 change_lock;
-       u32 change;
-       u32 disable_lock;
-       u32 disable;
-       u32 prescale_lock;
-       u32 prescale;
-};
-
-static unsigned long wdt_is_open;
-static unsigned expect_close;
-static DEFINE_SPINLOCK(wdt_lock);
-
-/* XXX currently fixed, allows max margin ~68.72 secs */
-#define prescale_value 0xffff
-
-/* Pointer to the remapped WDT IO space */
-static struct ar7_wdt *ar7_wdt;
-
-static struct clk *vbus_clk;
-
-static void ar7_wdt_kick(u32 value)
-{
-       WRITE_REG(ar7_wdt->kick_lock, 0x5555);
-       if ((READ_REG(ar7_wdt->kick_lock) & 3) == 1) {
-               WRITE_REG(ar7_wdt->kick_lock, 0xaaaa);
-               if ((READ_REG(ar7_wdt->kick_lock) & 3) == 3) {
-                       WRITE_REG(ar7_wdt->kick, value);
-                       return;
-               }
-       }
-       pr_err("failed to unlock WDT kick reg\n");
-}
-
-static void ar7_wdt_prescale(u32 value)
-{
-       WRITE_REG(ar7_wdt->prescale_lock, 0x5a5a);
-       if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 1) {
-               WRITE_REG(ar7_wdt->prescale_lock, 0xa5a5);
-               if ((READ_REG(ar7_wdt->prescale_lock) & 3) == 3) {
-                       WRITE_REG(ar7_wdt->prescale, value);
-                       return;
-               }
-       }
-       pr_err("failed to unlock WDT prescale reg\n");
-}
-
-static void ar7_wdt_change(u32 value)
-{
-       WRITE_REG(ar7_wdt->change_lock, 0x6666);
-       if ((READ_REG(ar7_wdt->change_lock) & 3) == 1) {
-               WRITE_REG(ar7_wdt->change_lock, 0xbbbb);
-               if ((READ_REG(ar7_wdt->change_lock) & 3) == 3) {
-                       WRITE_REG(ar7_wdt->change, value);
-                       return;
-               }
-       }
-       pr_err("failed to unlock WDT change reg\n");
-}
-
-static void ar7_wdt_disable(u32 value)
-{
-       WRITE_REG(ar7_wdt->disable_lock, 0x7777);
-       if ((READ_REG(ar7_wdt->disable_lock) & 3) == 1) {
-               WRITE_REG(ar7_wdt->disable_lock, 0xcccc);
-               if ((READ_REG(ar7_wdt->disable_lock) & 3) == 2) {
-                       WRITE_REG(ar7_wdt->disable_lock, 0xdddd);
-                       if ((READ_REG(ar7_wdt->disable_lock) & 3) == 3) {
-                               WRITE_REG(ar7_wdt->disable, value);
-                               return;
-                       }
-               }
-       }
-       pr_err("failed to unlock WDT disable reg\n");
-}
-
-static void ar7_wdt_update_margin(int new_margin)
-{
-       u32 change;
-       u32 vbus_rate;
-
-       vbus_rate = clk_get_rate(vbus_clk);
-       change = new_margin * (vbus_rate / prescale_value);
-       if (change < 1)
-               change = 1;
-       if (change > 0xffff)
-               change = 0xffff;
-       ar7_wdt_change(change);
-       margin = change * prescale_value / vbus_rate;
-       pr_info("timer margin %d seconds (prescale %d, change %d, freq %d)\n",
-               margin, prescale_value, change, vbus_rate);
-}
-
-static void ar7_wdt_enable_wdt(void)
-{
-       pr_debug("enabling watchdog timer\n");
-       ar7_wdt_disable(1);
-       ar7_wdt_kick(1);
-}
-
-static void ar7_wdt_disable_wdt(void)
-{
-       pr_debug("disabling watchdog timer\n");
-       ar7_wdt_disable(0);
-}
-
-static int ar7_wdt_open(struct inode *inode, struct file *file)
-{
-       /* only allow one at a time */
-       if (test_and_set_bit(0, &wdt_is_open))
-               return -EBUSY;
-       ar7_wdt_enable_wdt();
-       expect_close = 0;
-
-       return stream_open(inode, file);
-}
-
-static int ar7_wdt_release(struct inode *inode, struct file *file)
-{
-       if (!expect_close)
-               pr_warn("watchdog device closed unexpectedly, will not disable the watchdog timer\n");
-       else if (!nowayout)
-               ar7_wdt_disable_wdt();
-       clear_bit(0, &wdt_is_open);
-       return 0;
-}
-
-static ssize_t ar7_wdt_write(struct file *file, const char *data,
-                            size_t len, loff_t *ppos)
-{
-       /* check for a magic close character */
-       if (len) {
-               size_t i;
-
-               spin_lock(&wdt_lock);
-               ar7_wdt_kick(1);
-               spin_unlock(&wdt_lock);
-
-               expect_close = 0;
-               for (i = 0; i < len; ++i) {
-                       char c;
-                       if (get_user(c, data + i))
-                               return -EFAULT;
-                       if (c == 'V')
-                               expect_close = 1;
-               }
-
-       }
-       return len;
-}
-
-static long ar7_wdt_ioctl(struct file *file,
-                                       unsigned int cmd, unsigned long arg)
-{
-       static const struct watchdog_info ident = {
-               .identity = LONGNAME,
-               .firmware_version = 1,
-               .options = (WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
-                                               WDIOF_MAGICCLOSE),
-       };
-       int new_margin;
-
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               if (copy_to_user((struct watchdog_info *)arg, &ident,
-                               sizeof(ident)))
-                       return -EFAULT;
-               return 0;
-       case WDIOC_GETSTATUS:
-       case WDIOC_GETBOOTSTATUS:
-               if (put_user(0, (int *)arg))
-                       return -EFAULT;
-               return 0;
-       case WDIOC_KEEPALIVE:
-               ar7_wdt_kick(1);
-               return 0;
-       case WDIOC_SETTIMEOUT:
-               if (get_user(new_margin, (int *)arg))
-                       return -EFAULT;
-               if (new_margin < 1)
-                       return -EINVAL;
-
-               spin_lock(&wdt_lock);
-               ar7_wdt_update_margin(new_margin);
-               ar7_wdt_kick(1);
-               spin_unlock(&wdt_lock);
-               fallthrough;
-       case WDIOC_GETTIMEOUT:
-               if (put_user(margin, (int *)arg))
-                       return -EFAULT;
-               return 0;
-       default:
-               return -ENOTTY;
-       }
-}
-
-static const struct file_operations ar7_wdt_fops = {
-       .owner          = THIS_MODULE,
-       .write          = ar7_wdt_write,
-       .unlocked_ioctl = ar7_wdt_ioctl,
-       .compat_ioctl   = compat_ptr_ioctl,
-       .open           = ar7_wdt_open,
-       .release        = ar7_wdt_release,
-       .llseek         = no_llseek,
-};
-
-static struct miscdevice ar7_wdt_miscdev = {
-       .minor          = WATCHDOG_MINOR,
-       .name           = "watchdog",
-       .fops           = &ar7_wdt_fops,
-};
-
-static int ar7_wdt_probe(struct platform_device *pdev)
-{
-       int rc;
-
-       ar7_wdt = devm_platform_ioremap_resource_byname(pdev, "regs");
-       if (IS_ERR(ar7_wdt))
-               return PTR_ERR(ar7_wdt);
-
-       vbus_clk = clk_get(NULL, "vbus");
-       if (IS_ERR(vbus_clk)) {
-               pr_err("could not get vbus clock\n");
-               return PTR_ERR(vbus_clk);
-       }
-
-       ar7_wdt_disable_wdt();
-       ar7_wdt_prescale(prescale_value);
-       ar7_wdt_update_margin(margin);
-
-       rc = misc_register(&ar7_wdt_miscdev);
-       if (rc) {
-               pr_err("unable to register misc device\n");
-               goto out;
-       }
-       return 0;
-
-out:
-       clk_put(vbus_clk);
-       vbus_clk = NULL;
-       return rc;
-}
-
-static void ar7_wdt_remove(struct platform_device *pdev)
-{
-       misc_deregister(&ar7_wdt_miscdev);
-       clk_put(vbus_clk);
-       vbus_clk = NULL;
-}
-
-static void ar7_wdt_shutdown(struct platform_device *pdev)
-{
-       if (!nowayout)
-               ar7_wdt_disable_wdt();
-}
-
-static struct platform_driver ar7_wdt_driver = {
-       .probe = ar7_wdt_probe,
-       .remove_new = ar7_wdt_remove,
-       .shutdown = ar7_wdt_shutdown,
-       .driver = {
-               .name = "ar7_wdt",
-       },
-};
-
-module_platform_driver(ar7_wdt_driver);
index b72a858bbac70235a10a78ca10592d4932c627b2..b4773a6aaf8cc7218d9846fa354c68b7d18ed0b4 100644 (file)
@@ -79,6 +79,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table);
 #define   WDT_TIMEOUT_STATUS_BOOT_SECONDARY    BIT(1)
 #define WDT_CLEAR_TIMEOUT_STATUS       0x14
 #define   WDT_CLEAR_TIMEOUT_AND_BOOT_CODE_SELECTION    BIT(0)
+#define WDT_RESET_MASK1                0x1c
+#define WDT_RESET_MASK2                0x20
 
 /*
  * WDT_RESET_WIDTH controls the characteristics of the external pulse (if
@@ -402,6 +404,8 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
 
        if ((of_device_is_compatible(np, "aspeed,ast2500-wdt")) ||
                (of_device_is_compatible(np, "aspeed,ast2600-wdt"))) {
+               u32 reset_mask[2];
+               size_t nrstmask = of_device_is_compatible(np, "aspeed,ast2600-wdt") ? 2 : 1;
                u32 reg = readl(wdt->base + WDT_RESET_WIDTH);
 
                reg &= wdt->cfg->ext_pulse_width_mask;
@@ -419,6 +423,13 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
                        reg |= WDT_OPEN_DRAIN_MAGIC;
 
                writel(reg, wdt->base + WDT_RESET_WIDTH);
+
+               ret = of_property_read_u32_array(np, "aspeed,reset-mask", reset_mask, nrstmask);
+               if (!ret) {
+                       writel(reset_mask[0], wdt->base + WDT_RESET_MASK1);
+                       if (nrstmask > 1)
+                               writel(reset_mask[1], wdt->base + WDT_RESET_MASK2);
+               }
        }
 
        if (!of_property_read_u32(np, "aspeed,ext-pulse-duration", &duration)) {
index fed7be24644209919bcbfd0332e294dbd62c44db..b111b28acb94829b14509cf12fcd1054463ccf90 100644 (file)
@@ -348,25 +348,21 @@ static int __init at91wdt_probe(struct platform_device *pdev)
        if (IS_ERR(wdt->base))
                return PTR_ERR(wdt->base);
 
-       wdt->sclk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(wdt->sclk))
-               return PTR_ERR(wdt->sclk);
-
-       err = clk_prepare_enable(wdt->sclk);
-       if (err) {
+       wdt->sclk = devm_clk_get_enabled(&pdev->dev, NULL);
+       if (IS_ERR(wdt->sclk)) {
                dev_err(&pdev->dev, "Could not enable slow clock\n");
-               return err;
+               return PTR_ERR(wdt->sclk);
        }
 
        if (pdev->dev.of_node) {
                err = of_at91wdt_init(pdev->dev.of_node, wdt);
                if (err)
-                       goto err_clk;
+                       return err;
        }
 
        err = at91_wdt_init(pdev, wdt);
        if (err)
-               goto err_clk;
+               return err;
 
        platform_set_drvdata(pdev, wdt);
 
@@ -374,11 +370,6 @@ static int __init at91wdt_probe(struct platform_device *pdev)
                wdt->wdd.timeout, wdt->nowayout);
 
        return 0;
-
-err_clk:
-       clk_disable_unprepare(wdt->sclk);
-
-       return err;
 }
 
 static int __exit at91wdt_remove(struct platform_device *pdev)
@@ -388,7 +379,6 @@ static int __exit at91wdt_remove(struct platform_device *pdev)
 
        pr_warn("I quit now, hardware will probably reboot!\n");
        del_timer(&wdt->timer);
-       clk_disable_unprepare(wdt->sclk);
 
        return 0;
 }
index b7b705060438865c3044c73bf824119ae10bbf6d..e5cc30622b12f7894299abc7a67eab22eee00dd8 100644 (file)
@@ -257,19 +257,13 @@ static int ath79_wdt_probe(struct platform_device *pdev)
        if (IS_ERR(wdt_base))
                return PTR_ERR(wdt_base);
 
-       wdt_clk = devm_clk_get(&pdev->dev, "wdt");
+       wdt_clk = devm_clk_get_enabled(&pdev->dev, "wdt");
        if (IS_ERR(wdt_clk))
                return PTR_ERR(wdt_clk);
 
-       err = clk_prepare_enable(wdt_clk);
-       if (err)
-               return err;
-
        wdt_freq = clk_get_rate(wdt_clk);
-       if (!wdt_freq) {
-               err = -EINVAL;
-               goto err_clk_disable;
-       }
+       if (!wdt_freq)
+               return -EINVAL;
 
        max_timeout = (0xfffffffful / wdt_freq);
        if (timeout < 1 || timeout > max_timeout) {
@@ -286,20 +280,15 @@ static int ath79_wdt_probe(struct platform_device *pdev)
        if (err) {
                dev_err(&pdev->dev,
                        "unable to register misc device, err=%d\n", err);
-               goto err_clk_disable;
+               return err;
        }
 
        return 0;
-
-err_clk_disable:
-       clk_disable_unprepare(wdt_clk);
-       return err;
 }
 
 static void ath79_wdt_remove(struct platform_device *pdev)
 {
        misc_deregister(&ath79_wdt_miscdev);
-       clk_disable_unprepare(wdt_clk);
 }
 
 static void ath79_wdt_shutdown(struct platform_device *pdev)
index 0923201ce8743af366bff7462c37bba9dff645da..a7b814ea740bb237020867673cbb5b904615f3cd 100644 (file)
@@ -5,12 +5,13 @@
  * Author: 2013, Alexander Shiyan <shc_work@mail.ru>
  */
 
-#include <linux/err.h>
 #include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/err.h>
 #include <linux/gpio/consumer.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/property.h>
 #include <linux/watchdog.h>
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -106,7 +107,6 @@ static const struct watchdog_ops gpio_wdt_ops = {
 static int gpio_wdt_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct device_node *np = dev->of_node;
        struct gpio_wdt_priv *priv;
        enum gpiod_flags gflags;
        unsigned int hw_margin;
@@ -119,7 +119,7 @@ static int gpio_wdt_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, priv);
 
-       ret = of_property_read_string(np, "hw_algo", &algo);
+       ret = device_property_read_string(dev, "hw_algo", &algo);
        if (ret)
                return ret;
        if (!strcmp(algo, "toggle")) {
@@ -136,16 +136,14 @@ static int gpio_wdt_probe(struct platform_device *pdev)
        if (IS_ERR(priv->gpiod))
                return PTR_ERR(priv->gpiod);
 
-       ret = of_property_read_u32(np,
-                                  "hw_margin_ms", &hw_margin);
+       ret = device_property_read_u32(dev, "hw_margin_ms", &hw_margin);
        if (ret)
                return ret;
        /* Disallow values lower than 2 and higher than 65535 ms */
        if (hw_margin < 2 || hw_margin > 65535)
                return -EINVAL;
 
-       priv->always_running = of_property_read_bool(np,
-                                                    "always-running");
+       priv->always_running = device_property_read_bool(dev, "always-running");
 
        watchdog_set_drvdata(&priv->wdd, priv);
 
index c703586c6e5f086bd3f577ad77b80bbb052f30b9..b21d7a74a42df79f26399d1cf24484ed140abb46 100644 (file)
@@ -23,6 +23,7 @@
 #define LPO_CLK_SHIFT          8
 #define WDOG_CS_CLK            (LPO_CLK << LPO_CLK_SHIFT)
 #define WDOG_CS_EN             BIT(7)
+#define WDOG_CS_INT_EN         BIT(6)
 #define WDOG_CS_UPDATE         BIT(5)
 #define WDOG_CS_WAIT           BIT(1)
 #define WDOG_CS_STOP           BIT(0)
@@ -62,6 +63,7 @@ struct imx7ulp_wdt_device {
        void __iomem *base;
        struct clk *clk;
        bool post_rcs_wait;
+       bool ext_reset;
        const struct imx_wdt_hw_feature *hw;
 };
 
@@ -285,6 +287,9 @@ static int imx7ulp_wdt_init(struct imx7ulp_wdt_device *wdt, unsigned int timeout
        if (wdt->hw->prescaler_enable)
                val |= WDOG_CS_PRES;
 
+       if (wdt->ext_reset)
+               val |= WDOG_CS_INT_EN;
+
        do {
                ret = _imx7ulp_wdt_init(wdt, timeout, val);
                toval = readl(wdt->base + WDOG_TOVAL);
@@ -321,6 +326,9 @@ static int imx7ulp_wdt_probe(struct platform_device *pdev)
                return PTR_ERR(imx7ulp_wdt->clk);
        }
 
+       /* The WDOG may need to do external reset through dedicated pin */
+       imx7ulp_wdt->ext_reset = of_property_read_bool(dev->of_node, "fsl,ext-reset-output");
+
        imx7ulp_wdt->post_rcs_wait = true;
        if (of_device_is_compatible(dev->of_node,
                                    "fsl,imx8ulp-wdt")) {
index 8ac021748d160d6d36e7f248e774eb21fd3adba6..e51fe1b78518f42a25b70fb3755fe93c5f5c9e22 100644 (file)
@@ -34,6 +34,7 @@
 
 #define SC_IRQ_WDOG                    1
 #define SC_IRQ_GROUP_WDOG              1
+#define SC_TIMER_ERR_BUSY              10
 
 static bool nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0000);
@@ -61,7 +62,9 @@ static int imx_sc_wdt_start(struct watchdog_device *wdog)
 
        arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_START_WDOG,
                      0, 0, 0, 0, 0, 0, &res);
-       if (res.a0)
+
+       /* Ignore if already enabled(SC_TIMER_ERR_BUSY) */
+       if (res.a0 && res.a0 != SC_TIMER_ERR_BUSY)
                return -EACCES;
 
        arm_smccc_smc(IMX_SIP_TIMER, IMX_SIP_TIMER_SET_WDOG_ACT,
index bb11229093966db7f16a28531fecb83e1a32367a..e888b1bdd1f2f41dd7bbe464326d224c0439c36e 100644 (file)
@@ -13,9 +13,9 @@
  *                 http://www.ite.com.tw/
  *
  *     Support of the watchdog timers, which are available on
- *     IT8607, IT8620, IT8622, IT8625, IT8628, IT8655, IT8665, IT8686,
- *     IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726, IT8728,
- *     IT8772, IT8783 and IT8784.
+ *     IT8607, IT8613, IT8620, IT8622, IT8625, IT8628, IT8655, IT8665,
+ *     IT8686, IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726,
+ *     IT8728, IT8772, IT8783 and IT8784.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -50,6 +50,7 @@
 /* Chip Id numbers */
 #define NO_DEV_ID      0xffff
 #define IT8607_ID      0x8607
+#define IT8613_ID      0x8613
 #define IT8620_ID      0x8620
 #define IT8622_ID      0x8622
 #define IT8625_ID      0x8625
@@ -277,6 +278,7 @@ static int __init it87_wdt_init(void)
                max_units = 65535;
                break;
        case IT8607_ID:
+       case IT8613_ID:
        case IT8620_ID:
        case IT8622_ID:
        case IT8625_ID:
index 607ce4b8df574ff6dba6709dc01c739007111ee1..ec0c08652ec2f9102144be0ad6b4831d0710b580 100644 (file)
@@ -105,6 +105,25 @@ static const struct watchdog_ops ixp4xx_wdt_ops = {
        .owner = THIS_MODULE,
 };
 
+/*
+ * The A0 version of the IXP422 had a bug in the watchdog making
+ * is useless, but we still need to use it to restart the system
+ * as it is the only way, so in this special case we register a
+ * "dummy" watchdog that doesn't really work, but will support
+ * the restart operation.
+ */
+static int ixp4xx_wdt_dummy(struct watchdog_device *wdd)
+{
+       return 0;
+}
+
+static const struct watchdog_ops ixp4xx_wdt_restart_only_ops = {
+       .start = ixp4xx_wdt_dummy,
+       .stop = ixp4xx_wdt_dummy,
+       .restart = ixp4xx_wdt_restart,
+       .owner = THIS_MODULE,
+};
+
 static const struct watchdog_info ixp4xx_wdt_info = {
        .options = WDIOF_KEEPALIVEPING
                | WDIOF_MAGICCLOSE
@@ -114,14 +133,17 @@ static const struct watchdog_info ixp4xx_wdt_info = {
 
 static int ixp4xx_wdt_probe(struct platform_device *pdev)
 {
+       static const struct watchdog_ops *iwdt_ops;
        struct device *dev = &pdev->dev;
        struct ixp4xx_wdt *iwdt;
        struct clk *clk;
        int ret;
 
        if (!(read_cpuid_id() & 0xf) && !cpu_is_ixp46x()) {
-               dev_err(dev, "Rev. A0 IXP42x CPU detected - watchdog disabled\n");
-               return -ENODEV;
+               dev_info(dev, "Rev. A0 IXP42x CPU detected - only restart supported\n");
+               iwdt_ops = &ixp4xx_wdt_restart_only_ops;
+       } else {
+               iwdt_ops = &ixp4xx_wdt_ops;
        }
 
        iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
@@ -141,7 +163,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
                iwdt->rate = IXP4XX_TIMER_FREQ;
 
        iwdt->wdd.info = &ixp4xx_wdt_info;
-       iwdt->wdd.ops = &ixp4xx_wdt_ops;
+       iwdt->wdd.ops = iwdt_ops;
        iwdt->wdd.min_timeout = 1;
        iwdt->wdd.max_timeout = U32_MAX / iwdt->rate;
        iwdt->wdd.parent = dev;
index d7eb8286e11eca75006f5468a1f9b45c1214db67..098bb141a521e11347ab6d49a19bcf6a615def6d 100644 (file)
@@ -8,8 +8,8 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
-#include <linux/of_platform.h>
 #include <linux/platform_device.h>
+#include <linux/of.h>
 #include <linux/watchdog.h>
 
 /*
@@ -190,6 +190,13 @@ static int gti_wdt_set_pretimeout(struct watchdog_device *wdev,
        struct gti_wdt_priv *priv = watchdog_get_drvdata(wdev);
        struct watchdog_device *wdog_dev = &priv->wdev;
 
+       if (!timeout) {
+               /* Disable Interrupt */
+               writeq(GTI_CWD_INT_ENA_CLR_VAL(priv->wdt_timer_idx),
+                      priv->base + GTI_CWD_INT_ENA_CLR);
+               return 0;
+       }
+
        /* pretimeout should 1/3 of max_timeout */
        if (timeout * 3 <= wdog_dev->max_timeout)
                return gti_wdt_settimeout(wdev, timeout * 3);
@@ -271,7 +278,7 @@ static int gti_wdt_probe(struct platform_device *pdev)
                                   &wdt_idx);
        if (!err) {
                if (wdt_idx >= priv->data->gti_num_timers)
-                       return dev_err_probe(&pdev->dev, err,
+                       return dev_err_probe(&pdev->dev, -EINVAL,
                                "GTI wdog timer index not valid");
 
                priv->wdt_timer_idx = wdt_idx;
@@ -292,6 +299,7 @@ static int gti_wdt_probe(struct platform_device *pdev)
 
        /* Maximum timeout is 3 times the pretimeout */
        wdog_dev->max_timeout = max_pretimeout * 3;
+       wdog_dev->max_hw_heartbeat_ms = max_pretimeout * 1000;
        /* Minimum first timeout (pretimeout) is 1, so min_timeout as 3 */
        wdog_dev->min_timeout = 3;
        wdog_dev->timeout = wdog_dev->pretimeout;
@@ -308,7 +316,7 @@ static int gti_wdt_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
-               return dev_err_probe(&pdev->dev, irq, "IRQ resource not found\n");
+               return irq;
 
        err = devm_request_irq(dev, irq, gti_wdt_interrupt, 0,
                               pdev->name, &priv->wdev);
index 9c5b6616fc87e9340fb82863a415ce50e2376675..667e2c5b3431c7f758215a16338e59e4ba7327e5 100644 (file)
@@ -39,6 +39,7 @@
  * @tleft_idx: index for direct access to time left register;
  * @ping_idx:  index for direct access to ping register;
  * @reset_idx: index for direct access to reset cause register;
+ * @regmap_val_sz: size of value in register map;
  * @wd_type:   watchdog HW type;
  */
 struct mlxreg_wdt {
index 05657dc1d36a0181349d901a607d48e8a81c3998..352853e6fe712012a9245d0222ebbabd6a7c6773 100644 (file)
@@ -187,7 +187,7 @@ static int xwdt_probe(struct platform_device *pdev)
 
        watchdog_set_nowayout(xilinx_wdt_wdd, enable_once);
 
-       xdev->clk = devm_clk_get_enabled(dev, NULL);
+       xdev->clk = devm_clk_get_prepared(dev, NULL);
        if (IS_ERR(xdev->clk)) {
                if (PTR_ERR(xdev->clk) != -ENOENT)
                        return PTR_ERR(xdev->clk);
@@ -218,18 +218,25 @@ static int xwdt_probe(struct platform_device *pdev)
        spin_lock_init(&xdev->spinlock);
        watchdog_set_drvdata(xilinx_wdt_wdd, xdev);
 
+       rc = clk_enable(xdev->clk);
+       if (rc) {
+               dev_err(dev, "unable to enable clock\n");
+               return rc;
+       }
+
        rc = xwdt_selftest(xdev);
        if (rc == XWT_TIMER_FAILED) {
                dev_err(dev, "SelfTest routine error\n");
+               clk_disable(xdev->clk);
                return rc;
        }
 
+       clk_disable(xdev->clk);
+
        rc = devm_watchdog_register_device(dev, xilinx_wdt_wdd);
        if (rc)
                return rc;
 
-       clk_disable(xdev->clk);
-
        dev_info(dev, "Xilinx Watchdog Timer with timeout %ds\n",
                 xilinx_wdt_wdd->timeout);
 
index 421ebcda62e645af6808a16bbb2aa4a3a9b8066a..5f23913ce3b49c4d054eaebf9057e99b4689f8d7 100644 (file)
@@ -152,14 +152,14 @@ static int sbsa_gwdt_set_timeout(struct watchdog_device *wdd,
        timeout = clamp_t(unsigned int, timeout, 1, wdd->max_hw_heartbeat_ms / 1000);
 
        if (action)
-               sbsa_gwdt_reg_write(gwdt->clk * timeout, gwdt);
+               sbsa_gwdt_reg_write((u64)gwdt->clk * timeout, gwdt);
        else
                /*
                 * In the single stage mode, The first signal (WS0) is ignored,
                 * the timeout is (WOR * 2), so the WOR should be configured
                 * to half value of timeout.
                 */
-               sbsa_gwdt_reg_write(gwdt->clk / 2 * timeout, gwdt);
+               sbsa_gwdt_reg_write(((u64)gwdt->clk / 2) * timeout, gwdt);
 
        return 0;
 }
index d2aa43c00221330eecbc38f695455c1c73523f81..4c5b8d98a4f30e5b4c2eecba06946238be7dabb6 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
 #include <linux/watchdog.h>
@@ -42,7 +41,7 @@ struct st_wdog {
        void __iomem *base;
        struct device *dev;
        struct regmap *regmap;
-       struct st_wdog_syscfg *syscfg;
+       const struct st_wdog_syscfg *syscfg;
        struct clk *clk;
        unsigned long clkrate;
        bool warm_reset;
@@ -150,7 +149,6 @@ static void st_clk_disable_unprepare(void *data)
 static int st_wdog_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       const struct of_device_id *match;
        struct device_node *np = dev->of_node;
        struct st_wdog *st_wdog;
        struct regmap *regmap;
@@ -173,12 +171,7 @@ static int st_wdog_probe(struct platform_device *pdev)
        if (!st_wdog)
                return -ENOMEM;
 
-       match = of_match_device(st_wdog_match, dev);
-       if (!match) {
-               dev_err(dev, "Couldn't match device\n");
-               return -ENODEV;
-       }
-       st_wdog->syscfg = (struct st_wdog_syscfg *)match->data;
+       st_wdog->syscfg = (struct st_wdog_syscfg *)device_get_match_data(dev);
 
        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
index e2d8c532bcb1a47df7cb9da0b387a55787828fdf..9d3ca848e8b62eb8aa282e67a6031f0373892b92 100644 (file)
@@ -136,11 +136,6 @@ static const struct watchdog_ops sp_wdt_ops = {
        .restart        = sp_wdt_restart,
 };
 
-static void sp_clk_disable_unprepare(void *data)
-{
-       clk_disable_unprepare(data);
-}
-
 static void sp_reset_control_assert(void *data)
 {
        reset_control_assert(data);
@@ -156,17 +151,9 @@ static int sp_wdt_probe(struct platform_device *pdev)
        if (!priv)
                return -ENOMEM;
 
-       priv->clk = devm_clk_get(dev, NULL);
+       priv->clk = devm_clk_get_enabled(dev, NULL);
        if (IS_ERR(priv->clk))
-               return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clock\n");
-
-       ret = clk_prepare_enable(priv->clk);
-       if (ret)
-               return dev_err_probe(dev, ret, "Failed to enable clock\n");
-
-       ret = devm_add_action_or_reset(dev, sp_clk_disable_unprepare, priv->clk);
-       if (ret)
-               return ret;
+               return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to enable clock\n");
 
        /* The timer and watchdog shared the STC reset */
        priv->rstc = devm_reset_control_get_shared(dev, NULL);
index 0ba99bed59fc4d6ef821f0494ac9daaeeb3591ea..650fdc7996e1c28c537b97e152c97b50b79a7f76 100644 (file)
@@ -269,7 +269,7 @@ static int wdat_wdt_stop(struct watchdog_device *wdd)
 
 static int wdat_wdt_ping(struct watchdog_device *wdd)
 {
-       return wdat_wdt_run_action(to_wdat_wdt(wdd), ACPI_WDAT_RESET, 0, NULL);
+       return wdat_wdt_run_action(to_wdat_wdt(wdd), ACPI_WDAT_RESET, wdd->timeout, NULL);
 }
 
 static int wdat_wdt_set_timeout(struct watchdog_device *wdd,
index d525957594b6b5b480b03cb681c9c56f969f2e0d..61dbe52bb3a3283077e51412b32ca56fea190da1 100644 (file)
@@ -732,4 +732,5 @@ module_exit(exit_v9fs)
 MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>");
 MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
 MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>");
+MODULE_DESCRIPTION("9P Client File System");
 MODULE_LICENSE("GPL");
index cdf441f22e0737c53ea94aee640adcb791ecd6f3..731e3d14b67d360e3ac2f14b2c139d38d4b4caae 100644 (file)
@@ -52,7 +52,6 @@ void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
                           unsigned int flags);
 int v9fs_dir_release(struct inode *inode, struct file *filp);
 int v9fs_file_open(struct inode *inode, struct file *file);
-void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
 int v9fs_uflags2omode(int uflags, int extended);
 
 void v9fs_blank_wstat(struct p9_wstat *wstat);
index 053d1cef6e13179cc3a48ce609352712d12c6d3d..8604e3377ee7abafec9c05e2fb8474664705f097 100644 (file)
@@ -68,7 +68,7 @@ ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name,
        struct p9_fid *fid;
        int ret;
 
-       p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n",
+       p9_debug(P9_DEBUG_VFS, "name = '%s' value_len = %zu\n",
                 name, buffer_size);
        fid = v9fs_fid_lookup(dentry);
        if (IS_ERR(fid))
@@ -139,7 +139,8 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
 
 ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 {
-       return v9fs_xattr_get(dentry, NULL, buffer, buffer_size);
+       /* Txattrwalk with an empty string lists xattrs instead */
+       return v9fs_xattr_get(dentry, "", buffer, buffer_size);
 }
 
 static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
index 2fe4a5832fcf4779a25aa122fd2327af1c46d45e..d6b9758ee23dcb8404dd08c7c892e912a6acfdc3 100644 (file)
@@ -568,6 +568,7 @@ static struct dentry *affs_fh_to_parent(struct super_block *sb, struct fid *fid,
 }
 
 const struct export_operations affs_export_ops = {
+       .encode_fh = generic_encode_ino32_fh,
        .fh_to_dentry = affs_fh_to_dentry,
        .fh_to_parent = affs_fh_to_parent,
        .get_parent = affs_get_parent,
index df13a4f9a6e30fdc43c9f5f4038996d00164bc98..c08c2c7d6fbbab79e34b0321b7bdc300fd575dfb 100644 (file)
@@ -24,7 +24,6 @@ config BCACHEFS_FS
        select XXHASH
        select SRCU
        select SYMBOLIC_ERRNAME
-       select MEAN_AND_VARIANCE
        help
        The bcachefs filesystem - a modern, copy on write filesystem, with
        support for multiple devices, compression, checksumming, etc.
@@ -42,7 +41,6 @@ config BCACHEFS_POSIX_ACL
 config BCACHEFS_DEBUG_TRANSACTIONS
        bool "bcachefs runtime info"
        depends on BCACHEFS_FS
-       default y
        help
        This makes the list of running btree transactions available in debugfs.
 
@@ -78,7 +76,7 @@ config BCACHEFS_NO_LATENCY_ACCT
 config MEAN_AND_VARIANCE_UNIT_TEST
        tristate "mean_and_variance unit tests" if !KUNIT_ALL_TESTS
        depends on KUNIT
-       select MEAN_AND_VARIANCE
+       depends on BCACHEFS_FS
        default KUNIT_ALL_TESTS
        help
          This option enables the kunit tests for mean_and_variance module.
index 0749731b9072aec5219b1d7724e1b7cd5c3cf04f..45b64f89258c2e42db47682cdfbb3c5d370d7062 100644 (file)
@@ -70,6 +70,7 @@ bcachefs-y            :=      \
        reflink.o               \
        replicas.o              \
        sb-clean.o              \
+       sb-errors.o             \
        sb-members.o            \
        siphash.o               \
        six.o                   \
index 2d516207e223f097d75c056288620297bfcbedb6..1fec0e67891f120efefed775c8010bc1b6675a86 100644 (file)
@@ -192,123 +192,109 @@ static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
        return DIV_ROUND_UP(bytes, sizeof(u64));
 }
 
-int bch2_alloc_v1_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
                          enum bkey_invalid_flags flags,
                          struct printbuf *err)
 {
        struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
+       int ret = 0;
 
        /* allow for unknown fields */
-       if (bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v)) {
-               prt_printf(err, "incorrect value size (%zu < %u)",
-                      bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       return 0;
+       bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
+                        alloc_v1_val_size_bad,
+                        "incorrect value size (%zu < %u)",
+                        bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
+fsck_err:
+       return ret;
 }
 
-int bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
                          enum bkey_invalid_flags flags,
                          struct printbuf *err)
 {
        struct bkey_alloc_unpacked u;
+       int ret = 0;
 
-       if (bch2_alloc_unpack_v2(&u, k)) {
-               prt_printf(err, "unpack error");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       return 0;
+       bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
+                        alloc_v2_unpack_error,
+                        "unpack error");
+fsck_err:
+       return ret;
 }
 
-int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
                          enum bkey_invalid_flags flags,
                          struct printbuf *err)
 {
        struct bkey_alloc_unpacked u;
+       int ret = 0;
 
-       if (bch2_alloc_unpack_v3(&u, k)) {
-               prt_printf(err, "unpack error");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       return 0;
+       bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
+                        alloc_v2_unpack_error,
+                        "unpack error");
+fsck_err:
+       return ret;
 }
 
-int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
                          enum bkey_invalid_flags flags, struct printbuf *err)
 {
        struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+       int ret = 0;
 
-       if (alloc_v4_u64s(a.v) > bkey_val_u64s(k.k)) {
-               prt_printf(err, "bad val size (%u > %zu)",
-                      alloc_v4_u64s(a.v), bkey_val_u64s(k.k));
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(alloc_v4_u64s(a.v) > bkey_val_u64s(k.k), c, err,
+                        alloc_v4_val_size_bad,
+                        "bad val size (%u > %zu)",
+                        alloc_v4_u64s(a.v), bkey_val_u64s(k.k));
 
-       if (!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
-           BCH_ALLOC_V4_NR_BACKPOINTERS(a.v)) {
-               prt_printf(err, "invalid backpointers_start");
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
+                        BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
+                        alloc_v4_backpointers_start_bad,
+                        "invalid backpointers_start");
 
-       if (alloc_data_type(*a.v, a.v->data_type) != a.v->data_type) {
-               prt_printf(err, "invalid data type (got %u should be %u)",
-                      a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
+                        alloc_key_data_type_bad,
+                        "invalid data type (got %u should be %u)",
+                        a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
 
        switch (a.v->data_type) {
        case BCH_DATA_free:
        case BCH_DATA_need_gc_gens:
        case BCH_DATA_need_discard:
-               if (a.v->dirty_sectors ||
-                   a.v->cached_sectors ||
-                   a.v->stripe) {
-                       prt_printf(err, "empty data type free but have data");
-                       return -BCH_ERR_invalid_bkey;
-               }
+               bkey_fsck_err_on(a.v->dirty_sectors ||
+                                a.v->cached_sectors ||
+                                a.v->stripe, c, err,
+                                alloc_key_empty_but_have_data,
+                                "empty data type free but have data");
                break;
        case BCH_DATA_sb:
        case BCH_DATA_journal:
        case BCH_DATA_btree:
        case BCH_DATA_user:
        case BCH_DATA_parity:
-               if (!a.v->dirty_sectors) {
-                       prt_printf(err, "data_type %s but dirty_sectors==0",
-                              bch2_data_types[a.v->data_type]);
-                       return -BCH_ERR_invalid_bkey;
-               }
+               bkey_fsck_err_on(!a.v->dirty_sectors, c, err,
+                                alloc_key_dirty_sectors_0,
+                                "data_type %s but dirty_sectors==0",
+                                bch2_data_types[a.v->data_type]);
                break;
        case BCH_DATA_cached:
-               if (!a.v->cached_sectors ||
-                   a.v->dirty_sectors ||
-                   a.v->stripe) {
-                       prt_printf(err, "data type inconsistency");
-                       return -BCH_ERR_invalid_bkey;
-               }
-
-               if (!a.v->io_time[READ] &&
-                   c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs) {
-                       prt_printf(err, "cached bucket with read_time == 0");
-                       return -BCH_ERR_invalid_bkey;
-               }
+               bkey_fsck_err_on(!a.v->cached_sectors ||
+                                a.v->dirty_sectors ||
+                                a.v->stripe, c, err,
+                                alloc_key_cached_inconsistency,
+                                "data type inconsistency");
+
+               bkey_fsck_err_on(!a.v->io_time[READ] &&
+                                c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
+                                c, err,
+                                alloc_key_cached_but_read_time_zero,
+                                "cached bucket with read_time == 0");
                break;
        case BCH_DATA_stripe:
                break;
        }
-
-       return 0;
-}
-
-static inline u64 swab40(u64 x)
-{
-       return (((x & 0x00000000ffULL) << 32)|
-               ((x & 0x000000ff00ULL) << 16)|
-               ((x & 0x0000ff0000ULL) >>  0)|
-               ((x & 0x00ff000000ULL) >> 16)|
-               ((x & 0xff00000000ULL) >> 32));
+fsck_err:
+       return ret;
 }
 
 void bch2_alloc_v4_swab(struct bkey_s k)
@@ -324,6 +310,7 @@ void bch2_alloc_v4_swab(struct bkey_s k)
        a->io_time[1]           = swab64(a->io_time[1]);
        a->stripe               = swab32(a->stripe);
        a->nr_external_backpointers = swab32(a->nr_external_backpointers);
+       a->fragmentation_lru    = swab64(a->fragmentation_lru);
 
        bps = alloc_v4_backpointers(a);
        for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
@@ -521,17 +508,18 @@ static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
                : 0;
 }
 
-int bch2_bucket_gens_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
                             enum bkey_invalid_flags flags,
                             struct printbuf *err)
 {
-       if (bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens)) {
-               prt_printf(err, "bad val size (%zu != %zu)",
-                      bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
-               return -BCH_ERR_invalid_bkey;
-       }
+       int ret = 0;
 
-       return 0;
+       bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
+                        bucket_gens_val_size_bad,
+                        "bad val size (%zu != %zu)",
+                        bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
+fsck_err:
+       return ret;
 }
 
 void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
@@ -727,7 +715,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
                        "incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
                        "  for %s",
                        set ? "setting" : "clearing",
-                       bch2_btree_ids[btree],
+                       bch2_btree_id_str(btree),
                        iter.pos.inode,
                        iter.pos.offset,
                        bch2_bkey_types[old.k->type],
@@ -986,6 +974,7 @@ int bch2_check_alloc_key(struct btree_trans *trans,
        int ret;
 
        if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
+                       alloc_key_to_missing_dev_bucket,
                        "alloc key for invalid device:bucket %llu:%llu",
                        alloc_k.k->p.inode, alloc_k.k->p.offset))
                return bch2_btree_delete_at(trans, alloc_iter, 0);
@@ -1005,7 +994,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
 
        if (k.k->type != discard_key_type &&
            (c->opts.reconstruct_alloc ||
-            fsck_err(c, "incorrect key in need_discard btree (got %s should be %s)\n"
+            fsck_err(c, need_discard_key_wrong,
+                     "incorrect key in need_discard btree (got %s should be %s)\n"
                      "  %s",
                      bch2_bkey_types[k.k->type],
                      bch2_bkey_types[discard_key_type],
@@ -1035,7 +1025,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
 
        if (k.k->type != freespace_key_type &&
            (c->opts.reconstruct_alloc ||
-            fsck_err(c, "incorrect key in freespace btree (got %s should be %s)\n"
+            fsck_err(c, freespace_key_wrong,
+                     "incorrect key in freespace btree (got %s should be %s)\n"
                      "  %s",
                      bch2_bkey_types[k.k->type],
                      bch2_bkey_types[freespace_key_type],
@@ -1066,7 +1057,8 @@ int bch2_check_alloc_key(struct btree_trans *trans,
 
        if (a->gen != alloc_gen(k, gens_offset) &&
            (c->opts.reconstruct_alloc ||
-            fsck_err(c, "incorrect gen in bucket_gens btree (got %u should be %u)\n"
+            fsck_err(c, bucket_gens_key_wrong,
+                     "incorrect gen in bucket_gens btree (got %u should be %u)\n"
                      "  %s",
                      alloc_gen(k, gens_offset), a->gen,
                      (printbuf_reset(&buf),
@@ -1124,7 +1116,8 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
 
        if (k.k->type != KEY_TYPE_set &&
            (c->opts.reconstruct_alloc ||
-            fsck_err(c, "hole in alloc btree missing in freespace btree\n"
+            fsck_err(c, freespace_hole_missing,
+                     "hole in alloc btree missing in freespace btree\n"
                      "  device %llu buckets %llu-%llu",
                      freespace_iter->pos.inode,
                      freespace_iter->pos.offset,
@@ -1187,6 +1180,7 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
 
                for (i = gens_offset; i < gens_end_offset; i++) {
                        if (fsck_err_on(g.v.gens[i], c,
+                                       bucket_gens_hole_wrong,
                                        "hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
                                        bucket_gens_pos_to_alloc(k.k->p, i).inode,
                                        bucket_gens_pos_to_alloc(k.k->p, i).offset,
@@ -1244,8 +1238,9 @@ static noinline_for_stack int __bch2_check_discard_freespace_key(struct btree_tr
                return ret;
 
        if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
+                       need_discard_freespace_key_to_invalid_dev_bucket,
                        "entry in %s btree for nonexistant dev:bucket %llu:%llu",
-                       bch2_btree_ids[iter->btree_id], pos.inode, pos.offset))
+                       bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
                goto delete;
 
        a = bch2_alloc_to_v4(alloc_k, &a_convert);
@@ -1253,9 +1248,10 @@ static noinline_for_stack int __bch2_check_discard_freespace_key(struct btree_tr
        if (fsck_err_on(a->data_type != state ||
                        (state == BCH_DATA_free &&
                         genbits != alloc_freespace_genbits(*a)), c,
+                       need_discard_freespace_key_bad,
                        "%s\n  incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
                        (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
-                       bch2_btree_ids[iter->btree_id],
+                       bch2_btree_id_str(iter->btree_id),
                        iter->pos.inode,
                        iter->pos.offset,
                        a->data_type == state,
@@ -1320,6 +1316,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
        dev_exists = bch2_dev_exists2(c, k.k->p.inode);
        if (!dev_exists) {
                if (fsck_err_on(!dev_exists, c,
+                               bucket_gens_to_invalid_dev,
                                "bucket_gens key for invalid device:\n  %s",
                                (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                        ret = bch2_btree_delete_at(trans, iter, 0);
@@ -1330,6 +1327,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
        ca = bch_dev_bkey_exists(c, k.k->p.inode);
        if (fsck_err_on(end <= ca->mi.first_bucket ||
                        start >= ca->mi.nbuckets, c,
+                       bucket_gens_to_invalid_buckets,
                        "bucket_gens key for invalid buckets:\n  %s",
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                ret = bch2_btree_delete_at(trans, iter, 0);
@@ -1338,6 +1336,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
 
        for (b = start; b < ca->mi.first_bucket; b++)
                if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+                               bucket_gens_nonzero_for_invalid_buckets,
                                "bucket_gens key has nonzero gen for invalid bucket")) {
                        g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
                        need_update = true;
@@ -1345,6 +1344,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
 
        for (b = ca->mi.nbuckets; b < end; b++)
                if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
+                               bucket_gens_nonzero_for_invalid_buckets,
                                "bucket_gens key has nonzero gen for invalid bucket")) {
                        g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
                        need_update = true;
@@ -1495,11 +1495,13 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
                return ret;
 
        if (fsck_err_on(!a->io_time[READ], c,
+                       alloc_key_cached_but_read_time_zero,
                        "cached bucket with read_time 0\n"
                        "  %s",
                (printbuf_reset(&buf),
                 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
            fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
+                       alloc_key_to_missing_lru_entry,
                        "missing lru entry\n"
                        "  %s",
                        (printbuf_reset(&buf),
@@ -2075,6 +2077,17 @@ void bch2_recalc_capacity(struct bch_fs *c)
        closure_wake_up(&c->freelist_wait);
 }
 
+u64 bch2_min_rw_member_capacity(struct bch_fs *c)
+{
+       struct bch_dev *ca;
+       unsigned i;
+       u64 ret = U64_MAX;
+
+       for_each_rw_member(ca, c, i)
+               ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
+       return ret;
+}
+
 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
 {
        struct open_bucket *ob;
index 97042067d2a960e32f586b5686f4e30490cf0974..73faf99a222aac3b33035432666e4d9b272c6fe9 100644 (file)
@@ -149,13 +149,13 @@ struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s
 
 int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
 
-int bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_alloc_v1_invalid(struct bch_fs *, struct bkey_s_c,
                          enum bkey_invalid_flags, struct printbuf *);
-int bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_alloc_v2_invalid(struct bch_fs *, struct bkey_s_c,
                          enum bkey_invalid_flags, struct printbuf *);
-int bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_alloc_v3_invalid(struct bch_fs *, struct bkey_s_c,
                          enum bkey_invalid_flags, struct printbuf *);
-int bch2_alloc_v4_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_alloc_v4_invalid(struct bch_fs *, struct bkey_s_c,
                          enum bkey_invalid_flags, struct printbuf *);
 void bch2_alloc_v4_swab(struct bkey_s);
 void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
@@ -193,7 +193,7 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
        .min_val_size   = 48,                           \
 })
 
-int bch2_bucket_gens_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_bucket_gens_invalid(struct bch_fs *, struct bkey_s_c,
                             enum bkey_invalid_flags, struct printbuf *);
 void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
@@ -249,6 +249,7 @@ int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64);
 int bch2_fs_freespace_init(struct bch_fs *);
 
 void bch2_recalc_capacity(struct bch_fs *);
+u64 bch2_min_rw_member_capacity(struct bch_fs *);
 
 void bch2_dev_allocator_remove(struct bch_fs *, struct bch_dev *);
 void bch2_dev_allocator_add(struct bch_fs *, struct bch_dev *);
index 3bc4abd3d7d5725e821e37b43282a3d92de149db..b85c7765272f6e4ae5e8aceb5a4bbaa89c535912 100644 (file)
@@ -399,12 +399,23 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
                        struct bucket_alloc_state *s,
                        struct closure *cl)
 {
-       struct btree_iter iter;
-       struct bkey_s_c k;
+       struct btree_iter iter, citer;
+       struct bkey_s_c k, ck;
        struct open_bucket *ob = NULL;
-       u64 alloc_start = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
-       u64 alloc_cursor = max(alloc_start, READ_ONCE(ca->alloc_cursor));
+       u64 first_bucket = max_t(u64, ca->mi.first_bucket, ca->new_fs_bucket_idx);
+       u64 alloc_start = max(first_bucket, READ_ONCE(ca->alloc_cursor));
+       u64 alloc_cursor = alloc_start;
        int ret;
+
+       /*
+        * Scan with an uncached iterator to avoid polluting the key cache. An
+        * uncached iter will return a cached key if one exists, but if not
+        * there is no other underlying protection for the associated key cache
+        * slot. To avoid racing bucket allocations, look up the cached key slot
+        * of any likely allocation candidate before attempting to proceed with
+        * the allocation. This provides proper exclusion on the associated
+        * bucket.
+        */
 again:
        for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
                           BTREE_ITER_SLOTS, k, ret) {
@@ -419,25 +430,38 @@ again:
                        continue;
 
                a = bch2_alloc_to_v4(k, &a_convert);
-
                if (a->data_type != BCH_DATA_free)
                        continue;
 
+               /* now check the cached key to serialize concurrent allocs of the bucket */
+               ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_CACHED);
+               ret = bkey_err(ck);
+               if (ret)
+                       break;
+
+               a = bch2_alloc_to_v4(ck, &a_convert);
+               if (a->data_type != BCH_DATA_free)
+                       goto next;
+
                s->buckets_seen++;
 
                ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
+next:
+               citer.path->preserve = false;
+               bch2_trans_iter_exit(trans, &citer);
                if (ob)
                        break;
        }
        bch2_trans_iter_exit(trans, &iter);
 
+       alloc_cursor = iter.pos.offset;
        ca->alloc_cursor = alloc_cursor;
 
        if (!ob && ret)
                ob = ERR_PTR(ret);
 
-       if (!ob && alloc_cursor > alloc_start) {
-               alloc_cursor = alloc_start;
+       if (!ob && alloc_start > first_bucket) {
+               alloc_cursor = alloc_start = first_bucket;
                goto again;
        }
 
index cc856150a948ea7859feedcd0bb99493f328fd04..ef02c9bb0354173eddc4b86d6c998965ba747678 100644 (file)
@@ -5,6 +5,7 @@
 #include "backpointers.h"
 #include "btree_cache.h"
 #include "btree_update.h"
+#include "btree_update_interior.h"
 #include "btree_write_buffer.h"
 #include "error.h"
 
@@ -37,25 +38,26 @@ static bool extent_matches_bp(struct bch_fs *c,
        return false;
 }
 
-int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
                             enum bkey_invalid_flags flags,
                             struct printbuf *err)
 {
        struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
        struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
+       int ret = 0;
 
-       if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
-               prt_str(err, "backpointer at wrong pos");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       return 0;
+       bkey_fsck_err_on(!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)),
+                        c, err,
+                        backpointer_pos_wrong,
+                        "backpointer at wrong pos");
+fsck_err:
+       return ret;
 }
 
 void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
 {
        prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
-              bch2_btree_ids[bp->btree_id],
+              bch2_btree_id_str(bp->btree_id),
               bp->level,
               (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
               (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
@@ -76,7 +78,7 @@ void bch2_backpointer_swab(struct bkey_s k)
 {
        struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
 
-       bp.v->bucket_offset     = swab32(bp.v->bucket_offset);
+       bp.v->bucket_offset     = swab40(bp.v->bucket_offset);
        bp.v->bucket_len        = swab32(bp.v->bucket_len);
        bch2_bpos_swab(&bp.v->pos);
 }
@@ -219,18 +221,22 @@ out:
 static void backpointer_not_found(struct btree_trans *trans,
                                  struct bpos bp_pos,
                                  struct bch_backpointer bp,
-                                 struct bkey_s_c k,
-                                 const char *thing_it_points_to)
+                                 struct bkey_s_c k)
 {
        struct bch_fs *c = trans->c;
        struct printbuf buf = PRINTBUF;
        struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
 
+       /*
+        * If we're using the btree write buffer, the backpointer we were
+        * looking at may have already been deleted - failure to find what it
+        * pointed to is not an error:
+        */
        if (likely(!bch2_backpointers_no_use_write_buffer))
                return;
 
        prt_printf(&buf, "backpointer doesn't match %s it points to:\n  ",
-                  thing_it_points_to);
+                  bp.level ? "btree node" : "extent");
        prt_printf(&buf, "bucket: ");
        bch2_bpos_to_text(&buf, bucket);
        prt_printf(&buf, "\n  ");
@@ -256,56 +262,37 @@ struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
                                         struct bch_backpointer bp,
                                         unsigned iter_flags)
 {
-       struct bch_fs *c = trans->c;
-       struct btree_root *r = bch2_btree_id_root(c, bp.btree_id);
-       struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
-       struct bkey_s_c k;
-
-       bch2_trans_node_iter_init(trans, iter,
-                                 bp.btree_id,
-                                 bp.pos,
-                                 0,
-                                 min(bp.level, r->level),
-                                 iter_flags);
-       k = bch2_btree_iter_peek_slot(iter);
-       if (bkey_err(k)) {
-               bch2_trans_iter_exit(trans, iter);
-               return k;
-       }
-
-       if (bp.level == r->level + 1)
-               k = bkey_i_to_s_c(&r->key);
-
-       if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
-               return k;
-
-       bch2_trans_iter_exit(trans, iter);
+       if (likely(!bp.level)) {
+               struct bch_fs *c = trans->c;
+               struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
+               struct bkey_s_c k;
+
+               bch2_trans_node_iter_init(trans, iter,
+                                         bp.btree_id,
+                                         bp.pos,
+                                         0, 0,
+                                         iter_flags);
+               k = bch2_btree_iter_peek_slot(iter);
+               if (bkey_err(k)) {
+                       bch2_trans_iter_exit(trans, iter);
+                       return k;
+               }
 
-       if (unlikely(bch2_backpointers_no_use_write_buffer)) {
-               if (bp.level) {
-                       struct btree *b;
+               if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
+                       return k;
 
-                       /*
-                        * If a backpointer for a btree node wasn't found, it may be
-                        * because it was overwritten by a new btree node that hasn't
-                        * been written out yet - backpointer_get_node() checks for
-                        * this:
-                        */
-                       b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
-                       if (!IS_ERR_OR_NULL(b))
-                               return bkey_i_to_s_c(&b->key);
+               bch2_trans_iter_exit(trans, iter);
+               backpointer_not_found(trans, bp_pos, bp, k);
+               return bkey_s_c_null;
+       } else {
+               struct btree *b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
 
+               if (IS_ERR_OR_NULL(b)) {
                        bch2_trans_iter_exit(trans, iter);
-
-                       if (IS_ERR(b))
-                               return bkey_s_c_err(PTR_ERR(b));
-                       return bkey_s_c_null;
+                       return IS_ERR(b) ? bkey_s_c_err(PTR_ERR(b)) : bkey_s_c_null;
                }
-
-               backpointer_not_found(trans, bp_pos, bp, k, "extent");
+               return bkey_i_to_s_c(&b->key);
        }
-
-       return bkey_s_c_null;
 }
 
 struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
@@ -329,6 +316,8 @@ struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
        if (IS_ERR(b))
                goto err;
 
+       BUG_ON(b->c.level != bp.level - 1);
+
        if (b && extent_matches_bp(c, bp.btree_id, bp.level,
                                   bkey_i_to_s_c(&b->key),
                                   bucket, bp))
@@ -337,8 +326,7 @@ struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
        if (b && btree_node_will_make_reachable(b)) {
                b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
        } else {
-               backpointer_not_found(trans, bp_pos, bp,
-                                     bkey_i_to_s_c(&b->key), "btree node");
+               backpointer_not_found(trans, bp_pos, bp, bkey_i_to_s_c(&b->key));
                b = NULL;
        }
 err:
@@ -356,6 +344,7 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
        int ret = 0;
 
        if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
+                       backpointer_to_missing_device,
                        "backpointer for missing device:\n%s",
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                ret = bch2_btree_delete_at(trans, bp_iter, 0);
@@ -369,6 +358,7 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
                goto out;
 
        if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
+                       backpointer_to_missing_alloc,
                        "backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
                        alloc_iter.pos.inode, alloc_iter.pos.offset,
                        (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
@@ -453,14 +443,14 @@ fsck_err:
        return ret;
 missing:
        prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
-              bch2_btree_ids[bp.btree_id], bp.level);
+              bch2_btree_id_str(bp.btree_id), bp.level);
        bch2_bkey_val_to_text(&buf, c, orig_k);
        prt_printf(&buf, "\nbp pos ");
        bch2_bpos_to_text(&buf, bp_iter.pos);
 
        if (c->sb.version_upgrade_complete < bcachefs_metadata_version_backpointers ||
            c->opts.reconstruct_alloc ||
-           fsck_err(c, "%s", buf.buf))
+           fsck_err(c, ptr_to_missing_backpointer, "%s", buf.buf))
                ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);
 
        goto out;
@@ -793,7 +783,9 @@ static int check_one_backpointer(struct btree_trans *trans,
        }
 
        if (fsck_err_on(!k.k, c,
-                       "backpointer for missing extent\n  %s",
+                       backpointer_to_missing_ptr,
+                       "backpointer for missing %s\n  %s",
+                       bp.v->level ? "btree node" : "extent",
                        (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
                ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
                goto out;
index 547e0617602ab21049571e171eca313ab968a65c..ab866feeaf660f497cc58ddf73a2692ab32865ac 100644 (file)
@@ -7,7 +7,16 @@
 #include "buckets.h"
 #include "super.h"
 
-int bch2_backpointer_invalid(const struct bch_fs *, struct bkey_s_c k,
+static inline u64 swab40(u64 x)
+{
+       return (((x & 0x00000000ffULL) << 32)|
+               ((x & 0x000000ff00ULL) << 16)|
+               ((x & 0x0000ff0000ULL) >>  0)|
+               ((x & 0x00ff000000ULL) >> 16)|
+               ((x & 0xff00000000ULL) >> 32));
+}
+
+int bch2_backpointer_invalid(struct bch_fs *, struct bkey_s_c k,
                             enum bkey_invalid_flags, struct printbuf *);
 void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
 void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
index 1fbed1f8378d1aeeca4d781f9fa3c64583e2a833..be2edced52133e6592092d5d8e20c643cd8b372a 100644 (file)
@@ -2,20 +2,9 @@
 #ifndef _BCACHEFS_BBPOS_H
 #define _BCACHEFS_BBPOS_H
 
+#include "bbpos_types.h"
 #include "bkey_methods.h"
-
-struct bbpos {
-       enum btree_id           btree;
-       struct bpos             pos;
-};
-
-static inline struct bbpos BBPOS(enum btree_id btree, struct bpos pos)
-{
-       return (struct bbpos) { btree, pos };
-}
-
-#define BBPOS_MIN      BBPOS(0, POS_MIN)
-#define BBPOS_MAX      BBPOS(BTREE_ID_NR - 1, POS_MAX)
+#include "btree_cache.h"
 
 static inline int bbpos_cmp(struct bbpos l, struct bbpos r)
 {
@@ -40,7 +29,7 @@ static inline struct bbpos bbpos_successor(struct bbpos pos)
 
 static inline void bch2_bbpos_to_text(struct printbuf *out, struct bbpos pos)
 {
-       prt_str(out, bch2_btree_ids[pos.btree]);
+       prt_str(out, bch2_btree_id_str(pos.btree));
        prt_char(out, ':');
        bch2_bpos_to_text(out, pos.pos);
 }
diff --git a/fs/bcachefs/bbpos_types.h b/fs/bcachefs/bbpos_types.h
new file mode 100644 (file)
index 0000000..5198e94
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_BBPOS_TYPES_H
+#define _BCACHEFS_BBPOS_TYPES_H
+
+struct bbpos {
+       enum btree_id           btree;
+       struct bpos             pos;
+};
+
+static inline struct bbpos BBPOS(enum btree_id btree, struct bpos pos)
+{
+       return (struct bbpos) { btree, pos };
+}
+
+#define BBPOS_MIN      BBPOS(0, POS_MIN)
+#define BBPOS_MAX      BBPOS(BTREE_ID_NR - 1, POS_MAX)
+
+#endif /* _BCACHEFS_BBPOS_TYPES_H */
index 53ffa88cae16cab4e883459dae1fb61424b3b7ec..9cb8684959ee17affdc2a558c14414477fef922d 100644 (file)
 #include "nocow_locking_types.h"
 #include "opts.h"
 #include "recovery_types.h"
+#include "sb-errors_types.h"
 #include "seqmutex.h"
 #include "util.h"
 
@@ -418,6 +419,7 @@ enum bch_time_stats {
 #include "buckets_types.h"
 #include "buckets_waiting_for_journal_types.h"
 #include "clock_types.h"
+#include "disk_groups_types.h"
 #include "ec_types.h"
 #include "journal_types.h"
 #include "keylist_types.h"
@@ -463,6 +465,7 @@ enum gc_phase {
        GC_PHASE_BTREE_snapshot_trees,
        GC_PHASE_BTREE_deleted_inodes,
        GC_PHASE_BTREE_logged_ops,
+       GC_PHASE_BTREE_rebalance_work,
 
        GC_PHASE_PENDING_DELETE,
 };
@@ -500,6 +503,8 @@ struct bch_dev {
         * Committed by bch2_write_super() -> bch_fs_mi_update()
         */
        struct bch_member_cpu   mi;
+       atomic64_t              errors[BCH_MEMBER_ERROR_NR];
+
        __uuid_t                uuid;
        char                    name[BDEVNAME_SIZE];
 
@@ -578,7 +583,7 @@ enum {
        BCH_FS_INITIAL_GC_UNFIXED,      /* kill when we enumerate fsck errors */
        BCH_FS_NEED_ANOTHER_GC,
 
-       BCH_FS_HAVE_DELETED_SNAPSHOTS,
+       BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS,
 
        /* errors: */
        BCH_FS_ERROR,
@@ -938,9 +943,6 @@ struct bch_fs {
        struct list_head        moving_context_list;
        struct mutex            moving_context_lock;
 
-       struct list_head        data_progress_list;
-       struct mutex            data_progress_lock;
-
        /* REBALANCE */
        struct bch_fs_rebalance rebalance;
 
@@ -991,11 +993,6 @@ struct bch_fs {
        struct bio_set          dio_read_bioset;
        struct bio_set          nocow_flush_bioset;
 
-       /* ERRORS */
-       struct list_head        fsck_errors;
-       struct mutex            fsck_error_lock;
-       bool                    fsck_alloc_err;
-
        /* QUOTAS */
        struct bch_memquota_type quotas[QTYP_NR];
 
@@ -1044,6 +1041,14 @@ struct bch_fs {
        struct bch2_time_stats  times[BCH_TIME_STAT_NR];
 
        struct btree_transaction_stats btree_transaction_stats[BCH_TRANSACTIONS_NR];
+
+       /* ERRORS */
+       struct list_head        fsck_error_msgs;
+       struct mutex            fsck_error_msgs_lock;
+       bool                    fsck_alloc_msgs_err;
+
+       bch_sb_errors_cpu       fsck_error_counts;
+       struct mutex            fsck_error_counts_lock;
 };
 
 extern struct wait_queue_head bch2_read_only_wait;
index 99749f3315fec5fafb0b6d051ca01f0b7384834a..0a750953ff921b9d62d9fd1918da27d375c2c6dc 100644 (file)
@@ -613,31 +613,17 @@ struct bch_extent_stripe_ptr {
 #endif
 };
 
-struct bch_extent_reservation {
-#if defined(__LITTLE_ENDIAN_BITFIELD)
-       __u64                   type:6,
-                               unused:22,
-                               replicas:4,
-                               generation:32;
-#elif defined (__BIG_ENDIAN_BITFIELD)
-       __u64                   generation:32,
-                               replicas:4,
-                               unused:22,
-                               type:6;
-#endif
-};
-
 struct bch_extent_rebalance {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
-       __u64                   type:7,
-                               unused:33,
-                               compression:8,
+       __u64                   type:6,
+                               unused:34,
+                               compression:8, /* enum bch_compression_opt */
                                target:16;
 #elif defined (__BIG_ENDIAN_BITFIELD)
        __u64                   target:16,
                                compression:8,
-                               unused:33,
-                               type:7;
+                               unused:34,
+                               type:6;
 #endif
 };
 
@@ -838,34 +824,30 @@ enum inode_opt_id {
        Inode_opt_nr,
 };
 
-enum {
-       /*
-        * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
-        * flags)
-        */
-       __BCH_INODE_SYNC                = 0,
-       __BCH_INODE_IMMUTABLE           = 1,
-       __BCH_INODE_APPEND              = 2,
-       __BCH_INODE_NODUMP              = 3,
-       __BCH_INODE_NOATIME             = 4,
-
-       __BCH_INODE_I_SIZE_DIRTY        = 5, /* obsolete */
-       __BCH_INODE_I_SECTORS_DIRTY     = 6, /* obsolete */
-       __BCH_INODE_UNLINKED            = 7,
-       __BCH_INODE_BACKPTR_UNTRUSTED   = 8,
-
-       /* bits 20+ reserved for packed fields below: */
-};
-
-#define BCH_INODE_SYNC         (1 << __BCH_INODE_SYNC)
-#define BCH_INODE_IMMUTABLE    (1 << __BCH_INODE_IMMUTABLE)
-#define BCH_INODE_APPEND       (1 << __BCH_INODE_APPEND)
-#define BCH_INODE_NODUMP       (1 << __BCH_INODE_NODUMP)
-#define BCH_INODE_NOATIME      (1 << __BCH_INODE_NOATIME)
-#define BCH_INODE_I_SIZE_DIRTY (1 << __BCH_INODE_I_SIZE_DIRTY)
-#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
-#define BCH_INODE_UNLINKED     (1 << __BCH_INODE_UNLINKED)
-#define BCH_INODE_BACKPTR_UNTRUSTED (1 << __BCH_INODE_BACKPTR_UNTRUSTED)
+#define BCH_INODE_FLAGS()                      \
+       x(sync,                         0)      \
+       x(immutable,                    1)      \
+       x(append,                       2)      \
+       x(nodump,                       3)      \
+       x(noatime,                      4)      \
+       x(i_size_dirty,                 5)      \
+       x(i_sectors_dirty,              6)      \
+       x(unlinked,                     7)      \
+       x(backptr_untrusted,            8)
+
+/* bits 20+ reserved for packed fields below: */
+
+enum bch_inode_flags {
+#define x(t, n)        BCH_INODE_##t = 1U << n,
+       BCH_INODE_FLAGS()
+#undef x
+};
+
+enum __bch_inode_flags {
+#define x(t, n)        __BCH_INODE_##t = n,
+       BCH_INODE_FLAGS()
+#undef x
+};
 
 LE32_BITMASK(INODE_STR_HASH,   struct bch_inode, bi_flags, 20, 24);
 LE32_BITMASK(INODE_NR_FIELDS,  struct bch_inode, bi_flags, 24, 31);
@@ -1232,7 +1214,8 @@ struct bch_sb_field {
        x(journal_seq_blacklist, 8)             \
        x(journal_v2,   9)                      \
        x(counters,     10)                     \
-       x(members_v2,   11)
+       x(members_v2,   11)                     \
+       x(errors,       12)
 
 enum bch_sb_field_type {
 #define x(f, nr)       BCH_SB_FIELD_##f = nr,
@@ -1282,6 +1265,18 @@ enum bch_iops_measurement {
        BCH_IOPS_NR
 };
 
+#define BCH_MEMBER_ERROR_TYPES()               \
+       x(read,         0)                      \
+       x(write,        1)                      \
+       x(checksum,     2)
+
+enum bch_member_error_type {
+#define x(t, n) BCH_MEMBER_ERROR_##t = n,
+       BCH_MEMBER_ERROR_TYPES()
+#undef x
+       BCH_MEMBER_ERROR_NR
+};
+
 struct bch_member {
        __uuid_t                uuid;
        __le64                  nbuckets;       /* device size */
@@ -1292,6 +1287,9 @@ struct bch_member {
 
        __le64                  flags;
        __le32                  iops[4];
+       __le64                  errors[BCH_MEMBER_ERROR_NR];
+       __le64                  errors_at_reset[BCH_MEMBER_ERROR_NR];
+       __le64                  errors_reset_time;
 };
 
 #define BCH_MEMBER_V1_BYTES    56
@@ -1615,11 +1613,20 @@ struct journal_seq_blacklist_entry {
 
 struct bch_sb_field_journal_seq_blacklist {
        struct bch_sb_field     field;
+       struct journal_seq_blacklist_entry start[];
+};
 
-       struct journal_seq_blacklist_entry start[0];
-       __u64                   _data[];
+struct bch_sb_field_errors {
+       struct bch_sb_field     field;
+       struct bch_sb_field_error_entry {
+               __le64          v;
+               __le64          last_error_time;
+       }                       entries[];
 };
 
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_ID,    struct bch_sb_field_error_entry, v,  0, 16);
+LE64_BITMASK(BCH_SB_ERROR_ENTRY_NR,    struct bch_sb_field_error_entry, v, 16, 64);
+
 /* Superblock: */
 
 /*
@@ -1682,7 +1689,9 @@ struct bch_sb_field_journal_seq_blacklist {
        x(snapshot_skiplists,           BCH_VERSION(1,  1),             \
          BIT_ULL(BCH_RECOVERY_PASS_check_snapshots))                   \
        x(deleted_inodes,               BCH_VERSION(1,  2),             \
-         BIT_ULL(BCH_RECOVERY_PASS_check_inodes))
+         BIT_ULL(BCH_RECOVERY_PASS_check_inodes))                      \
+       x(rebalance_work,               BCH_VERSION(1,  3),             \
+         BIT_ULL(BCH_RECOVERY_PASS_set_fs_needs_rebalance))
 
 enum bcachefs_metadata_version {
        bcachefs_metadata_version_min = 9,
@@ -1693,7 +1702,7 @@ enum bcachefs_metadata_version {
 };
 
 static const __maybe_unused
-unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_major_minor;
+unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_rebalance_work;
 
 #define bcachefs_metadata_version_current      (bcachefs_metadata_version_max - 1)
 
@@ -2247,7 +2256,8 @@ LE32_BITMASK(JSET_NO_FLUSH,       struct jset, flags, 5, 6);
 enum btree_id_flags {
        BTREE_ID_EXTENTS        = BIT(0),
        BTREE_ID_SNAPSHOTS      = BIT(1),
-       BTREE_ID_DATA           = BIT(2),
+       BTREE_ID_SNAPSHOT_FIELD = BIT(2),
+       BTREE_ID_DATA           = BIT(3),
 };
 
 #define BCH_BTREE_IDS()                                                                \
@@ -2302,11 +2312,13 @@ enum btree_id_flags {
          BIT_ULL(KEY_TYPE_bucket_gens))                                        \
        x(snapshot_trees,       15,     0,                                      \
          BIT_ULL(KEY_TYPE_snapshot_tree))                                      \
-       x(deleted_inodes,       16,     BTREE_ID_SNAPSHOTS,                     \
+       x(deleted_inodes,       16,     BTREE_ID_SNAPSHOT_FIELD,                \
          BIT_ULL(KEY_TYPE_set))                                                \
        x(logged_ops,           17,     0,                                      \
          BIT_ULL(KEY_TYPE_logged_op_truncate)|                                 \
-         BIT_ULL(KEY_TYPE_logged_op_finsert))
+         BIT_ULL(KEY_TYPE_logged_op_finsert))                                  \
+       x(rebalance_work,       18,     BTREE_ID_SNAPSHOT_FIELD,                \
+         BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie))
 
 enum btree_id {
 #define x(name, nr, ...) BTREE_ID_##name = nr,
index 5184502092369d963f076383ee9bf3ca5a358f6c..831be01809f2c9271d4db159377decd1b8686bb6 100644 (file)
@@ -92,19 +92,15 @@ enum bkey_lr_packed {
 #define bkey_lr_packed(_l, _r)                                         \
        ((_l)->format + ((_r)->format << 1))
 
-#define bkey_copy(_dst, _src)                                  \
-do {                                                           \
-       BUILD_BUG_ON(!type_is(_dst, struct bkey_i *) &&         \
-                    !type_is(_dst, struct bkey_packed *));     \
-       BUILD_BUG_ON(!type_is(_src, struct bkey_i *) &&         \
-                    !type_is(_src, struct bkey_packed *));     \
-       EBUG_ON((u64 *) (_dst) > (u64 *) (_src) &&              \
-               (u64 *) (_dst) < (u64 *) (_src) +               \
-               ((struct bkey *) (_src))->u64s);                \
-                                                               \
-       memcpy_u64s_small((_dst), (_src),                       \
-                         ((struct bkey *) (_src))->u64s);      \
-} while (0)
+static inline void bkey_p_copy(struct bkey_packed *dst, const struct bkey_packed *src)
+{
+       memcpy_u64s_small(dst, src, src->u64s);
+}
+
+static inline void bkey_copy(struct bkey_i *dst, const struct bkey_i *src)
+{
+       memcpy_u64s_small(dst, src, src->k.u64s);
+}
 
 struct btree;
 
index d9fb1fc81f1e17fb45dedf76349177a0abc9f22a..761f5e33b1e69e94ca0aaaa41a9825e496b5840f 100644 (file)
@@ -3,6 +3,7 @@
 #include "bcachefs.h"
 #include "backpointers.h"
 #include "bkey_methods.h"
+#include "btree_cache.h"
 #include "btree_types.h"
 #include "alloc_background.h"
 #include "dirent.h"
@@ -25,7 +26,7 @@ const char * const bch2_bkey_types[] = {
        NULL
 };
 
-static int deleted_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
+static int deleted_key_invalid(struct bch_fs *c, struct bkey_s_c k,
                               enum bkey_invalid_flags flags, struct printbuf *err)
 {
        return 0;
@@ -39,23 +40,24 @@ static int deleted_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
        .key_invalid = deleted_key_invalid,             \
 })
 
-static int empty_val_key_invalid(const struct bch_fs *c, struct bkey_s_c k,
+static int empty_val_key_invalid(struct bch_fs *c, struct bkey_s_c k,
                                 enum bkey_invalid_flags flags, struct printbuf *err)
 {
-       if (bkey_val_bytes(k.k)) {
-               prt_printf(err, "incorrect value size (%zu != 0)",
-                      bkey_val_bytes(k.k));
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       return 0;
+       int ret = 0;
+
+       bkey_fsck_err_on(bkey_val_bytes(k.k), c, err,
+                        bkey_val_size_nonzero,
+                        "incorrect value size (%zu != 0)",
+                        bkey_val_bytes(k.k));
+fsck_err:
+       return ret;
 }
 
 #define bch2_bkey_ops_error ((struct bkey_ops) {       \
        .key_invalid = empty_val_key_invalid,           \
 })
 
-static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
+static int key_type_cookie_invalid(struct bch_fs *c, struct bkey_s_c k,
                                   enum bkey_invalid_flags flags, struct printbuf *err)
 {
        return 0;
@@ -70,7 +72,7 @@ static int key_type_cookie_invalid(const struct bch_fs *c, struct bkey_s_c k,
        .key_invalid = empty_val_key_invalid,           \
 })
 
-static int key_type_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
+static int key_type_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
                                        enum bkey_invalid_flags flags, struct printbuf *err)
 {
        return 0;
@@ -91,18 +93,6 @@ static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
        .val_to_text    = key_type_inline_data_to_text, \
 })
 
-static int key_type_set_invalid(const struct bch_fs *c, struct bkey_s_c k,
-                               enum bkey_invalid_flags flags, struct printbuf *err)
-{
-       if (bkey_val_bytes(k.k)) {
-               prt_printf(err, "incorrect value size (%zu != %zu)",
-                      bkey_val_bytes(k.k), sizeof(struct bch_cookie));
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       return 0;
-}
-
 static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
 {
        bch2_key_resize(l.k, l.k->size + r.k->size);
@@ -110,7 +100,7 @@ static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_
 }
 
 #define bch2_bkey_ops_set ((struct bkey_ops) {         \
-       .key_invalid    = key_type_set_invalid,         \
+       .key_invalid    = empty_val_key_invalid,        \
        .key_merge      = key_type_set_merge,           \
 })
 
@@ -128,84 +118,95 @@ int bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k,
                          struct printbuf *err)
 {
        const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
+       int ret = 0;
 
-       if (bkey_val_bytes(k.k) < ops->min_val_size) {
-               prt_printf(err, "bad val size (%zu < %u)",
-                          bkey_val_bytes(k.k), ops->min_val_size);
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(bkey_val_bytes(k.k) < ops->min_val_size, c, err,
+                        bkey_val_size_too_small,
+                        "bad val size (%zu < %u)",
+                        bkey_val_bytes(k.k), ops->min_val_size);
 
        if (!ops->key_invalid)
                return 0;
 
-       return ops->key_invalid(c, k, flags, err);
+       ret = ops->key_invalid(c, k, flags, err);
+fsck_err:
+       return ret;
 }
 
 static u64 bch2_key_types_allowed[] = {
-#define x(name, nr, flags, keys)       [BKEY_TYPE_##name] = BIT_ULL(KEY_TYPE_deleted)|keys,
-       BCH_BTREE_IDS()
-#undef x
        [BKEY_TYPE_btree] =
                BIT_ULL(KEY_TYPE_deleted)|
                BIT_ULL(KEY_TYPE_btree_ptr)|
                BIT_ULL(KEY_TYPE_btree_ptr_v2),
+#define x(name, nr, flags, keys)       [BKEY_TYPE_##name] = BIT_ULL(KEY_TYPE_deleted)|keys,
+       BCH_BTREE_IDS()
+#undef x
 };
 
+const char *bch2_btree_node_type_str(enum btree_node_type type)
+{
+       return type == BKEY_TYPE_btree ? "internal btree node" : bch2_btree_id_str(type - 1);
+}
+
 int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
                        enum btree_node_type type,
                        enum bkey_invalid_flags flags,
                        struct printbuf *err)
 {
-       if (k.k->u64s < BKEY_U64s) {
-               prt_printf(err, "u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s);
-               return -BCH_ERR_invalid_bkey;
-       }
+       int ret = 0;
 
-       if (flags & BKEY_INVALID_COMMIT  &&
-           !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type))) {
-               prt_printf(err, "invalid key type for btree %s (%s)",
-                          bch2_btree_ids[type], bch2_bkey_types[k.k->type]);
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(k.k->u64s < BKEY_U64s, c, err,
+                        bkey_u64s_too_small,
+                        "u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s);
 
-       if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
-               if (k.k->size == 0) {
-                       prt_printf(err, "size == 0");
-                       return -BCH_ERR_invalid_bkey;
-               }
+       if (type >= BKEY_TYPE_NR)
+               return 0;
 
-               if (k.k->size > k.k->p.offset) {
-                       prt_printf(err, "size greater than offset (%u > %llu)",
-                              k.k->size, k.k->p.offset);
-                       return -BCH_ERR_invalid_bkey;
-               }
+       bkey_fsck_err_on((flags & BKEY_INVALID_COMMIT) &&
+                        !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)), c, err,
+                        bkey_invalid_type_for_btree,
+                        "invalid key type for btree %s (%s)",
+                        bch2_btree_node_type_str(type), bch2_bkey_types[k.k->type]);
+
+       if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
+               bkey_fsck_err_on(k.k->size == 0, c, err,
+                                bkey_extent_size_zero,
+                                "size == 0");
+
+               bkey_fsck_err_on(k.k->size > k.k->p.offset, c, err,
+                                bkey_extent_size_greater_than_offset,
+                                "size greater than offset (%u > %llu)",
+                                k.k->size, k.k->p.offset);
        } else {
-               if (k.k->size) {
-                       prt_printf(err, "size != 0");
-                       return -BCH_ERR_invalid_bkey;
-               }
+               bkey_fsck_err_on(k.k->size, c, err,
+                                bkey_size_nonzero,
+                                "size != 0");
        }
 
        if (type != BKEY_TYPE_btree) {
-               if (!btree_type_has_snapshots((enum btree_id) type) &&
-                   k.k->p.snapshot) {
-                       prt_printf(err, "nonzero snapshot");
-                       return -BCH_ERR_invalid_bkey;
-               }
-
-               if (btree_type_has_snapshots((enum btree_id) type) &&
-                   !k.k->p.snapshot) {
-                       prt_printf(err, "snapshot == 0");
-                       return -BCH_ERR_invalid_bkey;
+               enum btree_id btree = type - 1;
+
+               if (btree_type_has_snapshots(btree)) {
+                       bkey_fsck_err_on(!k.k->p.snapshot, c, err,
+                                        bkey_snapshot_zero,
+                                        "snapshot == 0");
+               } else if (!btree_type_has_snapshot_field(btree)) {
+                       bkey_fsck_err_on(k.k->p.snapshot, c, err,
+                                        bkey_snapshot_nonzero,
+                                        "nonzero snapshot");
+               } else {
+                       /*
+                        * btree uses snapshot field but it's not required to be
+                        * nonzero
+                        */
                }
 
-               if (bkey_eq(k.k->p, POS_MAX)) {
-                       prt_printf(err, "key at POS_MAX");
-                       return -BCH_ERR_invalid_bkey;
-               }
+               bkey_fsck_err_on(bkey_eq(k.k->p, POS_MAX), c, err,
+                                bkey_at_pos_max,
+                                "key at POS_MAX");
        }
-
-       return 0;
+fsck_err:
+       return ret;
 }
 
 int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
@@ -217,20 +218,20 @@ int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
                bch2_bkey_val_invalid(c, k, flags, err);
 }
 
-int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
-                           struct printbuf *err)
+int bch2_bkey_in_btree_node(struct bch_fs *c, struct btree *b,
+                           struct bkey_s_c k, struct printbuf *err)
 {
-       if (bpos_lt(k.k->p, b->data->min_key)) {
-               prt_printf(err, "key before start of btree node");
-               return -BCH_ERR_invalid_bkey;
-       }
+       int ret = 0;
 
-       if (bpos_gt(k.k->p, b->data->max_key)) {
-               prt_printf(err, "key past end of btree node");
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(bpos_lt(k.k->p, b->data->min_key), c, err,
+                        bkey_before_start_of_btree_node,
+                        "key before start of btree node");
 
-       return 0;
+       bkey_fsck_err_on(bpos_gt(k.k->p, b->data->max_key), c, err,
+                        bkey_after_end_of_btree_node,
+                        "key past end of btree node");
+fsck_err:
+       return ret;
 }
 
 void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
index 668f595e2fcfeadf29547bd33d46448bad6dc7e3..3a370b7087acea9bed0de0e1c565034336303d01 100644 (file)
@@ -21,7 +21,7 @@ extern const struct bkey_ops bch2_bkey_null_ops;
  * being read or written; more aggressive checks can be enabled when rw == WRITE.
  */
 struct bkey_ops {
-       int             (*key_invalid)(const struct bch_fs *c, struct bkey_s_c k,
+       int             (*key_invalid)(struct bch_fs *c, struct bkey_s_c k,
                                       enum bkey_invalid_flags flags, struct printbuf *err);
        void            (*val_to_text)(struct printbuf *, struct bch_fs *,
                                       struct bkey_s_c);
@@ -55,7 +55,8 @@ int __bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
                        enum bkey_invalid_flags, struct printbuf *);
 int bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
                      enum bkey_invalid_flags, struct printbuf *);
-int bch2_bkey_in_btree_node(struct btree *, struct bkey_s_c, struct printbuf *);
+int bch2_bkey_in_btree_node(struct bch_fs *, struct btree *,
+                           struct bkey_s_c, struct printbuf *);
 
 void bch2_bpos_to_text(struct printbuf *, struct bpos);
 void bch2_bkey_to_text(struct printbuf *, const struct bkey *);
@@ -119,16 +120,6 @@ enum btree_update_flags {
 #define BTREE_TRIGGER_BUCKET_INVALIDATE        (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
 #define BTREE_TRIGGER_NOATOMIC         (1U << __BTREE_TRIGGER_NOATOMIC)
 
-#define BTREE_TRIGGER_WANTS_OLD_AND_NEW                \
-       ((1U << KEY_TYPE_alloc)|                \
-        (1U << KEY_TYPE_alloc_v2)|             \
-        (1U << KEY_TYPE_alloc_v3)|             \
-        (1U << KEY_TYPE_alloc_v4)|             \
-        (1U << KEY_TYPE_stripe)|               \
-        (1U << KEY_TYPE_inode)|                \
-        (1U << KEY_TYPE_inode_v2)|             \
-        (1U << KEY_TYPE_snapshot))
-
 static inline int bch2_trans_mark_key(struct btree_trans *trans,
                                      enum btree_id btree_id, unsigned level,
                                      struct bkey_s_c old, struct bkey_i *new,
index b9aa027c881b14458394ae8fdad6d93f53e1cbbf..bcca9e76a0b4bf40f20903c856e7559e60b87da1 100644 (file)
@@ -106,7 +106,7 @@ bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
        while ((k = sort_iter_peek(iter))) {
                if (!bkey_deleted(k) &&
                    !should_drop_next_key(iter)) {
-                       bkey_copy(out, k);
+                       bkey_p_copy(out, k);
                        btree_keys_account_key_add(&nr, 0, out);
                        out = bkey_p_next(out);
                }
@@ -137,7 +137,7 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
                        continue;
 
                if (!transform)
-                       bkey_copy(out, in);
+                       bkey_p_copy(out, in);
                else if (bch2_bkey_transform(out_f, out, bkey_packed(in)
                                             ? in_f : &bch2_bkey_format_current, in))
                        out->format = KEY_FORMAT_LOCAL_BTREE;
@@ -191,7 +191,7 @@ unsigned bch2_sort_keys(struct bkey_packed *dst,
                        memcpy_u64s_small(out, in, bkeyp_key_u64s(f, in));
                        set_bkeyp_val_u64s(f, out, 0);
                } else {
-                       bkey_copy(out, in);
+                       bkey_p_copy(out, in);
                }
                out->needs_whiteout |= needs_whiteout;
                out = bkey_p_next(out);
index 5e585819190576db1f22ec9ec19b9b3322f1caf1..47e7770d05831757d45c4aee9331f52a10c06e76 100644 (file)
@@ -472,7 +472,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 
        mutex_init(&c->verify_lock);
 
-       shrink = shrinker_alloc(0, "%s/btree_cache", c->name);
+       shrink = shrinker_alloc(0, "%s-btree_cache", c->name);
        if (!shrink)
                goto err;
        bc->shrink = shrink;
@@ -785,12 +785,12 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
               "btree node header doesn't match ptr\n"
               "btree %s level %u\n"
               "ptr: ",
-              bch2_btree_ids[b->c.btree_id], b->c.level);
+              bch2_btree_id_str(b->c.btree_id), b->c.level);
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
 
        prt_printf(&buf, "\nheader: btree %s level %llu\n"
               "min ",
-              bch2_btree_ids[BTREE_NODE_ID(b->data)],
+              bch2_btree_id_str(BTREE_NODE_ID(b->data)),
               BTREE_NODE_LEVEL(b->data));
        bch2_bpos_to_text(&buf, b->data->min_key);
 
@@ -1153,8 +1153,21 @@ wait_on_io:
        six_unlock_intent(&b->c.lock);
 }
 
-void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c,
-                            const struct btree *b)
+const char *bch2_btree_id_str(enum btree_id btree)
+{
+       return btree < BTREE_ID_NR ? __bch2_btree_ids[btree] : "(unknown)";
+}
+
+void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
+{
+       prt_printf(out, "%s level %u/%u\n  ",
+              bch2_btree_id_str(b->c.btree_id),
+              b->c.level,
+              bch2_btree_id_root(c, b->c.btree_id)->level);
+       bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
+}
+
+void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
 {
        struct bset_stats stats;
 
index 1e562b6efa629fbdfc92d7451e92768b67a4bdb8..cfb80b201d61be9240ed659baa57a693d12b796a 100644 (file)
@@ -123,8 +123,9 @@ static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b)
        return bch2_btree_id_root(c, b->c.btree_id)->b;
 }
 
-void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *,
-                            const struct btree *);
+const char *bch2_btree_id_str(enum btree_id);
+void bch2_btree_pos_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
+void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
 void bch2_btree_cache_to_text(struct printbuf *, const struct bch_fs *);
 
 #endif /* _BCACHEFS_BTREE_CACHE_H */
index 693ed067b1a77a7ec89b04d5135656579ec2099c..0b5d09c8475d00bf35ee70c52cd9ae9483a56823 100644 (file)
@@ -95,15 +95,15 @@ static int bch2_gc_check_topology(struct bch_fs *c,
                        bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k));
 
                        if (__fsck_err(c,
-                                 FSCK_CAN_FIX|
-                                 FSCK_CAN_IGNORE|
-                                 FSCK_NO_RATELIMIT,
-                                 "btree node with incorrect min_key at btree %s level %u:\n"
-                                 "  prev %s\n"
-                                 "  cur %s",
-                                 bch2_btree_ids[b->c.btree_id], b->c.level,
-                                 buf1.buf, buf2.buf) &&
-                           should_restart_for_topology_repair(c)) {
+                                      FSCK_CAN_FIX|
+                                      FSCK_CAN_IGNORE|
+                                      FSCK_NO_RATELIMIT,
+                                      btree_node_topology_bad_min_key,
+                                      "btree node with incorrect min_key at btree %s level %u:\n"
+                                      "  prev %s\n"
+                                      "  cur %s",
+                                      bch2_btree_id_str(b->c.btree_id), b->c.level,
+                                      buf1.buf, buf2.buf) && should_restart_for_topology_repair(c)) {
                                bch_info(c, "Halting mark and sweep to start topology repair pass");
                                ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
                                goto err;
@@ -122,14 +122,12 @@ static int bch2_gc_check_topology(struct bch_fs *c,
                bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k));
                bch2_bpos_to_text(&buf2, node_end);
 
-               if (__fsck_err(c,
-                         FSCK_CAN_FIX|
-                         FSCK_CAN_IGNORE|
-                         FSCK_NO_RATELIMIT,
+               if (__fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE|FSCK_NO_RATELIMIT,
+                         btree_node_topology_bad_max_key,
                          "btree node with incorrect max_key at btree %s level %u:\n"
                          "  %s\n"
                          "  expected %s",
-                         bch2_btree_ids[b->c.btree_id], b->c.level,
+                         bch2_btree_id_str(b->c.btree_id), b->c.level,
                          buf1.buf, buf2.buf) &&
                    should_restart_for_topology_repair(c)) {
                        bch_info(c, "Halting mark and sweep to start topology repair pass");
@@ -287,10 +285,11 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 
                if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
                                                cur->data->min_key), c,
+                               btree_node_topology_overwritten_by_next_node,
                                "btree node overwritten by next node at btree %s level %u:\n"
                                "  node %s\n"
                                "  next %s",
-                               bch2_btree_ids[b->c.btree_id], b->c.level,
+                               bch2_btree_id_str(b->c.btree_id), b->c.level,
                                buf1.buf, buf2.buf)) {
                        ret = DROP_PREV_NODE;
                        goto out;
@@ -298,10 +297,11 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 
                if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
                                                 bpos_predecessor(cur->data->min_key)), c,
+                               btree_node_topology_bad_max_key,
                                "btree node with incorrect max_key at btree %s level %u:\n"
                                "  node %s\n"
                                "  next %s",
-                               bch2_btree_ids[b->c.btree_id], b->c.level,
+                               bch2_btree_id_str(b->c.btree_id), b->c.level,
                                buf1.buf, buf2.buf))
                        ret = set_node_max(c, prev,
                                           bpos_predecessor(cur->data->min_key));
@@ -310,20 +310,22 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 
                if (mustfix_fsck_err_on(bpos_ge(expected_start,
                                                cur->data->max_key), c,
+                               btree_node_topology_overwritten_by_prev_node,
                                "btree node overwritten by prev node at btree %s level %u:\n"
                                "  prev %s\n"
                                "  node %s",
-                               bch2_btree_ids[b->c.btree_id], b->c.level,
+                               bch2_btree_id_str(b->c.btree_id), b->c.level,
                                buf1.buf, buf2.buf)) {
                        ret = DROP_THIS_NODE;
                        goto out;
                }
 
                if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
+                               btree_node_topology_bad_min_key,
                                "btree node with incorrect min_key at btree %s level %u:\n"
                                "  prev %s\n"
                                "  node %s",
-                               bch2_btree_ids[b->c.btree_id], b->c.level,
+                               bch2_btree_id_str(b->c.btree_id), b->c.level,
                                buf1.buf, buf2.buf))
                        ret = set_node_min(c, cur, expected_start);
        }
@@ -344,10 +346,11 @@ static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
        bch2_bpos_to_text(&buf2, b->key.k.p);
 
        if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
+                               btree_node_topology_bad_max_key,
                        "btree node with incorrect max_key at btree %s level %u:\n"
                        "  %s\n"
                        "  expected %s",
-                       bch2_btree_ids[b->c.btree_id], b->c.level,
+                       bch2_btree_id_str(b->c.btree_id), b->c.level,
                        buf1.buf, buf2.buf)) {
                ret = set_node_max(c, child, b->key.k.p);
                if (ret)
@@ -396,9 +399,10 @@ again:
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));
 
                if (mustfix_fsck_err_on(ret == -EIO, c,
+                               btree_node_unreadable,
                                "Topology repair: unreadable btree node at btree %s level %u:\n"
                                "  %s",
-                               bch2_btree_ids[b->c.btree_id],
+                               bch2_btree_id_str(b->c.btree_id),
                                b->c.level - 1,
                                buf.buf)) {
                        bch2_btree_node_evict(trans, cur_k.k);
@@ -504,9 +508,10 @@ again:
        bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
 
        if (mustfix_fsck_err_on(!have_child, c,
+                       btree_node_topology_interior_node_empty,
                        "empty interior btree node at btree %s level %u\n"
                        "  %s",
-                       bch2_btree_ids[b->c.btree_id],
+                       bch2_btree_id_str(b->c.btree_id),
                        b->c.level, buf.buf))
                ret = DROP_THIS_NODE;
 err:
@@ -582,7 +587,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 
                if (!g->gen_valid &&
                    (c->opts.reconstruct_alloc ||
-                    fsck_err(c, "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
+                    fsck_err(c, ptr_to_missing_alloc_key,
+                             "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
                              "while marking %s",
                              p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                              bch2_data_types[ptr_data_type(k->k, &p.ptr)],
@@ -599,7 +605,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 
                if (gen_cmp(p.ptr.gen, g->gen) > 0 &&
                    (c->opts.reconstruct_alloc ||
-                    fsck_err(c, "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
+                    fsck_err(c, ptr_gen_newer_than_bucket_gen,
+                             "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
                              "while marking %s",
                              p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                              bch2_data_types[ptr_data_type(k->k, &p.ptr)],
@@ -620,7 +627,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 
                if (gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX &&
                    (c->opts.reconstruct_alloc ||
-                    fsck_err(c, "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
+                    fsck_err(c, ptr_gen_newer_than_bucket_gen,
+                             "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
                              "while marking %s",
                              p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
                              bch2_data_types[ptr_data_type(k->k, &p.ptr)],
@@ -631,7 +639,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 
                if (!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0 &&
                    (c->opts.reconstruct_alloc ||
-                    fsck_err(c, "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
+                    fsck_err(c, stale_dirty_ptr,
+                             "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
                              "while marking %s",
                              p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                              bch2_data_types[ptr_data_type(k->k, &p.ptr)],
@@ -645,6 +654,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 
                if (fsck_err_on(bucket_data_type(g->data_type) &&
                                bucket_data_type(g->data_type) != data_type, c,
+                               ptr_bucket_data_type_mismatch,
                                "bucket %u:%zu different types of data in same bucket: %s, %s\n"
                                "while marking %s",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
@@ -664,6 +674,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
                        struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);
 
                        if (fsck_err_on(!m || !m->alive, c,
+                                       ptr_to_missing_stripe,
                                        "pointer to nonexistent stripe %llu\n"
                                        "while marking %s",
                                        (u64) p.ec.idx,
@@ -672,6 +683,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
                                do_update = true;
 
                        if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c,
+                                       ptr_to_incorrect_stripe,
                                        "pointer does not match stripe %llu\n"
                                        "while marking %s",
                                        (u64) p.ec.idx,
@@ -811,6 +823,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
                        goto err;
 
                if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
+                               bkey_version_in_future,
                                "key version number higher than recorded: %llu > %llu",
                                k->k->version.lo,
                                atomic64_read(&c->key_version)))
@@ -968,9 +981,10 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
                                          FSCK_CAN_FIX|
                                          FSCK_CAN_IGNORE|
                                          FSCK_NO_RATELIMIT,
+                                         btree_node_read_error,
                                          "Unreadable btree node at btree %s level %u:\n"
                                          "  %s",
-                                         bch2_btree_ids[b->c.btree_id],
+                                         bch2_btree_id_str(b->c.btree_id),
                                          b->c.level - 1,
                                          (printbuf_reset(&buf),
                                           bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
@@ -1025,6 +1039,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
        printbuf_reset(&buf);
        bch2_bpos_to_text(&buf, b->data->min_key);
        if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
+                               btree_root_bad_min_key,
                        "btree root with incorrect min_key: %s", buf.buf)) {
                bch_err(c, "repair unimplemented");
                ret = -BCH_ERR_fsck_repair_unimplemented;
@@ -1034,6 +1049,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
        printbuf_reset(&buf);
        bch2_bpos_to_text(&buf, b->data->max_key);
        if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
+                               btree_root_bad_max_key,
                        "btree root with incorrect max_key: %s", buf.buf)) {
                bch_err(c, "repair unimplemented");
                ret = -BCH_ERR_fsck_repair_unimplemented;
@@ -1207,16 +1223,16 @@ static int bch2_gc_done(struct bch_fs *c,
 
        percpu_down_write(&c->mark_lock);
 
-#define copy_field(_f, _msg, ...)                                      \
+#define copy_field(_err, _f, _msg, ...)                                        \
        if (dst->_f != src->_f &&                                       \
            (!verify ||                                                 \
-            fsck_err(c, _msg ": got %llu, should be %llu"              \
+            fsck_err(c, _err, _msg ": got %llu, should be %llu"        \
                      , ##__VA_ARGS__, dst->_f, src->_f)))              \
                dst->_f = src->_f
-#define copy_dev_field(_f, _msg, ...)                                  \
-       copy_field(_f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
-#define copy_fs_field(_f, _msg, ...)                                   \
-       copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
+#define copy_dev_field(_err, _f, _msg, ...)                            \
+       copy_field(_err, _f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
+#define copy_fs_field(_err, _f, _msg, ...)                             \
+       copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
 
        for (i = 0; i < ARRAY_SIZE(c->usage); i++)
                bch2_fs_usage_acc_to_base(c, i);
@@ -1227,13 +1243,17 @@ static int bch2_gc_done(struct bch_fs *c,
                        bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
                                             dev_usage_u64s());
 
-               copy_dev_field(buckets_ec,              "buckets_ec");
-
                for (i = 0; i < BCH_DATA_NR; i++) {
-                       copy_dev_field(d[i].buckets,    "%s buckets", bch2_data_types[i]);
-                       copy_dev_field(d[i].sectors,    "%s sectors", bch2_data_types[i]);
-                       copy_dev_field(d[i].fragmented, "%s fragmented", bch2_data_types[i]);
+                       copy_dev_field(dev_usage_buckets_wrong,
+                                      d[i].buckets,    "%s buckets", bch2_data_types[i]);
+                       copy_dev_field(dev_usage_sectors_wrong,
+                                      d[i].sectors,    "%s sectors", bch2_data_types[i]);
+                       copy_dev_field(dev_usage_fragmented_wrong,
+                                      d[i].fragmented, "%s fragmented", bch2_data_types[i]);
                }
+
+               copy_dev_field(dev_usage_buckets_ec_wrong,
+                              buckets_ec,              "buckets_ec");
        }
 
        {
@@ -1242,17 +1262,24 @@ static int bch2_gc_done(struct bch_fs *c,
                struct bch_fs_usage *src = (void *)
                        bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);
 
-               copy_fs_field(hidden,           "hidden");
-               copy_fs_field(btree,            "btree");
+               copy_fs_field(fs_usage_hidden_wrong,
+                             hidden,           "hidden");
+               copy_fs_field(fs_usage_btree_wrong,
+                             btree,            "btree");
 
                if (!metadata_only) {
-                       copy_fs_field(data,     "data");
-                       copy_fs_field(cached,   "cached");
-                       copy_fs_field(reserved, "reserved");
-                       copy_fs_field(nr_inodes,"nr_inodes");
+                       copy_fs_field(fs_usage_data_wrong,
+                                     data,     "data");
+                       copy_fs_field(fs_usage_cached_wrong,
+                                     cached,   "cached");
+                       copy_fs_field(fs_usage_reserved_wrong,
+                                     reserved, "reserved");
+                       copy_fs_field(fs_usage_nr_inodes_wrong,
+                                     nr_inodes,"nr_inodes");
 
                        for (i = 0; i < BCH_REPLICAS_MAX; i++)
-                               copy_fs_field(persistent_reserved[i],
+                               copy_fs_field(fs_usage_persistent_reserved_wrong,
+                                             persistent_reserved[i],
                                              "persistent_reserved[%i]", i);
                }
 
@@ -1268,7 +1295,8 @@ static int bch2_gc_done(struct bch_fs *c,
                        printbuf_reset(&buf);
                        bch2_replicas_entry_to_text(&buf, e);
 
-                       copy_fs_field(replicas[i], "%s", buf.buf);
+                       copy_fs_field(fs_usage_replicas_wrong,
+                                     replicas[i], "%s", buf.buf);
                }
        }
 
@@ -1404,6 +1432,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 
        if (c->opts.reconstruct_alloc ||
            fsck_err_on(new.data_type != gc.data_type, c,
+                       alloc_key_data_type_wrong,
                        "bucket %llu:%llu gen %u has wrong data_type"
                        ": got %s, should be %s",
                        iter->pos.inode, iter->pos.offset,
@@ -1412,9 +1441,9 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
                        bch2_data_types[gc.data_type]))
                new.data_type = gc.data_type;
 
-#define copy_bucket_field(_f)                                          \
+#define copy_bucket_field(_errtype, _f)                                        \
        if (c->opts.reconstruct_alloc ||                                \
-           fsck_err_on(new._f != gc._f, c,                             \
+           fsck_err_on(new._f != gc._f, c, _errtype,                   \
                        "bucket %llu:%llu gen %u data type %s has wrong " #_f   \
                        ": got %u, should be %u",                       \
                        iter->pos.inode, iter->pos.offset,              \
@@ -1423,11 +1452,16 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
                        new._f, gc._f))                                 \
                new._f = gc._f;                                         \
 
-       copy_bucket_field(gen);
-       copy_bucket_field(dirty_sectors);
-       copy_bucket_field(cached_sectors);
-       copy_bucket_field(stripe_redundancy);
-       copy_bucket_field(stripe);
+       copy_bucket_field(alloc_key_gen_wrong,
+                         gen);
+       copy_bucket_field(alloc_key_dirty_sectors_wrong,
+                         dirty_sectors);
+       copy_bucket_field(alloc_key_cached_sectors_wrong,
+                         cached_sectors);
+       copy_bucket_field(alloc_key_stripe_wrong,
+                         stripe);
+       copy_bucket_field(alloc_key_stripe_redundancy_wrong,
+                         stripe_redundancy);
 #undef copy_bucket_field
 
        if (!bch2_alloc_v4_cmp(*old, new))
@@ -1584,6 +1618,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
        }
 
        if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
+                       reflink_v_refcount_wrong,
                        "reflink key has wrong refcount:\n"
                        "  %s\n"
                        "  should be %u",
@@ -1709,7 +1744,8 @@ static int bch2_gc_write_stripes_key(struct btree_trans *trans,
        if (bad)
                bch2_bkey_val_to_text(&buf, c, k);
 
-       if (fsck_err_on(bad, c, "%s", buf.buf)) {
+       if (fsck_err_on(bad, c, stripe_sector_count_wrong,
+                       "%s", buf.buf)) {
                struct bkey_i_stripe *new;
 
                new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
@@ -1954,19 +1990,17 @@ int bch2_gc_gens(struct bch_fs *c)
        trans = bch2_trans_get(c);
 
        for_each_member_device(ca, c, i) {
-               struct bucket_gens *gens;
+               struct bucket_gens *gens = bucket_gens(ca);
 
                BUG_ON(ca->oldest_gen);
 
-               ca->oldest_gen = kvmalloc(ca->mi.nbuckets, GFP_KERNEL);
+               ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
                if (!ca->oldest_gen) {
                        percpu_ref_put(&ca->ref);
                        ret = -BCH_ERR_ENOMEM_gc_gens;
                        goto err;
                }
 
-               gens = bucket_gens(ca);
-
                for (b = gens->first_bucket;
                     b < gens->nbuckets; b++)
                        ca->oldest_gen[b] = gens->b[b];
index a869cf6ac7c6b94c84cba88bb0a3eeaac0d98d68..37d896edb06e0475cc7146e31a2790321f842394 100644 (file)
@@ -184,7 +184,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
        k = new_whiteouts;
 
        while (ptrs != ptrs_end) {
-               bkey_copy(k, *ptrs);
+               bkey_p_copy(k, *ptrs);
                k = bkey_p_next(k);
                ptrs++;
        }
@@ -260,7 +260,7 @@ static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
                        n = bkey_p_next(k);
 
                        if (!bkey_deleted(k)) {
-                               bkey_copy(out, k);
+                               bkey_p_copy(out, k);
                                out = bkey_p_next(out);
                        } else {
                                BUG_ON(k->needs_whiteout);
@@ -510,16 +510,6 @@ void bch2_btree_init_next(struct btree_trans *trans, struct btree *b)
                bch2_trans_node_reinit_iter(trans, b);
 }
 
-static void btree_pos_to_text(struct printbuf *out, struct bch_fs *c,
-                         struct btree *b)
-{
-       prt_printf(out, "%s level %u/%u\n  ",
-              bch2_btree_ids[b->c.btree_id],
-              b->c.level,
-              bch2_btree_id_root(c, b->c.btree_id)->level);
-       bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
-}
-
 static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
                          struct bch_dev *ca,
                          struct btree *b, struct bset *i,
@@ -532,7 +522,7 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
        if (ca)
                prt_printf(out, "on %s ", ca->name);
        prt_printf(out, "at btree ");
-       btree_pos_to_text(out, c, b);
+       bch2_btree_pos_to_text(out, c, b);
 
        prt_printf(out, "\n  node offset %u", b->written);
        if (i)
@@ -540,7 +530,7 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
        prt_str(out, ": ");
 }
 
-__printf(8, 9)
+__printf(9, 10)
 static int __btree_err(int ret,
                       struct bch_fs *c,
                       struct bch_dev *ca,
@@ -548,6 +538,7 @@ static int __btree_err(int ret,
                       struct bset *i,
                       int write,
                       bool have_retry,
+                      enum bch_sb_error_id err_type,
                       const char *fmt, ...)
 {
        struct printbuf out = PRINTBUF;
@@ -572,9 +563,15 @@ static int __btree_err(int ret,
        if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
                ret = -BCH_ERR_btree_node_read_err_bad_node;
 
+       if (ret != -BCH_ERR_btree_node_read_err_fixable)
+               bch2_sb_error_count(c, err_type);
+
        switch (ret) {
        case -BCH_ERR_btree_node_read_err_fixable:
-               mustfix_fsck_err(c, "%s", out.buf);
+               ret = bch2_fsck_err(c, FSCK_CAN_FIX, err_type, "%s", out.buf);
+               if (ret != -BCH_ERR_fsck_fix &&
+                   ret != -BCH_ERR_fsck_ignore)
+                       goto fsck_err;
                ret = -BCH_ERR_fsck_fix;
                break;
        case -BCH_ERR_btree_node_read_err_want_retry:
@@ -599,9 +596,11 @@ fsck_err:
        return ret;
 }
 
-#define btree_err(type, c, ca, b, i, msg, ...)                         \
+#define btree_err(type, c, ca, b, i, _err_type, msg, ...)              \
 ({                                                                     \
-       int _ret = __btree_err(type, c, ca, b, i, write, have_retry, msg, ##__VA_ARGS__);\
+       int _ret = __btree_err(type, c, ca, b, i, write, have_retry,    \
+                              BCH_FSCK_ERR_##_err_type,                \
+                              msg, ##__VA_ARGS__);                     \
                                                                        \
        if (_ret != -BCH_ERR_fsck_fix) {                                \
                ret = _ret;                                             \
@@ -676,13 +675,17 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
        int ret = 0;
 
        btree_err_on(!bch2_version_compatible(version),
-                    -BCH_ERR_btree_node_read_err_incompatible, c, ca, b, i,
+                    -BCH_ERR_btree_node_read_err_incompatible,
+                    c, ca, b, i,
+                    btree_node_unsupported_version,
                     "unsupported bset version %u.%u",
                     BCH_VERSION_MAJOR(version),
                     BCH_VERSION_MINOR(version));
 
        if (btree_err_on(version < c->sb.version_min,
-                        -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+                        -BCH_ERR_btree_node_read_err_fixable,
+                        c, NULL, b, i,
+                        btree_node_bset_older_than_sb_min,
                         "bset version %u older than superblock version_min %u",
                         version, c->sb.version_min)) {
                mutex_lock(&c->sb_lock);
@@ -693,7 +696,9 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 
        if (btree_err_on(BCH_VERSION_MAJOR(version) >
                         BCH_VERSION_MAJOR(c->sb.version),
-                        -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+                        -BCH_ERR_btree_node_read_err_fixable,
+                        c, NULL, b, i,
+                        btree_node_bset_newer_than_sb,
                         "bset version %u newer than superblock version %u",
                         version, c->sb.version)) {
                mutex_lock(&c->sb_lock);
@@ -703,11 +708,15 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
        }
 
        btree_err_on(BSET_SEPARATE_WHITEOUTS(i),
-                    -BCH_ERR_btree_node_read_err_incompatible, c, ca, b, i,
+                    -BCH_ERR_btree_node_read_err_incompatible,
+                    c, ca, b, i,
+                    btree_node_unsupported_version,
                     "BSET_SEPARATE_WHITEOUTS no longer supported");
 
        if (btree_err_on(offset + sectors > btree_sectors(c),
-                        -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+                        -BCH_ERR_btree_node_read_err_fixable,
+                        c, ca, b, i,
+                        bset_past_end_of_btree_node,
                         "bset past end of btree node")) {
                i->u64s = 0;
                ret = 0;
@@ -715,12 +724,15 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
        }
 
        btree_err_on(offset && !i->u64s,
-                    -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+                    -BCH_ERR_btree_node_read_err_fixable,
+                    c, ca, b, i,
+                    bset_empty,
                     "empty bset");
 
-       btree_err_on(BSET_OFFSET(i) &&
-                    BSET_OFFSET(i) != offset,
-                    -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
+       btree_err_on(BSET_OFFSET(i) && BSET_OFFSET(i) != offset,
+                    -BCH_ERR_btree_node_read_err_want_retry,
+                    c, ca, b, i,
+                    bset_wrong_sector_offset,
                     "bset at wrong sector offset");
 
        if (!offset) {
@@ -734,16 +746,22 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 
                        /* XXX endianness */
                        btree_err_on(bp->seq != bn->keys.seq,
-                                    -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+                                    -BCH_ERR_btree_node_read_err_must_retry,
+                                    c, ca, b, NULL,
+                                    bset_bad_seq,
                                     "incorrect sequence number (wrong btree node)");
                }
 
                btree_err_on(BTREE_NODE_ID(bn) != b->c.btree_id,
-                            -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
+                            -BCH_ERR_btree_node_read_err_must_retry,
+                            c, ca, b, i,
+                            btree_node_bad_btree,
                             "incorrect btree id");
 
                btree_err_on(BTREE_NODE_LEVEL(bn) != b->c.level,
-                            -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
+                            -BCH_ERR_btree_node_read_err_must_retry,
+                            c, ca, b, i,
+                            btree_node_bad_level,
                             "incorrect level");
 
                if (!write)
@@ -760,7 +778,9 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
                        }
 
                        btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
-                                    -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+                                    -BCH_ERR_btree_node_read_err_must_retry,
+                                    c, ca, b, NULL,
+                                    btree_node_bad_min_key,
                                     "incorrect min_key: got %s should be %s",
                                     (printbuf_reset(&buf1),
                                      bch2_bpos_to_text(&buf1, bn->min_key), buf1.buf),
@@ -769,7 +789,9 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
                }
 
                btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
-                            -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, i,
+                            -BCH_ERR_btree_node_read_err_must_retry,
+                            c, ca, b, i,
+                            btree_node_bad_max_key,
                             "incorrect max key %s",
                             (printbuf_reset(&buf1),
                              bch2_bpos_to_text(&buf1, bn->max_key), buf1.buf));
@@ -779,7 +801,9 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
                                          BSET_BIG_ENDIAN(i), write, bn);
 
                btree_err_on(bch2_bkey_format_invalid(c, &bn->format, write, &buf1),
-                            -BCH_ERR_btree_node_read_err_bad_node, c, ca, b, i,
+                            -BCH_ERR_btree_node_read_err_bad_node,
+                            c, ca, b, i,
+                            btree_node_bad_format,
                             "invalid bkey format: %s\n  %s", buf1.buf,
                             (printbuf_reset(&buf2),
                              bch2_bkey_format_to_text(&buf2, &bn->format), buf2.buf));
@@ -802,7 +826,7 @@ static int bset_key_invalid(struct bch_fs *c, struct btree *b,
                            struct printbuf *err)
 {
        return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
-               (!updated_range ? bch2_bkey_in_btree_node(b, k, err) : 0) ?:
+               (!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
                (rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
 }
 
@@ -823,14 +847,18 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                struct bkey tmp;
 
                if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
-                                -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+                                -BCH_ERR_btree_node_read_err_fixable,
+                                c, NULL, b, i,
+                                btree_node_bkey_past_bset_end,
                                 "key extends past end of bset")) {
                        i->u64s = cpu_to_le16((u64 *) k - i->_data);
                        break;
                }
 
                if (btree_err_on(k->format > KEY_FORMAT_CURRENT,
-                                -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i,
+                                -BCH_ERR_btree_node_read_err_fixable,
+                                c, NULL, b, i,
+                                btree_node_bkey_bad_format,
                                 "invalid bkey format %u", k->format)) {
                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
                        memmove_u64s_down(k, bkey_p_next(k),
@@ -849,12 +877,14 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                printbuf_reset(&buf);
                if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
                        printbuf_reset(&buf);
-                       prt_printf(&buf, "invalid bkey:  ");
                        bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
                        prt_printf(&buf, "\n  ");
                        bch2_bkey_val_to_text(&buf, c, u.s_c);
 
-                       btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf);
+                       btree_err(-BCH_ERR_btree_node_read_err_fixable,
+                                 c, NULL, b, i,
+                                 btree_node_bad_bkey,
+                                 "invalid bkey: %s", buf.buf);
 
                        i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
                        memmove_u64s_down(k, bkey_p_next(k),
@@ -878,7 +908,10 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 
                        bch2_dump_bset(c, b, i, 0);
 
-                       if (btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf)) {
+                       if (btree_err(-BCH_ERR_btree_node_read_err_fixable,
+                                     c, NULL, b, i,
+                                     btree_node_bkey_out_of_order,
+                                     "%s", buf.buf)) {
                                i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
                                memmove_u64s_down(k, bkey_p_next(k),
                                                  (u64 *) vstruct_end(i) - (u64 *) k);
@@ -919,47 +952,62 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
        sort_iter_init(iter, b, (btree_blocks(c) + 1) * 2);
 
        if (bch2_meta_read_fault("btree"))
-               btree_err(-BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+               btree_err(-BCH_ERR_btree_node_read_err_must_retry,
+                         c, ca, b, NULL,
+                         btree_node_fault_injected,
                          "dynamic fault");
 
        btree_err_on(le64_to_cpu(b->data->magic) != bset_magic(c),
-                    -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+                    -BCH_ERR_btree_node_read_err_must_retry,
+                    c, ca, b, NULL,
+                    btree_node_bad_magic,
                     "bad magic: want %llx, got %llx",
                     bset_magic(c), le64_to_cpu(b->data->magic));
 
-       btree_err_on(!b->data->keys.seq,
-                    -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
-                    "bad btree header: seq 0");
-
        if (b->key.k.type == KEY_TYPE_btree_ptr_v2) {
                struct bch_btree_ptr_v2 *bp =
                        &bkey_i_to_btree_ptr_v2(&b->key)->v;
 
                btree_err_on(b->data->keys.seq != bp->seq,
-                            -BCH_ERR_btree_node_read_err_must_retry, c, ca, b, NULL,
+                            -BCH_ERR_btree_node_read_err_must_retry,
+                            c, ca, b, NULL,
+                            btree_node_bad_seq,
                             "got wrong btree node (seq %llx want %llx)",
                             b->data->keys.seq, bp->seq);
+       } else {
+               btree_err_on(!b->data->keys.seq,
+                            -BCH_ERR_btree_node_read_err_must_retry,
+                            c, ca, b, NULL,
+                            btree_node_bad_seq,
+                            "bad btree header: seq 0");
        }
 
        while (b->written < (ptr_written ?: btree_sectors(c))) {
                unsigned sectors;
                struct nonce nonce;
-               struct bch_csum csum;
                bool first = !b->written;
+               bool csum_bad;
 
                if (!b->written) {
                        i = &b->data->keys;
 
                        btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
-                                    -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
-                                    "unknown checksum type %llu",
-                                    BSET_CSUM_TYPE(i));
+                                    -BCH_ERR_btree_node_read_err_want_retry,
+                                    c, ca, b, i,
+                                    bset_unknown_csum,
+                                    "unknown checksum type %llu", BSET_CSUM_TYPE(i));
 
                        nonce = btree_nonce(i, b->written << 9);
-                       csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data);
 
-                       btree_err_on(bch2_crc_cmp(csum, b->data->csum),
-                                    -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
+                       csum_bad = bch2_crc_cmp(b->data->csum,
+                               csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, b->data));
+                       if (csum_bad)
+                               bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+
+                       btree_err_on(csum_bad,
+                                    -BCH_ERR_btree_node_read_err_want_retry,
+                                    c, ca, b, i,
+                                    bset_bad_csum,
                                     "invalid checksum");
 
                        ret = bset_encrypt(c, i, b->written << 9);
@@ -969,7 +1017,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
                        btree_err_on(btree_node_type_is_extents(btree_node_type(b)) &&
                                     !BTREE_NODE_NEW_EXTENT_OVERWRITE(b->data),
-                                    -BCH_ERR_btree_node_read_err_incompatible, c, NULL, b, NULL,
+                                    -BCH_ERR_btree_node_read_err_incompatible,
+                                    c, NULL, b, NULL,
+                                    btree_node_unsupported_version,
                                     "btree node does not have NEW_EXTENT_OVERWRITE set");
 
                        sectors = vstruct_sectors(b->data, c->block_bits);
@@ -981,15 +1031,21 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                                break;
 
                        btree_err_on(!bch2_checksum_type_valid(c, BSET_CSUM_TYPE(i)),
-                                    -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
-                                    "unknown checksum type %llu",
-                                    BSET_CSUM_TYPE(i));
+                                    -BCH_ERR_btree_node_read_err_want_retry,
+                                    c, ca, b, i,
+                                    bset_unknown_csum,
+                                    "unknown checksum type %llu", BSET_CSUM_TYPE(i));
 
                        nonce = btree_nonce(i, b->written << 9);
-                       csum = csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne);
-
-                       btree_err_on(bch2_crc_cmp(csum, bne->csum),
-                                    -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, i,
+                       csum_bad = bch2_crc_cmp(bne->csum,
+                               csum_vstruct(c, BSET_CSUM_TYPE(i), nonce, bne));
+                       if (csum_bad)
+                               bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
+
+                       btree_err_on(csum_bad,
+                                    -BCH_ERR_btree_node_read_err_want_retry,
+                                    c, ca, b, i,
+                                    bset_bad_csum,
                                     "invalid checksum");
 
                        ret = bset_encrypt(c, i, b->written << 9);
@@ -1022,12 +1078,16 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                                        true);
 
                btree_err_on(blacklisted && first,
-                            -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+                            -BCH_ERR_btree_node_read_err_fixable,
+                            c, ca, b, i,
+                            bset_blacklisted_journal_seq,
                             "first btree node bset has blacklisted journal seq (%llu)",
                             le64_to_cpu(i->journal_seq));
 
                btree_err_on(blacklisted && ptr_written,
-                            -BCH_ERR_btree_node_read_err_fixable, c, ca, b, i,
+                            -BCH_ERR_btree_node_read_err_fixable,
+                            c, ca, b, i,
+                            first_bset_blacklisted_journal_seq,
                             "found blacklisted bset (journal seq %llu) in btree node at offset %u-%u/%u",
                             le64_to_cpu(i->journal_seq),
                             b->written, b->written + sectors, ptr_written);
@@ -1044,7 +1104,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
        if (ptr_written) {
                btree_err_on(b->written < ptr_written,
-                            -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL,
+                            -BCH_ERR_btree_node_read_err_want_retry,
+                            c, ca, b, NULL,
+                            btree_node_data_missing,
                             "btree node data missing: expected %u sectors, found %u",
                             ptr_written, b->written);
        } else {
@@ -1055,7 +1117,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                                     !bch2_journal_seq_is_blacklisted(c,
                                                                      le64_to_cpu(bne->keys.journal_seq),
                                                                      true),
-                                    -BCH_ERR_btree_node_read_err_want_retry, c, ca, b, NULL,
+                                    -BCH_ERR_btree_node_read_err_want_retry,
+                                    c, ca, b, NULL,
+                                    btree_node_bset_after_end,
                                     "found bset signature after last bset");
        }
 
@@ -1097,7 +1161,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                        prt_printf(&buf, "\n  ");
                        bch2_bkey_val_to_text(&buf, c, u.s_c);
 
-                       btree_err(-BCH_ERR_btree_node_read_err_fixable, c, NULL, b, i, "%s", buf.buf);
+                       btree_err(-BCH_ERR_btree_node_read_err_fixable,
+                                 c, NULL, b, i,
+                                 btree_node_bad_bkey,
+                                 "%s", buf.buf);
 
                        btree_keys_account_key_drop(&b->nr, 0, k);
 
@@ -1177,8 +1244,9 @@ static void btree_node_read_work(struct work_struct *work)
                }
 start:
                printbuf_reset(&buf);
-               btree_pos_to_text(&buf, c, b);
-               bch2_dev_io_err_on(bio->bi_status, ca, "btree read error %s for %s",
+               bch2_btree_pos_to_text(&buf, c, b);
+               bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
+                                  "btree read error %s for %s",
                                   bch2_blk_status_to_str(bio->bi_status), buf.buf);
                if (rb->have_ioref)
                        percpu_ref_put(&ca->io_ref);
@@ -1213,7 +1281,7 @@ start:
                printbuf_reset(&buf);
                bch2_bpos_to_text(&buf, b->key.k.p);
                bch_info(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
-                        __func__, bch2_btree_ids[b->c.btree_id], b->c.level, buf.buf);
+                        __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);
 
                bch2_btree_node_rewrite_async(c, b);
        }
@@ -1322,14 +1390,20 @@ static void btree_node_read_all_replicas_done(struct closure *cl)
                }
 
                written2 = btree_node_sectors_written(c, ra->buf[i]);
-               if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
+               if (btree_err_on(written2 != written, -BCH_ERR_btree_node_read_err_fixable,
+                                c, NULL, b, NULL,
+                                btree_node_replicas_sectors_written_mismatch,
                                 "btree node sectors written mismatch: %u != %u",
                                 written, written2) ||
                    btree_err_on(btree_node_has_extra_bsets(c, written2, ra->buf[i]),
-                                -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
+                                -BCH_ERR_btree_node_read_err_fixable,
+                                c, NULL, b, NULL,
+                                btree_node_bset_after_end,
                                 "found bset signature after last bset") ||
                    btree_err_on(memcmp(ra->buf[best], ra->buf[i], written << 9),
-                                -BCH_ERR_btree_node_read_err_fixable, c, NULL, b, NULL,
+                                -BCH_ERR_btree_node_read_err_fixable,
+                                c, NULL, b, NULL,
+                                btree_node_replicas_data_mismatch,
                                 "btree node replicas content mismatch"))
                        dump_bset_maps = true;
 
@@ -1524,7 +1598,7 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
                struct printbuf buf = PRINTBUF;
 
                prt_str(&buf, "btree node read error: no device to read from\n at ");
-               btree_pos_to_text(&buf, c, b);
+               bch2_btree_pos_to_text(&buf, c, b);
                bch_err(c, "%s", buf.buf);
 
                if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
@@ -1759,7 +1833,8 @@ static void btree_node_write_endio(struct bio *bio)
        if (wbio->have_ioref)
                bch2_latency_acct(ca, wbio->submit_time, WRITE);
 
-       if (bch2_dev_io_err_on(bio->bi_status, ca, "btree write error: %s",
+       if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
+                              "btree write error: %s",
                               bch2_blk_status_to_str(bio->bi_status)) ||
            bch2_meta_write_fault("btree")) {
                spin_lock_irqsave(&c->btree_write_error_lock, flags);
index 1d79514754d7da80debdbbd39a328fc64c0f1ae2..c2adf3fbb0b3abec5a3521d49663ec77ed32916f 100644 (file)
@@ -257,7 +257,7 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
 
        BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
               (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
-              !btree_type_has_snapshots(iter->btree_id));
+              !btree_type_has_snapshot_field(iter->btree_id));
 
        if (iter->update_path)
                bch2_btree_path_verify(trans, iter->update_path);
@@ -362,7 +362,7 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
        bch2_bpos_to_text(&buf, pos);
 
        panic("not locked: %s %s%s\n",
-             bch2_btree_ids[id], buf.buf,
+             bch2_btree_id_str(id), buf.buf,
              key_cache ? " cached" : "");
 }
 
@@ -1109,6 +1109,9 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
        if (unlikely(ret))
                goto out;
 
+       if (unlikely(!trans->srcu_held))
+               bch2_trans_srcu_lock(trans);
+
        /*
         * Ensure we obey path->should_be_locked: if it's set, we can't unlock
         * and re-traverse the path without a transaction restart:
@@ -1371,7 +1374,7 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
                struct bkey_s_c old = { &i->old_k, i->old_v };
 
                prt_printf(buf, "update: btree=%s cached=%u %pS",
-                      bch2_btree_ids[i->btree_id],
+                      bch2_btree_id_str(i->btree_id),
                       i->cached,
                       (void *) i->ip_allocated);
                prt_newline(buf);
@@ -1387,7 +1390,7 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
 
        trans_for_each_wb_update(trans, wb) {
                prt_printf(buf, "update: btree=%s wb=1 %pS",
-                      bch2_btree_ids[wb->btree],
+                      bch2_btree_id_str(wb->btree),
                       (void *) i->ip_allocated);
                prt_newline(buf);
 
@@ -1416,7 +1419,7 @@ void bch2_btree_path_to_text(struct printbuf *out, struct btree_path *path)
                   path->idx, path->ref, path->intent_ref,
                   path->preserve ? 'P' : ' ',
                   path->should_be_locked ? 'S' : ' ',
-                  bch2_btree_ids[path->btree_id],
+                  bch2_btree_id_str(path->btree_id),
                   path->level);
        bch2_bpos_to_text(out, path->pos);
 
@@ -1523,6 +1526,7 @@ static inline struct btree_path *btree_path_alloc(struct btree_trans *trans,
        path->ref               = 0;
        path->intent_ref        = 0;
        path->nodes_locked      = 0;
+       path->alloc_seq++;
 
        btree_path_list_add(trans, pos, path);
        trans->paths_sorted = false;
@@ -1598,7 +1602,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
 
        locks_want = min(locks_want, BTREE_MAX_DEPTH);
        if (locks_want > path->locks_want)
-               bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want);
+               bch2_btree_path_upgrade_noupgrade_sibs(trans, path, locks_want, NULL);
 
        return path;
 }
@@ -2829,18 +2833,36 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
        return p;
 }
 
-static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
+static inline void check_srcu_held_too_long(struct btree_trans *trans)
 {
-       struct bch_fs *c = trans->c;
-       struct btree_path *path;
+       WARN(trans->srcu_held && time_after(jiffies, trans->srcu_lock_time + HZ * 10),
+            "btree trans held srcu lock (delaying memory reclaim) for %lu seconds",
+            (jiffies - trans->srcu_lock_time) / HZ);
+}
 
-       trans_for_each_path(trans, path)
-               if (path->cached && !btree_node_locked(path, 0))
-                       path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
+void bch2_trans_srcu_unlock(struct btree_trans *trans)
+{
+       if (trans->srcu_held) {
+               struct bch_fs *c = trans->c;
+               struct btree_path *path;
 
-       srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
-       trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
-       trans->srcu_lock_time   = jiffies;
+               trans_for_each_path(trans, path)
+                       if (path->cached && !btree_node_locked(path, 0))
+                               path->l[0].b = ERR_PTR(-BCH_ERR_no_btree_node_srcu_reset);
+
+               check_srcu_held_too_long(trans);
+               srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+               trans->srcu_held = false;
+       }
+}
+
+void bch2_trans_srcu_lock(struct btree_trans *trans)
+{
+       if (!trans->srcu_held) {
+               trans->srcu_idx = srcu_read_lock(&trans->c->btree_trans_barrier);
+               trans->srcu_lock_time   = jiffies;
+               trans->srcu_held = true;
+       }
 }
 
 /**
@@ -2894,8 +2916,9 @@ u32 bch2_trans_begin(struct btree_trans *trans)
        }
        trans->last_begin_time = now;
 
-       if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
-               bch2_trans_reset_srcu_lock(trans);
+       if (unlikely(trans->srcu_held &&
+                    time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
+               bch2_trans_srcu_unlock(trans);
 
        trans->last_begin_ip = _RET_IP_;
        if (trans->restarted) {
@@ -2980,8 +3003,9 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
                trans->wb_updates_size = s->wb_updates_size;
        }
 
-       trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
+       trans->srcu_idx         = srcu_read_lock(&c->btree_trans_barrier);
        trans->srcu_lock_time   = jiffies;
+       trans->srcu_held        = true;
 
        if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
                struct btree_trans *pos;
@@ -3025,7 +3049,7 @@ leaked:
        trans_for_each_path(trans, path)
                if (path->ref)
                        printk(KERN_ERR "  btree %s %pS\n",
-                              bch2_btree_ids[path->btree_id],
+                              bch2_btree_id_str(path->btree_id),
                               (void *) path->ip_allocated);
        /* Be noisy about this: */
        bch2_fatal_error(c);
@@ -3058,7 +3082,10 @@ void bch2_trans_put(struct btree_trans *trans)
 
        check_btree_paths_leaked(trans);
 
-       srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+       if (trans->srcu_held) {
+               check_srcu_held_too_long(trans);
+               srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
+       }
 
        bch2_journal_preres_put(&c->journal, &trans->journal_preres);
 
@@ -3100,7 +3127,7 @@ bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
 
        prt_tab(out);
        prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
-                  b->level, bch2_btree_ids[b->btree_id]);
+                  b->level, bch2_btree_id_str(b->btree_id));
        bch2_bpos_to_text(out, btree_node_pos(b));
 
        prt_tab(out);
@@ -3130,7 +3157,7 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
                       path->idx,
                       path->cached ? 'c' : 'b',
                       path->level,
-                      bch2_btree_ids[path->btree_id]);
+                      bch2_btree_id_str(path->btree_id));
                bch2_bpos_to_text(out, path->pos);
                prt_newline(out);
 
index fbe273453db36d2fa926e6c61c977926818eabbb..85e7cb52f6b6c41b95c7dc3496a9340c9f2b38d0 100644 (file)
@@ -274,6 +274,7 @@ void bch2_path_put(struct btree_trans *, struct btree_path *, bool);
 int bch2_trans_relock(struct btree_trans *);
 int bch2_trans_relock_notrace(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);
+void bch2_trans_unlock_long(struct btree_trans *);
 bool bch2_trans_locked(struct btree_trans *);
 
 static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
@@ -411,11 +412,11 @@ static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
                flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
 
        if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
-           btree_node_type_is_extents(btree_id))
+           btree_id_is_extents(btree_id))
                flags |= BTREE_ITER_IS_EXTENTS;
 
        if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
-           !btree_type_has_snapshots(btree_id))
+           !btree_type_has_snapshot_field(btree_id))
                flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
 
        if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
@@ -579,6 +580,9 @@ static inline int __bch2_bkey_get_val_typed(struct btree_trans *trans,
        __bch2_bkey_get_val_typed(_trans, _btree_id, _pos, _flags,      \
                                  KEY_TYPE_##_type, sizeof(*_val), _val)
 
+void bch2_trans_srcu_unlock(struct btree_trans *);
+void bch2_trans_srcu_lock(struct btree_trans *);
+
 u32 bch2_trans_begin(struct btree_trans *);
 
 /*
index f9a5e38a085bbfb280fbe439ca2a6b1f0ba2f1af..9b78f78a75b59c0cb28dac46fc4e107f0a9cbca1 100644 (file)
@@ -324,7 +324,7 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
                ck = bkey_cached_reuse(bc);
                if (unlikely(!ck)) {
                        bch_err(c, "error allocating memory for key cache item, btree %s",
-                               bch2_btree_ids[path->btree_id]);
+                               bch2_btree_id_str(path->btree_id));
                        return ERR_PTR(-BCH_ERR_ENOMEM_btree_key_cache_create);
                }
 
@@ -407,7 +407,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
                        new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
                        if (!new_k) {
                                bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
-                                       bch2_btree_ids[ck->key.btree_id], new_u64s);
+                                       bch2_btree_id_str(ck->key.btree_id), new_u64s);
                                ret = -BCH_ERR_ENOMEM_btree_key_cache_fill;
                                goto err;
                        }
@@ -509,7 +509,7 @@ fill:
                 * path->uptodate yet:
                 */
                if (!path->locks_want &&
-                   !__bch2_btree_path_upgrade(trans, path, 1)) {
+                   !__bch2_btree_path_upgrade(trans, path, 1, NULL)) {
                        trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
                        ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
                        goto err;
@@ -1038,7 +1038,7 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
 
        bc->table_init_done = true;
 
-       shrink = shrinker_alloc(0, "%s/btree_key_cache", c->name);
+       shrink = shrinker_alloc(0, "%s-btree_key_cache", c->name);
        if (!shrink)
                return -BCH_ERR_ENOMEM_fs_btree_cache_init;
        bc->shrink = shrink;
index 40c8ed8f7bf187ddd83974bce2d32236b5942973..3d48834d091fbda928e9e462b6061ca03f847bb1 100644 (file)
@@ -431,7 +431,8 @@ void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
 
 static inline bool btree_path_get_locks(struct btree_trans *trans,
                                        struct btree_path *path,
-                                       bool upgrade)
+                                       bool upgrade,
+                                       struct get_locks_fail *f)
 {
        unsigned l = path->level;
        int fail_idx = -1;
@@ -442,8 +443,14 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
 
                if (!(upgrade
                      ? bch2_btree_node_upgrade(trans, path, l)
-                     : bch2_btree_node_relock(trans, path, l)))
-                       fail_idx = l;
+                     : bch2_btree_node_relock(trans, path, l))) {
+                       fail_idx        = l;
+
+                       if (f) {
+                               f->l    = l;
+                               f->b    = path->l[l].b;
+                       }
+               }
 
                l++;
        } while (l < path->locks_want);
@@ -584,7 +591,9 @@ __flatten
 bool bch2_btree_path_relock_norestart(struct btree_trans *trans,
                        struct btree_path *path, unsigned long trace_ip)
 {
-       return btree_path_get_locks(trans, path, false);
+       struct get_locks_fail f;
+
+       return btree_path_get_locks(trans, path, false, &f);
 }
 
 int __bch2_btree_path_relock(struct btree_trans *trans,
@@ -600,22 +609,24 @@ int __bch2_btree_path_relock(struct btree_trans *trans,
 
 bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,
                               struct btree_path *path,
-                              unsigned new_locks_want)
+                              unsigned new_locks_want,
+                              struct get_locks_fail *f)
 {
        EBUG_ON(path->locks_want >= new_locks_want);
 
        path->locks_want = new_locks_want;
 
-       return btree_path_get_locks(trans, path, true);
+       return btree_path_get_locks(trans, path, true, f);
 }
 
 bool __bch2_btree_path_upgrade(struct btree_trans *trans,
                               struct btree_path *path,
-                              unsigned new_locks_want)
+                              unsigned new_locks_want,
+                              struct get_locks_fail *f)
 {
        struct btree_path *linked;
 
-       if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want))
+       if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
                return true;
 
        /*
@@ -644,7 +655,7 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
                            linked->btree_id == path->btree_id &&
                            linked->locks_want < new_locks_want) {
                                linked->locks_want = new_locks_want;
-                               btree_path_get_locks(trans, linked, true);
+                               btree_path_get_locks(trans, linked, true, NULL);
                        }
 
        return false;
@@ -656,6 +667,9 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
 {
        unsigned l;
 
+       if (trans->restarted)
+               return;
+
        EBUG_ON(path->locks_want < new_locks_want);
 
        path->locks_want = new_locks_want;
@@ -674,6 +688,9 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
        }
 
        bch2_btree_path_verify_locks(path);
+
+       path->downgrade_seq++;
+       trace_path_downgrade(trans, _RET_IP_, path);
 }
 
 /* Btree transaction locking: */
@@ -682,6 +699,9 @@ void bch2_trans_downgrade(struct btree_trans *trans)
 {
        struct btree_path *path;
 
+       if (trans->restarted)
+               return;
+
        trans_for_each_path(trans, path)
                bch2_btree_path_downgrade(trans, path);
 }
@@ -733,6 +753,12 @@ void bch2_trans_unlock(struct btree_trans *trans)
                __bch2_btree_path_unlock(trans, path);
 }
 
+void bch2_trans_unlock_long(struct btree_trans *trans)
+{
+       bch2_trans_unlock(trans);
+       bch2_trans_srcu_unlock(trans);
+}
+
 bool bch2_trans_locked(struct btree_trans *trans)
 {
        struct btree_path *path;
index 6231e9ffc5d7497b693febe166e64560a6f024c9..11b0a2c8cd691b21afccdcc38486aa060351f62a 100644 (file)
@@ -355,26 +355,36 @@ static inline bool bch2_btree_node_relock_notrace(struct btree_trans *trans,
 
 /* upgrade */
 
+
+struct get_locks_fail {
+       unsigned        l;
+       struct btree    *b;
+};
+
 bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *,
-                              struct btree_path *, unsigned);
+                              struct btree_path *, unsigned,
+                              struct get_locks_fail *);
+
 bool __bch2_btree_path_upgrade(struct btree_trans *,
-                              struct btree_path *, unsigned);
+                              struct btree_path *, unsigned,
+                              struct get_locks_fail *);
 
 static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
                                          struct btree_path *path,
                                          unsigned new_locks_want)
 {
+       struct get_locks_fail f;
        unsigned old_locks_want = path->locks_want;
 
        new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);
 
        if (path->locks_want < new_locks_want
-           ? __bch2_btree_path_upgrade(trans, path, new_locks_want)
+           ? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
            : path->uptodate == BTREE_ITER_UPTODATE)
                return 0;
 
        trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,
-                       old_locks_want, new_locks_want);
+                       old_locks_want, new_locks_want, &f);
        return btree_trans_restart(trans, BCH_ERR_transaction_restart_upgrade);
 }
 
index 04c1f4610972599686c18ada9591a1f83dbd1557..decad7b66c59c114a9315d7acd1ec3bf755a4230 100644 (file)
@@ -269,6 +269,7 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
        BUG_ON(i->level         != i->path->level);
        BUG_ON(i->btree_id      != i->path->btree_id);
        EBUG_ON(!i->level &&
+               btree_type_has_snapshots(i->btree_id) &&
                !(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
                test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
                i->k->k.p.snapshot &&
@@ -349,7 +350,7 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
        new_k           = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
        if (!new_k) {
                bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
-                       bch2_btree_ids[path->btree_id], new_u64s);
+                       bch2_btree_id_str(path->btree_id), new_u64s);
                return -BCH_ERR_ENOMEM_btree_key_cache_insert;
        }
 
@@ -379,11 +380,10 @@ static int run_one_mem_trigger(struct btree_trans *trans,
        if (unlikely(flags & BTREE_TRIGGER_NORUN))
                return 0;
 
-       if (!btree_node_type_needs_gc((enum btree_node_type) i->btree_id))
+       if (!btree_node_type_needs_gc(__btree_node_type(i->level, i->btree_id)))
                return 0;
 
-       if (old_ops->atomic_trigger == new_ops->atomic_trigger &&
-           ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
+       if (old_ops->atomic_trigger == new_ops->atomic_trigger) {
                ret   = bch2_mark_key(trans, i->btree_id, i->level,
                                old, bkey_i_to_s_c(new),
                                BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
@@ -425,8 +425,7 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
 
        if (!i->insert_trigger_run &&
            !i->overwrite_trigger_run &&
-           old_ops->trans_trigger == new_ops->trans_trigger &&
-           ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
+           old_ops->trans_trigger == new_ops->trans_trigger) {
                i->overwrite_trigger_run = true;
                i->insert_trigger_run = true;
                return bch2_trans_mark_key(trans, i->btree_id, i->level, old, i->k,
@@ -683,7 +682,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
                                                       BCH_JSET_ENTRY_overwrite,
                                                       i->btree_id, i->level,
                                                       i->old_k.u64s);
-                               bkey_reassemble(&entry->start[0],
+                               bkey_reassemble((struct bkey_i *) entry->start,
                                                (struct bkey_s_c) { &i->old_k, i->old_v });
                        }
 
@@ -691,7 +690,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
                                               BCH_JSET_ENTRY_btree_keys,
                                               i->btree_id, i->level,
                                               i->k->k.u64s);
-                       bkey_copy(&entry->start[0], i->k);
+                       bkey_copy((struct bkey_i *) entry->start, i->k);
                }
 
                trans_for_each_wb_update(trans, wb) {
@@ -699,7 +698,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
                                               BCH_JSET_ENTRY_btree_keys,
                                               wb->btree, 0,
                                               wb->k.k.u64s);
-                       bkey_copy(&entry->start[0], &wb->k);
+                       bkey_copy((struct bkey_i *) entry->start, &wb->k);
                }
 
                if (trans->journal_seq)
@@ -776,12 +775,12 @@ static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans
                bch2_journal_key_overwritten(trans->c, wb->btree, 0, wb->k.k.p);
 }
 
-static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans, unsigned flags,
+static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
+                                                  enum bkey_invalid_flags flags,
                                                   struct btree_insert_entry *i,
                                                   struct printbuf *err)
 {
        struct bch_fs *c = trans->c;
-       int rw = (flags & BTREE_INSERT_JOURNAL_REPLAY) ? READ : WRITE;
 
        printbuf_reset(err);
        prt_printf(err, "invalid bkey on insert from %s -> %ps",
@@ -792,8 +791,7 @@ static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans, un
        bch2_bkey_val_to_text(err, c, bkey_i_to_s_c(i->k));
        prt_newline(err);
 
-       bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
-                         i->bkey_type, rw, err);
+       bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->bkey_type, flags, err);
        bch2_print_string_as_lines(KERN_ERR, err->buf);
 
        bch2_inconsistent_error(c);
@@ -864,12 +862,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags
         */
        bch2_journal_res_put(&c->journal, &trans->journal_res);
 
-       if (unlikely(ret))
-               return ret;
-
-       bch2_trans_downgrade(trans);
-
-       return 0;
+       return ret;
 }
 
 static int journal_reclaim_wait_done(struct bch_fs *c)
@@ -1034,7 +1027,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
 
                if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
                                               i->bkey_type, invalid_flags, &buf)))
-                       ret = bch2_trans_commit_bkey_invalid(trans, flags, i, &buf);
+                       ret = bch2_trans_commit_bkey_invalid(trans, invalid_flags, i, &buf);
                btree_insert_entry_checks(trans, i);
                printbuf_exit(&buf);
 
@@ -1138,6 +1131,8 @@ out:
        if (likely(!(flags & BTREE_INSERT_NOCHECK_RW)))
                bch2_write_ref_put(c, BCH_WRITE_REF_trans);
 out_reset:
+       if (!ret)
+               bch2_trans_downgrade(trans);
        bch2_trans_reset_updates(trans);
 
        return ret;
index bc6714d88925f3183ed9d817f8a79f106e55da7c..941841a0c5bf68c56370cf2fadf6669351d582d8 100644 (file)
@@ -228,6 +228,8 @@ struct btree_path {
        u8                      sorted_idx;
        u8                      ref;
        u8                      intent_ref;
+       u32                     alloc_seq;
+       u32                     downgrade_seq;
 
        /* btree_iter_copy starts here: */
        struct bpos             pos;
@@ -424,6 +426,7 @@ struct btree_trans {
        u8                      nr_updates;
        u8                      nr_wb_updates;
        u8                      wb_updates_size;
+       bool                    srcu_held:1;
        bool                    used_mempool:1;
        bool                    in_traverse_all:1;
        bool                    paths_sorted:1;
@@ -636,16 +639,17 @@ static inline unsigned bset_byte_offset(struct btree *b, void *i)
 }
 
 enum btree_node_type {
-#define x(kwd, val, ...) BKEY_TYPE_##kwd = val,
+       BKEY_TYPE_btree,
+#define x(kwd, val, ...) BKEY_TYPE_##kwd = val + 1,
        BCH_BTREE_IDS()
 #undef x
-       BKEY_TYPE_btree,
+       BKEY_TYPE_NR
 };
 
 /* Type of a key in btree @id at level @level: */
 static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
 {
-       return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
+       return level ? BKEY_TYPE_btree : (unsigned) id + 1;
 }
 
 /* Type of keys @b contains: */
@@ -654,19 +658,21 @@ static inline enum btree_node_type btree_node_type(struct btree *b)
        return __btree_node_type(b->c.level, b->c.btree_id);
 }
 
+const char *bch2_btree_node_type_str(enum btree_node_type);
+
 #define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS             \
-       (BIT(BKEY_TYPE_extents)|                        \
-        BIT(BKEY_TYPE_alloc)|                          \
-        BIT(BKEY_TYPE_inodes)|                         \
-        BIT(BKEY_TYPE_stripes)|                        \
-        BIT(BKEY_TYPE_reflink)|                        \
-        BIT(BKEY_TYPE_btree))
+       (BIT_ULL(BKEY_TYPE_extents)|                    \
+        BIT_ULL(BKEY_TYPE_alloc)|                      \
+        BIT_ULL(BKEY_TYPE_inodes)|                     \
+        BIT_ULL(BKEY_TYPE_stripes)|                    \
+        BIT_ULL(BKEY_TYPE_reflink)|                    \
+        BIT_ULL(BKEY_TYPE_btree))
 
 #define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS               \
-       (BIT(BKEY_TYPE_alloc)|                          \
-        BIT(BKEY_TYPE_inodes)|                         \
-        BIT(BKEY_TYPE_stripes)|                        \
-        BIT(BKEY_TYPE_snapshots))
+       (BIT_ULL(BKEY_TYPE_alloc)|                      \
+        BIT_ULL(BKEY_TYPE_inodes)|                     \
+        BIT_ULL(BKEY_TYPE_stripes)|                    \
+        BIT_ULL(BKEY_TYPE_snapshots))
 
 #define BTREE_NODE_TYPE_HAS_TRIGGERS                   \
        (BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|            \
@@ -674,13 +680,13 @@ static inline enum btree_node_type btree_node_type(struct btree *b)
 
 static inline bool btree_node_type_needs_gc(enum btree_node_type type)
 {
-       return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
+       return BTREE_NODE_TYPE_HAS_TRIGGERS & BIT_ULL(type);
 }
 
 static inline bool btree_node_type_is_extents(enum btree_node_type type)
 {
        const unsigned mask = 0
-#define x(name, nr, flags, ...)        |((!!((flags) & BTREE_ID_EXTENTS)) << nr)
+#define x(name, nr, flags, ...)        |((!!((flags) & BTREE_ID_EXTENTS)) << (nr + 1))
        BCH_BTREE_IDS()
 #undef x
        ;
@@ -690,7 +696,7 @@ static inline bool btree_node_type_is_extents(enum btree_node_type type)
 
 static inline bool btree_id_is_extents(enum btree_id btree)
 {
-       return btree_node_type_is_extents((enum btree_node_type) btree);
+       return btree_node_type_is_extents(__btree_node_type(0, btree));
 }
 
 static inline bool btree_type_has_snapshots(enum btree_id id)
@@ -704,6 +710,17 @@ static inline bool btree_type_has_snapshots(enum btree_id id)
        return (1U << id) & mask;
 }
 
+static inline bool btree_type_has_snapshot_field(enum btree_id id)
+{
+       const unsigned mask = 0
+#define x(name, nr, flags, ...)        |((!!((flags) & (BTREE_ID_SNAPSHOT_FIELD|BTREE_ID_SNAPSHOTS))) << nr)
+       BCH_BTREE_IDS()
+#undef x
+       ;
+
+       return (1U << id) & mask;
+}
+
 static inline bool btree_type_has_ptrs(enum btree_id id)
 {
        const unsigned mask = 0
index 7dbf6b6c7f3481e0d0ab0e15b9b5ec6c501131f0..39c2db68123bd1e7958cb69540721a1548d92516 100644 (file)
@@ -1274,14 +1274,14 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
 
        if (bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
                              btree_node_type(b), WRITE, &buf) ?:
-           bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert), &buf)) {
+           bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf)) {
                printbuf_reset(&buf);
                prt_printf(&buf, "inserting invalid bkey\n  ");
                bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
                prt_printf(&buf, "\n  ");
                bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
                                  btree_node_type(b), WRITE, &buf);
-               bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert), &buf);
+               bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf);
 
                bch2_fs_inconsistent(c, "%s", buf.buf);
                dump_stack();
@@ -1987,7 +1987,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
 out:
        if (new_path)
                bch2_path_put(trans, new_path, true);
-       bch2_btree_path_downgrade(trans, iter->path);
+       bch2_trans_downgrade(trans);
        return ret;
 err:
        bch2_btree_node_free_never_used(as, trans, n);
@@ -2411,30 +2411,24 @@ void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry
 
        r->level = entry->level;
        r->alive = true;
-       bkey_copy(&r->key, &entry->start[0]);
+       bkey_copy(&r->key, (struct bkey_i *) entry->start);
 
        mutex_unlock(&c->btree_root_lock);
 }
 
 struct jset_entry *
 bch2_btree_roots_to_journal_entries(struct bch_fs *c,
-                                   struct jset_entry *start,
-                                   struct jset_entry *end)
+                                   struct jset_entry *end,
+                                   unsigned long skip)
 {
-       struct jset_entry *entry;
-       unsigned long have = 0;
        unsigned i;
 
-       for (entry = start; entry < end; entry = vstruct_next(entry))
-               if (entry->type == BCH_JSET_ENTRY_btree_root)
-                       __set_bit(entry->btree_id, &have);
-
        mutex_lock(&c->btree_root_lock);
 
        for (i = 0; i < btree_id_nr_alive(c); i++) {
                struct btree_root *r = bch2_btree_id_root(c, i);
 
-               if (r->alive && !test_bit(i, &have)) {
+               if (r->alive && !test_bit(i, &skip)) {
                        journal_entry_set(end, BCH_JSET_ENTRY_btree_root,
                                          i, r->level, &r->key, r->key.k.u64s);
                        end = vstruct_next(end);
index 5e0a467fe9056acf25ce8c3d65a1d5ca6adfc216..4df21512d640dac83c8948137dfa4fd077b2ef39 100644 (file)
@@ -271,7 +271,7 @@ static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
        struct btree_node_entry *bne = max(write_block(b),
                        (void *) btree_bkey_last(b, bset_tree_last(b)));
        ssize_t remaining_space =
-               __bch_btree_u64s_remaining(c, b, &bne->keys.start[0]);
+               __bch_btree_u64s_remaining(c, b, bne->keys.start);
 
        if (unlikely(bset_written(b, bset(b, t)))) {
                if (remaining_space > (ssize_t) (block_bytes(c) >> 3))
@@ -303,7 +303,7 @@ static inline void push_whiteout(struct bch_fs *c, struct btree *b,
        k.needs_whiteout = true;
 
        b->whiteout_u64s += k.u64s;
-       bkey_copy(unwritten_whiteouts_start(c, b), &k);
+       bkey_p_copy(unwritten_whiteouts_start(c, b), &k);
 }
 
 /*
@@ -325,7 +325,7 @@ bool bch2_btree_interior_updates_flush(struct bch_fs *);
 
 void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
 struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
-                                       struct jset_entry *, struct jset_entry *);
+                                       struct jset_entry *, unsigned long);
 
 void bch2_do_pending_node_rewrites(struct bch_fs *);
 void bch2_free_pending_node_rewrites(struct bch_fs *);
index a1a4b5feadaa2cf36fc3b0657241efc464b6faab..58d8c6ffd955429d9f13207ddf04c1f687a68b2e 100644 (file)
@@ -370,8 +370,8 @@ static inline int update_replicas(struct bch_fs *c, struct bkey_s_c k,
 
        idx = bch2_replicas_entry_idx(c, r);
        if (idx < 0 &&
-           fsck_err(c, "no replicas entry\n"
-                    "  while marking %s",
+           fsck_err(c, ptr_to_missing_replicas_entry,
+                    "no replicas entry\n  while marking %s",
                     (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                percpu_up_read(&c->mark_lock);
                ret = bch2_mark_replicas(c, r);
@@ -695,6 +695,7 @@ static int check_bucket_ref(struct btree_trans *trans,
 
        if (gen_after(ptr->gen, b_gen)) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+                             BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen,
                        "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
@@ -707,6 +708,7 @@ static int check_bucket_ref(struct btree_trans *trans,
 
        if (gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+                             BCH_FSCK_ERR_ptr_too_stale,
                        "bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
@@ -720,6 +722,7 @@ static int check_bucket_ref(struct btree_trans *trans,
 
        if (b_gen != ptr->gen && !ptr->cached) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+                             BCH_FSCK_ERR_stale_dirty_ptr,
                        "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
@@ -741,6 +744,7 @@ static int check_bucket_ref(struct btree_trans *trans,
            ptr_data_type &&
            bucket_data_type != ptr_data_type) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+                             BCH_FSCK_ERR_ptr_bucket_data_type_mismatch,
                        "bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
@@ -754,6 +758,7 @@ static int check_bucket_ref(struct btree_trans *trans,
 
        if ((u64) bucket_sectors + sectors > U32_MAX) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+                             BCH_FSCK_ERR_bucket_sector_count_overflow,
                        "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX\n"
                        "while marking %s",
                        ptr->dev, bucket_nr, b_gen,
@@ -935,14 +940,12 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
        return 0;
 }
 
-int bch2_mark_extent(struct btree_trans *trans,
-                    enum btree_id btree_id, unsigned level,
-                    struct bkey_s_c old, struct bkey_s_c new,
-                    unsigned flags)
+static int __mark_extent(struct btree_trans *trans,
+                        enum btree_id btree_id, unsigned level,
+                        struct bkey_s_c k, unsigned flags)
 {
        u64 journal_seq = trans->journal_res.seq;
        struct bch_fs *c = trans->c;
-       struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
@@ -1018,6 +1021,14 @@ int bch2_mark_extent(struct btree_trans *trans,
        return 0;
 }
 
+int bch2_mark_extent(struct btree_trans *trans,
+                    enum btree_id btree_id, unsigned level,
+                    struct bkey_s_c old, struct bkey_s_c new,
+                    unsigned flags)
+{
+       return mem_trigger_run_overwrite_then_insert(__mark_extent, trans, btree_id, level, old, new, flags);
+}
+
 int bch2_mark_stripe(struct btree_trans *trans,
                     enum btree_id btree_id, unsigned level,
                     struct bkey_s_c old, struct bkey_s_c new,
@@ -1124,13 +1135,11 @@ int bch2_mark_stripe(struct btree_trans *trans,
        return 0;
 }
 
-int bch2_mark_reservation(struct btree_trans *trans,
-                         enum btree_id btree_id, unsigned level,
-                         struct bkey_s_c old, struct bkey_s_c new,
-                         unsigned flags)
+static int __mark_reservation(struct btree_trans *trans,
+                             enum btree_id btree_id, unsigned level,
+                             struct bkey_s_c k, unsigned flags)
 {
        struct bch_fs *c = trans->c;
-       struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
        struct bch_fs_usage *fs_usage;
        unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
        s64 sectors = (s64) k.k->size;
@@ -1157,6 +1166,14 @@ int bch2_mark_reservation(struct btree_trans *trans,
        return 0;
 }
 
+int bch2_mark_reservation(struct btree_trans *trans,
+                         enum btree_id btree_id, unsigned level,
+                         struct bkey_s_c old, struct bkey_s_c new,
+                         unsigned flags)
+{
+       return mem_trigger_run_overwrite_then_insert(__mark_reservation, trans, btree_id, level, old, new, flags);
+}
+
 static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
                                 struct bkey_s_c_reflink_p p,
                                 u64 start, u64 end,
@@ -1183,7 +1200,8 @@ static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
        *idx = r->offset;
        return 0;
 not_found:
-       if (fsck_err(c, "pointer to missing indirect extent\n"
+       if (fsck_err(c, reflink_p_to_missing_reflink_v,
+                    "pointer to missing indirect extent\n"
                     "  %s\n"
                     "  missing range %llu-%llu",
                     (bch2_bkey_val_to_text(&buf, c, p.s_c), buf.buf),
@@ -1211,13 +1229,11 @@ fsck_err:
        return ret;
 }
 
-int bch2_mark_reflink_p(struct btree_trans *trans,
-                       enum btree_id btree_id, unsigned level,
-                       struct bkey_s_c old, struct bkey_s_c new,
-                       unsigned flags)
+static int __mark_reflink_p(struct btree_trans *trans,
+                           enum btree_id btree_id, unsigned level,
+                           struct bkey_s_c k, unsigned flags)
 {
        struct bch_fs *c = trans->c;
-       struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
        struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
        struct reflink_gc *ref;
        size_t l, r, m;
@@ -1251,6 +1267,14 @@ int bch2_mark_reflink_p(struct btree_trans *trans,
        return ret;
 }
 
+int bch2_mark_reflink_p(struct btree_trans *trans,
+                       enum btree_id btree_id, unsigned level,
+                       struct bkey_s_c old, struct bkey_s_c new,
+                       unsigned flags)
+{
+       return mem_trigger_run_overwrite_then_insert(__mark_reflink_p, trans, btree_id, level, old, new, flags);
+}
+
 void bch2_trans_fs_usage_revert(struct btree_trans *trans,
                                struct replicas_delta_list *deltas)
 {
@@ -1298,7 +1322,7 @@ int bch2_trans_fs_usage_apply(struct btree_trans *trans,
        struct bch_fs *c = trans->c;
        static int warned_disk_usage = 0;
        bool warn = false;
-       unsigned disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
+       u64 disk_res_sectors = trans->disk_res ? trans->disk_res->sectors : 0;
        struct replicas_delta *d, *d2;
        struct replicas_delta *top = (void *) deltas->d + deltas->used;
        struct bch_fs_usage *dst;
@@ -1357,7 +1381,7 @@ int bch2_trans_fs_usage_apply(struct btree_trans *trans,
 
        if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
                bch2_trans_inconsistent(trans,
-                                       "disk usage increased %lli more than %u sectors reserved)",
+                                       "disk usage increased %lli more than %llu sectors reserved)",
                                        should_not_have_added, disk_res_sectors);
        return 0;
 need_mark:
@@ -1452,15 +1476,11 @@ err:
        return ret;
 }
 
-int bch2_trans_mark_extent(struct btree_trans *trans,
-                          enum btree_id btree_id, unsigned level,
-                          struct bkey_s_c old, struct bkey_i *new,
-                          unsigned flags)
+static int __trans_mark_extent(struct btree_trans *trans,
+                              enum btree_id btree_id, unsigned level,
+                              struct bkey_s_c k, unsigned flags)
 {
        struct bch_fs *c = trans->c;
-       struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
-               ? old
-               : bkey_i_to_s_c(new);
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p;
@@ -1517,6 +1537,24 @@ int bch2_trans_mark_extent(struct btree_trans *trans,
        return ret;
 }
 
+int bch2_trans_mark_extent(struct btree_trans *trans,
+                          enum btree_id btree_id, unsigned level,
+                          struct bkey_s_c old, struct bkey_i *new,
+                          unsigned flags)
+{
+       struct bch_fs *c = trans->c;
+       int mod = (int) bch2_bkey_needs_rebalance(c, bkey_i_to_s_c(new)) -
+                 (int) bch2_bkey_needs_rebalance(c, old);
+
+       if (mod) {
+               int ret = bch2_btree_bit_mod(trans, BTREE_ID_rebalance_work, new->k.p, mod > 0);
+               if (ret)
+                       return ret;
+       }
+
+       return trigger_run_overwrite_then_insert(__trans_mark_extent, trans, btree_id, level, old, new, flags);
+}
+
 static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
                                         struct bkey_s_c_stripe s,
                                         unsigned idx, bool deleting)
@@ -1670,15 +1708,10 @@ int bch2_trans_mark_stripe(struct btree_trans *trans,
        return ret;
 }
 
-int bch2_trans_mark_reservation(struct btree_trans *trans,
-                               enum btree_id btree_id, unsigned level,
-                               struct bkey_s_c old,
-                               struct bkey_i *new,
-                               unsigned flags)
+static int __trans_mark_reservation(struct btree_trans *trans,
+                                   enum btree_id btree_id, unsigned level,
+                                   struct bkey_s_c k, unsigned flags)
 {
-       struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
-               ? old
-               : bkey_i_to_s_c(new);
        unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
        s64 sectors = (s64) k.k->size;
        struct replicas_delta_list *d;
@@ -1700,7 +1733,16 @@ int bch2_trans_mark_reservation(struct btree_trans *trans,
        return 0;
 }
 
-static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
+int bch2_trans_mark_reservation(struct btree_trans *trans,
+                               enum btree_id btree_id, unsigned level,
+                               struct bkey_s_c old,
+                               struct bkey_i *new,
+                               unsigned flags)
+{
+       return trigger_run_overwrite_then_insert(__trans_mark_reservation, trans, btree_id, level, old, new, flags);
+}
+
+static int trans_mark_reflink_p_segment(struct btree_trans *trans,
                        struct bkey_s_c_reflink_p p,
                        u64 *idx, unsigned flags)
 {
@@ -1767,35 +1809,38 @@ err:
        return ret;
 }
 
-int bch2_trans_mark_reflink_p(struct btree_trans *trans,
-                             enum btree_id btree_id, unsigned level,
-                             struct bkey_s_c old,
-                             struct bkey_i *new,
-                             unsigned flags)
+static int __trans_mark_reflink_p(struct btree_trans *trans,
+                               enum btree_id btree_id, unsigned level,
+                               struct bkey_s_c k, unsigned flags)
 {
-       struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
-               ? old
-               : bkey_i_to_s_c(new);
        struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
        u64 idx, end_idx;
        int ret = 0;
 
-       if (flags & BTREE_TRIGGER_INSERT) {
-               struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
-
-               v->front_pad = v->back_pad = 0;
-       }
-
        idx     = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
        end_idx = le64_to_cpu(p.v->idx) + p.k->size +
                le32_to_cpu(p.v->back_pad);
 
        while (idx < end_idx && !ret)
-               ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
-
+               ret = trans_mark_reflink_p_segment(trans, p, &idx, flags);
        return ret;
 }
 
+int bch2_trans_mark_reflink_p(struct btree_trans *trans,
+                             enum btree_id btree_id, unsigned level,
+                             struct bkey_s_c old,
+                             struct bkey_i *new,
+                             unsigned flags)
+{
+       if (flags & BTREE_TRIGGER_INSERT) {
+               struct bch_reflink_p *v = &bkey_i_to_reflink_p(new)->v;
+
+               v->front_pad = v->back_pad = 0;
+       }
+
+       return trigger_run_overwrite_then_insert(__trans_mark_reflink_p, trans, btree_id, level, old, new, flags);
+}
+
 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
                                    struct bch_dev *ca, size_t b,
                                    enum bch_data_type type,
@@ -1818,6 +1863,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
 
        if (a->v.data_type && type && a->v.data_type != type) {
                bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+                             BCH_FSCK_ERR_bucket_metadata_type_mismatch,
                        "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
                        "while marking %s",
                        iter.pos.inode, iter.pos.offset, a->v.gen,
@@ -1825,16 +1871,16 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
                        bch2_data_types[type],
                        bch2_data_types[type]);
                ret = -EIO;
-               goto out;
+               goto err;
        }
 
-       a->v.data_type          = type;
-       a->v.dirty_sectors      = sectors;
-
-       ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
-       if (ret)
-               goto out;
-out:
+       if (a->v.data_type      != type ||
+           a->v.dirty_sectors  != sectors) {
+               a->v.data_type          = type;
+               a->v.dirty_sectors      = sectors;
+               ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
+       }
+err:
        bch2_trans_iter_exit(trans, &iter);
        return ret;
 }
@@ -1929,6 +1975,22 @@ int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
        return ret;
 }
 
+int bch2_trans_mark_dev_sbs(struct bch_fs *c)
+{
+       struct bch_dev *ca;
+       unsigned i;
+
+       for_each_online_member(ca, c, i) {
+               int ret = bch2_trans_mark_dev_sb(c, ca);
+               if (ret) {
+                       percpu_ref_put(&ca->ref);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
 /* Disk reservations: */
 
 #define SECTORS_CACHE  1024
index bf8d7f407e9cf372c2a5cf4b636e7bddf906e913..21f6cb356921f1e3b1f9df59fbdae7309f3931fa 100644 (file)
@@ -339,12 +339,27 @@ int bch2_trans_mark_stripe(struct btree_trans *, enum btree_id, unsigned, struct
 int bch2_trans_mark_reservation(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
 int bch2_trans_mark_reflink_p(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
 
+#define mem_trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
+({                                                                                             \
+       int ret = 0;                                                                            \
+                                                                                               \
+       if (_old.k->type)                                                                       \
+               ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_INSERT);     \
+       if (!ret && _new.k->type)                                                               \
+               ret = _fn(_trans, _btree_id, _level, _new, _flags & ~BTREE_TRIGGER_OVERWRITE);  \
+       ret;                                                                                    \
+})
+
+#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)  \
+       mem_trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, bkey_i_to_s_c(_new), _flags)
+
 void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
 int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);
 
 int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
                                    size_t, enum bch_data_type, unsigned);
 int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
+int bch2_trans_mark_dev_sbs(struct bch_fs *);
 
 static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
 {
index f69e15dc699c9b6b22c07c8a1ce709bf478e57d8..4bb88aefed121f275582df94e3cea9dcdec7c58c 100644 (file)
@@ -332,8 +332,8 @@ static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
        struct bch_ioctl_data_event e = {
                .type                   = BCH_DATA_EVENT_PROGRESS,
                .p.data_type            = ctx->stats.data_type,
-               .p.btree_id             = ctx->stats.btree_id,
-               .p.pos                  = ctx->stats.pos,
+               .p.btree_id             = ctx->stats.pos.btree,
+               .p.pos                  = ctx->stats.pos.pos,
                .p.sectors_done         = atomic64_read(&ctx->stats.sectors_seen),
                .p.sectors_total        = bch2_fs_usage_read_short(c).used,
        };
index 1480b64547b0c961d7a62f06071e8bffcf7e35a8..a8b148ec2a2b6b8ed1f33d10ad195b72afa112e0 100644 (file)
@@ -697,14 +697,32 @@ err:
        return ret;
 }
 
+void bch2_compression_opt_to_text(struct printbuf *out, u64 v)
+{
+       struct bch_compression_opt opt = bch2_compression_decode(v);
+
+       if (opt.type < BCH_COMPRESSION_OPT_NR)
+               prt_str(out, bch2_compression_opts[opt.type]);
+       else
+               prt_printf(out, "(unknown compression opt %u)", opt.type);
+       if (opt.level)
+               prt_printf(out, ":%u", opt.level);
+}
+
 void bch2_opt_compression_to_text(struct printbuf *out,
                                  struct bch_fs *c,
                                  struct bch_sb *sb,
                                  u64 v)
 {
-       struct bch_compression_opt opt = bch2_compression_decode(v);
+       return bch2_compression_opt_to_text(out, v);
+}
 
-       prt_str(out, bch2_compression_opts[opt.type]);
-       if (opt.level)
-               prt_printf(out, ":%u", opt.level);
+int bch2_opt_compression_validate(u64 v, struct printbuf *err)
+{
+       if (!bch2_compression_opt_valid(v)) {
+               prt_printf(err, "invalid compression opt %llu", v);
+               return -BCH_ERR_invalid_sb_opt_compression;
+       }
+
+       return 0;
 }
index 052ea303241fc31407edde0bcc2d3037d7691137..607fd5e232c902dbb39f3dac84ea2e214e6b106c 100644 (file)
@@ -4,12 +4,18 @@
 
 #include "extents_types.h"
 
+static const unsigned __bch2_compression_opt_to_type[] = {
+#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_COMPRESSION_TYPE_##t,
+       BCH_COMPRESSION_OPTS()
+#undef x
+};
+
 struct bch_compression_opt {
        u8              type:4,
                        level:4;
 };
 
-static inline struct bch_compression_opt bch2_compression_decode(unsigned v)
+static inline struct bch_compression_opt __bch2_compression_decode(unsigned v)
 {
        return (struct bch_compression_opt) {
                .type   = v & 15,
@@ -17,17 +23,25 @@ static inline struct bch_compression_opt bch2_compression_decode(unsigned v)
        };
 }
 
+static inline bool bch2_compression_opt_valid(unsigned v)
+{
+       struct bch_compression_opt opt = __bch2_compression_decode(v);
+
+       return opt.type < ARRAY_SIZE(__bch2_compression_opt_to_type) && !(!opt.type && opt.level);
+}
+
+static inline struct bch_compression_opt bch2_compression_decode(unsigned v)
+{
+       return bch2_compression_opt_valid(v)
+               ? __bch2_compression_decode(v)
+               : (struct bch_compression_opt) { 0 };
+}
+
 static inline unsigned bch2_compression_encode(struct bch_compression_opt opt)
 {
        return opt.type|(opt.level << 4);
 }
 
-static const unsigned __bch2_compression_opt_to_type[] = {
-#define x(t, n) [BCH_COMPRESSION_OPT_##t] = BCH_COMPRESSION_TYPE_##t,
-       BCH_COMPRESSION_OPTS()
-#undef x
-};
-
 static inline enum bch_compression_type bch2_compression_opt_to_type(unsigned v)
 {
        return __bch2_compression_opt_to_type[bch2_compression_decode(v).type];
@@ -44,12 +58,16 @@ int bch2_check_set_has_compressed_data(struct bch_fs *, unsigned);
 void bch2_fs_compress_exit(struct bch_fs *);
 int bch2_fs_compress_init(struct bch_fs *);
 
+void bch2_compression_opt_to_text(struct printbuf *, u64);
+
 int bch2_opt_compression_parse(struct bch_fs *, const char *, u64 *, struct printbuf *);
 void bch2_opt_compression_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
+int bch2_opt_compression_validate(u64, struct printbuf *);
 
 #define bch2_opt_compression (struct bch_opt_fn) {             \
-       .parse          = bch2_opt_compression_parse,   \
-       .to_text        = bch2_opt_compression_to_text, \
+       .parse          = bch2_opt_compression_parse,           \
+       .to_text        = bch2_opt_compression_to_text,         \
+       .validate       = bch2_opt_compression_validate,        \
 }
 
 #endif /* _BCACHEFS_COMPRESS_H */
index 114f86b45fd52ffc0e3be365f7eb4109e0358fdc..87b4b2d1ec766f65e97c40cab918b256015ae3ed 100644 (file)
@@ -69,9 +69,15 @@ static inline int __darray_make_room(darray_void *d, size_t t_size, size_t more,
        _ret;                                                           \
 })
 
+#define darray_remove_item(_d, _pos)                                   \
+       array_remove_item((_d)->data, (_d)->nr, (_pos) - (_d)->data)
+
 #define darray_for_each(_d, _i)                                                \
        for (_i = (_d).data; _i < (_d).data + (_d).nr; _i++)
 
+#define darray_for_each_reverse(_d, _i)                                        \
+       for (_i = (_d).data + (_d).nr - 1; _i >= (_d).data; --_i)
+
 #define darray_init(_d)                                                        \
 do {                                                                   \
        (_d)->data = NULL;                                              \
index 899ff46de8e062aa4213d4815b8b20789d54bae3..0771a6d880bf5e2e4efcbcc21d91d34b64160dd4 100644 (file)
@@ -13,6 +13,7 @@
 #include "keylist.h"
 #include "move.h"
 #include "nocow_locking.h"
+#include "rebalance.h"
 #include "subvolume.h"
 #include "trace.h"
 
@@ -161,11 +162,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
                        if (((1U << i) & m->data_opts.rewrite_ptrs) &&
                            (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
                            !ptr->cached) {
-                               bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), ptr);
-                               /*
-                                * See comment below:
                                bch2_extent_ptr_set_cached(bkey_i_to_s(insert), ptr);
-                               */
                                rewrites_found |= 1U << i;
                        }
                        i++;
@@ -211,14 +208,8 @@ restart_drop_extra_replicas:
                        if (!p.ptr.cached &&
                            durability - ptr_durability >= m->op.opts.data_replicas) {
                                durability -= ptr_durability;
-                               bch2_bkey_drop_ptr_noerror(bkey_i_to_s(insert), &entry->ptr);
-                               /*
-                                * Currently, we're dropping unneeded replicas
-                                * instead of marking them as cached, since
-                                * cached data in stripe buckets prevents them
-                                * from being reused:
+
                                bch2_extent_ptr_set_cached(bkey_i_to_s(insert), &entry->ptr);
-                               */
                                goto restart_drop_extra_replicas;
                        }
                }
@@ -251,11 +242,11 @@ restart_drop_extra_replicas:
                ret =   bch2_insert_snapshot_whiteouts(trans, m->btree_id,
                                                k.k->p, bkey_start_pos(&insert->k)) ?:
                        bch2_insert_snapshot_whiteouts(trans, m->btree_id,
-                                               k.k->p, insert->k.p);
-               if (ret)
-                       goto err;
-
-               ret   = bch2_trans_update(trans, &iter, insert,
+                                               k.k->p, insert->k.p) ?:
+                       bch2_bkey_set_needs_rebalance(c, insert,
+                                                     op->opts.background_target,
+                                                     op->opts.background_compression) ?:
+                       bch2_trans_update(trans, &iter, insert,
                                BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
                        bch2_trans_commit(trans, &op->res,
                                NULL,
@@ -281,11 +272,11 @@ next:
                }
                continue;
 nowork:
-               if (m->ctxt && m->ctxt->stats) {
+               if (m->stats && m->stats) {
                        BUG_ON(k.k->p.offset <= iter.pos.offset);
-                       atomic64_inc(&m->ctxt->stats->keys_raced);
+                       atomic64_inc(&m->stats->keys_raced);
                        atomic64_add(k.k->p.offset - iter.pos.offset,
-                                    &m->ctxt->stats->sectors_raced);
+                                    &m->stats->sectors_raced);
                }
 
                this_cpu_inc(c->counters[BCH_COUNTER_move_extent_fail]);
@@ -439,6 +430,8 @@ int bch2_data_update_init(struct btree_trans *trans,
        bch2_bkey_buf_reassemble(&m->k, c, k);
        m->btree_id     = btree_id;
        m->data_opts    = data_opts;
+       m->ctxt         = ctxt;
+       m->stats        = ctxt ? ctxt->stats : NULL;
 
        bch2_write_op_init(&m->op, c, io_opts);
        m->op.pos       = bkey_start_pos(k.k);
@@ -487,7 +480,7 @@ int bch2_data_update_init(struct btree_trans *trans,
 
                if (c->opts.nocow_enabled) {
                        if (ctxt) {
-                               move_ctxt_wait_event(ctxt, trans,
+                               move_ctxt_wait_event(ctxt,
                                                (locked = bch2_bucket_nocow_trylock(&c->nocow_locks,
                                                                          PTR_BUCKET_POS(c, &p.ptr), 0)) ||
                                                !atomic_read(&ctxt->read_sectors));
index 7ca1f98d7e9462d3563f3149f96d288300d54388..9dc17b9d83795181798deb5af39401d4d6248581 100644 (file)
@@ -23,6 +23,7 @@ struct data_update {
        struct bkey_buf         k;
        struct data_update_opts data_opts;
        struct moving_context   *ctxt;
+       struct bch_move_stats   *stats;
        struct bch_write_op     op;
 };
 
index 75a3dc7cbd470da758750e612b187f16e34e8eb1..57c5128db173f4579168c71b8c67749b1d63004c 100644 (file)
@@ -517,7 +517,7 @@ static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *
 
        prt_printf(out, "%px btree=%s l=%u ",
               b,
-              bch2_btree_ids[b->c.btree_id],
+              bch2_btree_id_str(b->c.btree_id),
               b->c.level);
        prt_newline(out);
 
@@ -919,18 +919,18 @@ void bch2_fs_debug_init(struct bch_fs *c)
             bd < c->btree_debug + ARRAY_SIZE(c->btree_debug);
             bd++) {
                bd->id = bd - c->btree_debug;
-               debugfs_create_file(bch2_btree_ids[bd->id],
+               debugfs_create_file(bch2_btree_id_str(bd->id),
                                    0400, c->btree_debug_dir, bd,
                                    &btree_debug_ops);
 
                snprintf(name, sizeof(name), "%s-formats",
-                        bch2_btree_ids[bd->id]);
+                        bch2_btree_id_str(bd->id));
 
                debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
                                    &btree_format_debug_ops);
 
                snprintf(name, sizeof(name), "%s-bfloat-failed",
-                        bch2_btree_ids[bd->id]);
+                        bch2_btree_id_str(bd->id));
 
                debugfs_create_file(name, 0400, c->btree_debug_dir, bd,
                                    &bfloat_failed_debug_ops);
index 6c6c8d57d72b43a0acdfe41f04c9e7cd3ee53383..1a0f2d5715692baa2f26a088c61b55742e03fec3 100644 (file)
@@ -97,61 +97,51 @@ const struct bch_hash_desc bch2_dirent_hash_desc = {
        .is_visible     = dirent_is_visible,
 };
 
-int bch2_dirent_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_dirent_invalid(struct bch_fs *c, struct bkey_s_c k,
                        enum bkey_invalid_flags flags,
                        struct printbuf *err)
 {
        struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
        struct qstr d_name = bch2_dirent_get_name(d);
+       int ret = 0;
 
-       if (!d_name.len) {
-               prt_printf(err, "empty name");
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(!d_name.len, c, err,
+                        dirent_empty_name,
+                        "empty name");
 
-       if (bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len)) {
-               prt_printf(err, "value too big (%zu > %u)",
-                      bkey_val_u64s(k.k), dirent_val_u64s(d_name.len));
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len), c, err,
+                        dirent_val_too_big,
+                        "value too big (%zu > %u)",
+                        bkey_val_u64s(k.k), dirent_val_u64s(d_name.len));
 
        /*
         * Check new keys don't exceed the max length
         * (older keys may be larger.)
         */
-       if ((flags & BKEY_INVALID_COMMIT) && d_name.len > BCH_NAME_MAX) {
-               prt_printf(err, "dirent name too big (%u > %u)",
-                      d_name.len, BCH_NAME_MAX);
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (d_name.len != strnlen(d_name.name, d_name.len)) {
-               prt_printf(err, "dirent has stray data after name's NUL");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (d_name.len == 1 && !memcmp(d_name.name, ".", 1)) {
-               prt_printf(err, "invalid name");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (d_name.len == 2 && !memcmp(d_name.name, "..", 2)) {
-               prt_printf(err, "invalid name");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (memchr(d_name.name, '/', d_name.len)) {
-               prt_printf(err, "invalid name");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (d.v->d_type != DT_SUBVOL &&
-           le64_to_cpu(d.v->d_inum) == d.k->p.inode) {
-               prt_printf(err, "dirent points to own directory");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       return 0;
+       bkey_fsck_err_on((flags & BKEY_INVALID_COMMIT) && d_name.len > BCH_NAME_MAX, c, err,
+                        dirent_name_too_long,
+                        "dirent name too big (%u > %u)",
+                        d_name.len, BCH_NAME_MAX);
+
+       bkey_fsck_err_on(d_name.len != strnlen(d_name.name, d_name.len), c, err,
+                        dirent_name_embedded_nul,
+                        "dirent has stray data after name's NUL");
+
+       bkey_fsck_err_on((d_name.len == 1 && !memcmp(d_name.name, ".", 1)) ||
+                        (d_name.len == 2 && !memcmp(d_name.name, "..", 2)), c, err,
+                        dirent_name_dot_or_dotdot,
+                        "invalid name");
+
+       bkey_fsck_err_on(memchr(d_name.name, '/', d_name.len), c, err,
+                        dirent_name_has_slash,
+                        "name with /");
+
+       bkey_fsck_err_on(d.v->d_type != DT_SUBVOL &&
+                        le64_to_cpu(d.v->d_inum) == d.k->p.inode, c, err,
+                        dirent_to_itself,
+                        "dirent points to own directory");
+fsck_err:
+       return ret;
 }
 
 void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c,
index e9fa1df38232bf34d43afb7a6a671212f74ff5e0..cd262bf4d9c5365747562f22536309dc5853d070 100644 (file)
@@ -7,7 +7,7 @@
 enum bkey_invalid_flags;
 extern const struct bch_hash_desc bch2_dirent_hash_desc;
 
-int bch2_dirent_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_dirent_invalid(struct bch_fs *, struct bkey_s_c,
                        enum bkey_invalid_flags, struct printbuf *);
 void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
index e00133b6ea517be6a6544973532e0c299e7371b5..d613695abf9f67c2e9f2ab4ce91d863bdfd743c7 100644 (file)
@@ -175,6 +175,7 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
 
                dst->deleted    = BCH_GROUP_DELETED(src);
                dst->parent     = BCH_GROUP_PARENT(src);
+               memcpy(dst->label, src->label, sizeof(dst->label));
        }
 
        for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
@@ -382,7 +383,57 @@ int bch2_disk_path_find_or_create(struct bch_sb_handle *sb, const char *name)
        return v;
 }
 
-void bch2_disk_path_to_text(struct printbuf *out, struct bch_sb *sb, unsigned v)
+void bch2_disk_path_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
+{
+       struct bch_disk_groups_cpu *groups;
+       struct bch_disk_group_cpu *g;
+       unsigned nr = 0;
+       u16 path[32];
+
+       out->atomic++;
+       rcu_read_lock();
+       groups = rcu_dereference(c->disk_groups);
+       if (!groups)
+               goto invalid;
+
+       while (1) {
+               if (nr == ARRAY_SIZE(path))
+                       goto invalid;
+
+               if (v >= groups->nr)
+                       goto invalid;
+
+               g = groups->entries + v;
+
+               if (g->deleted)
+                       goto invalid;
+
+               path[nr++] = v;
+
+               if (!g->parent)
+                       break;
+
+               v = g->parent - 1;
+       }
+
+       while (nr) {
+               v = path[--nr];
+               g = groups->entries + v;
+
+               prt_printf(out, "%.*s", (int) sizeof(g->label), g->label);
+               if (nr)
+                       prt_printf(out, ".");
+       }
+out:
+       rcu_read_unlock();
+       out->atomic--;
+       return;
+invalid:
+       prt_printf(out, "invalid label %u", v);
+       goto out;
+}
+
+void bch2_disk_path_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsigned v)
 {
        struct bch_sb_field_disk_groups *groups =
                bch2_sb_field_get(sb, disk_groups);
@@ -493,10 +544,7 @@ int bch2_opt_target_parse(struct bch_fs *c, const char *val, u64 *res,
        return -EINVAL;
 }
 
-void bch2_opt_target_to_text(struct printbuf *out,
-                            struct bch_fs *c,
-                            struct bch_sb *sb,
-                            u64 v)
+void bch2_target_to_text(struct printbuf *out, struct bch_fs *c, unsigned v)
 {
        struct target t = target_decode(v);
 
@@ -504,47 +552,69 @@ void bch2_opt_target_to_text(struct printbuf *out,
        case TARGET_NULL:
                prt_printf(out, "none");
                break;
-       case TARGET_DEV:
-               if (c) {
-                       struct bch_dev *ca;
-
-                       rcu_read_lock();
-                       ca = t.dev < c->sb.nr_devices
-                               ? rcu_dereference(c->devs[t.dev])
-                               : NULL;
-
-                       if (ca && percpu_ref_tryget(&ca->io_ref)) {
-                               prt_printf(out, "/dev/%pg", ca->disk_sb.bdev);
-                               percpu_ref_put(&ca->io_ref);
-                       } else if (ca) {
-                               prt_printf(out, "offline device %u", t.dev);
-                       } else {
-                               prt_printf(out, "invalid device %u", t.dev);
-                       }
-
-                       rcu_read_unlock();
+       case TARGET_DEV: {
+               struct bch_dev *ca;
+
+               rcu_read_lock();
+               ca = t.dev < c->sb.nr_devices
+                       ? rcu_dereference(c->devs[t.dev])
+                       : NULL;
+
+               if (ca && percpu_ref_tryget(&ca->io_ref)) {
+                       prt_printf(out, "/dev/%pg", ca->disk_sb.bdev);
+                       percpu_ref_put(&ca->io_ref);
+               } else if (ca) {
+                       prt_printf(out, "offline device %u", t.dev);
                } else {
-                       struct bch_member m = bch2_sb_member_get(sb, t.dev);
-
-                       if (bch2_dev_exists(sb, t.dev)) {
-                               prt_printf(out, "Device ");
-                               pr_uuid(out, m.uuid.b);
-                               prt_printf(out, " (%u)", t.dev);
-                       } else {
-                               prt_printf(out, "Bad device %u", t.dev);
-                       }
+                       prt_printf(out, "invalid device %u", t.dev);
                }
+
+               rcu_read_unlock();
                break;
+       }
        case TARGET_GROUP:
-               if (c) {
-                       mutex_lock(&c->sb_lock);
-                       bch2_disk_path_to_text(out, c->disk_sb.sb, t.group);
-                       mutex_unlock(&c->sb_lock);
+               bch2_disk_path_to_text(out, c, t.group);
+               break;
+       default:
+               BUG();
+       }
+}
+
+void bch2_target_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsigned v)
+{
+       struct target t = target_decode(v);
+
+       switch (t.type) {
+       case TARGET_NULL:
+               prt_printf(out, "none");
+               break;
+       case TARGET_DEV: {
+               struct bch_member m = bch2_sb_member_get(sb, t.dev);
+
+               if (bch2_dev_exists(sb, t.dev)) {
+                       prt_printf(out, "Device ");
+                       pr_uuid(out, m.uuid.b);
+                       prt_printf(out, " (%u)", t.dev);
                } else {
-                       bch2_disk_path_to_text(out, sb, t.group);
+                       prt_printf(out, "Bad device %u", t.dev);
                }
                break;
+       }
+       case TARGET_GROUP:
+               bch2_disk_path_to_text_sb(out, sb, t.group);
+               break;
        default:
                BUG();
        }
 }
+
+void bch2_opt_target_to_text(struct printbuf *out,
+                            struct bch_fs *c,
+                            struct bch_sb *sb,
+                            u64 v)
+{
+       if (c)
+               bch2_target_to_text(out, c, v);
+       else
+               bch2_target_to_text_sb(out, sb, v);
+}
index bd7711767fd4f95537fb2ed38d615fdf6aeec250..441826fff224369b79698442e6b314cf5331c02c 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _BCACHEFS_DISK_GROUPS_H
 #define _BCACHEFS_DISK_GROUPS_H
 
+#include "disk_groups_types.h"
+
 extern const struct bch_sb_field_ops bch_sb_field_ops_disk_groups;
 
 static inline unsigned disk_groups_nr(struct bch_sb_field_disk_groups *groups)
@@ -83,7 +85,10 @@ int bch2_disk_path_find(struct bch_sb_handle *, const char *);
 /* Exported for userspace bcachefs-tools: */
 int bch2_disk_path_find_or_create(struct bch_sb_handle *, const char *);
 
-void bch2_disk_path_to_text(struct printbuf *, struct bch_sb *, unsigned);
+void bch2_disk_path_to_text(struct printbuf *, struct bch_fs *, unsigned);
+void bch2_disk_path_to_text_sb(struct printbuf *, struct bch_sb *, unsigned);
+
+void bch2_target_to_text(struct printbuf *out, struct bch_fs *, unsigned);
 
 int bch2_opt_target_parse(struct bch_fs *, const char *, u64 *, struct printbuf *);
 void bch2_opt_target_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
diff --git a/fs/bcachefs/disk_groups_types.h b/fs/bcachefs/disk_groups_types.h
new file mode 100644 (file)
index 0000000..a54ef08
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_DISK_GROUPS_TYPES_H
+#define _BCACHEFS_DISK_GROUPS_TYPES_H
+
+struct bch_disk_group_cpu {
+       bool                            deleted;
+       u16                             parent;
+       u8                              label[BCH_SB_LABEL_SIZE];
+       struct bch_devs_mask            devs;
+};
+
+struct bch_disk_groups_cpu {
+       struct rcu_head                 rcu;
+       unsigned                        nr;
+       struct bch_disk_group_cpu       entries[] __counted_by(nr);
+};
+
+#endif /* _BCACHEFS_DISK_GROUPS_TYPES_H */
index 8646856e4539eae46fed2634154ebb340f841a46..875f7c5a6fca63337a6be502daf2dda5a48844ea 100644 (file)
@@ -105,29 +105,26 @@ struct ec_bio {
 
 /* Stripes btree keys: */
 
-int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_stripe_invalid(struct bch_fs *c, struct bkey_s_c k,
                        enum bkey_invalid_flags flags,
                        struct printbuf *err)
 {
        const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
+       int ret = 0;
 
-       if (bkey_eq(k.k->p, POS_MIN)) {
-               prt_printf(err, "stripe at POS_MIN");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (k.k->p.inode) {
-               prt_printf(err, "nonzero inode field");
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
+                        bpos_gt(k.k->p, POS(0, U32_MAX)), c, err,
+                        stripe_pos_bad,
+                        "stripe at bad pos");
 
-       if (bkey_val_u64s(k.k) < stripe_val_u64s(s)) {
-               prt_printf(err, "incorrect value size (%zu < %u)",
-                      bkey_val_u64s(k.k), stripe_val_u64s(s));
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s), c, err,
+                        stripe_val_size_bad,
+                        "incorrect value size (%zu < %u)",
+                        bkey_val_u64s(k.k), stripe_val_u64s(s));
 
-       return bch2_bkey_ptrs_invalid(c, k, flags, err);
+       ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
+fsck_err:
+       return ret;
 }
 
 void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
@@ -153,6 +150,7 @@ void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
                prt_printf(out, " %u:%llu:%u", ptr->dev, b, offset);
                if (i < nr_data)
                        prt_printf(out, "#%u", stripe_blockcount_get(s, i));
+               prt_printf(out, " gen %u", ptr->gen);
                if (ptr_stale(ca, ptr))
                        prt_printf(out, " stale");
        }
@@ -306,16 +304,21 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
                        struct bch_csum got = ec_block_checksum(buf, i, offset);
 
                        if (bch2_crc_cmp(want, got)) {
-                               struct printbuf buf2 = PRINTBUF;
+                               struct printbuf err = PRINTBUF;
+                               struct bch_dev *ca = bch_dev_bkey_exists(c, v->ptrs[i].dev);
+
+                               prt_printf(&err, "stripe checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)\n",
+                                          want.hi, want.lo,
+                                          got.hi, got.lo,
+                                          bch2_csum_types[v->csum_type]);
+                               prt_printf(&err, "  for %ps at %u of\n  ", (void *) _RET_IP_, i);
+                               bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
+                               bch_err_ratelimited(ca, "%s", err.buf);
+                               printbuf_exit(&err);
 
-                               bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key));
-
-                               bch_err_ratelimited(c,
-                                       "stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
-                                       (void *) _RET_IP_, i, j, v->csum_type,
-                                       want.lo, got.lo, buf2.buf);
-                               printbuf_exit(&buf2);
                                clear_bit(i, buf->valid);
+
+                               bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
                                break;
                        }
 
@@ -373,7 +376,11 @@ static void ec_block_endio(struct bio *bio)
        struct bch_dev *ca = ec_bio->ca;
        struct closure *cl = bio->bi_private;
 
-       if (bch2_dev_io_err_on(bio->bi_status, ca, "erasure coding %s error: %s",
+       if (bch2_dev_io_err_on(bio->bi_status, ca,
+                              bio_data_dir(bio)
+                              ? BCH_MEMBER_ERROR_write
+                              : BCH_MEMBER_ERROR_read,
+                              "erasure coding %s error: %s",
                               bio_data_dir(bio) ? "write" : "read",
                               bch2_blk_status_to_str(bio->bi_status)))
                clear_bit(ec_bio->idx, ec_bio->buf->valid);
@@ -474,14 +481,10 @@ err:
        return ret;
 }
 
-static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *stripe)
-{
-       return bch2_trans_run(c, get_stripe_key_trans(trans, idx, stripe));
-}
-
 /* recovery read path: */
-int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
+int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio)
 {
+       struct bch_fs *c = trans->c;
        struct ec_stripe_buf *buf;
        struct closure cl;
        struct bch_stripe *v;
@@ -496,7 +499,7 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
        if (!buf)
                return -BCH_ERR_ENOMEM_ec_read_extent;
 
-       ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
+       ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
        if (ret) {
                bch_err_ratelimited(c,
                        "error doing reconstruct read: error %i looking up stripe", ret);
index 966d165a3b6602c200f0be8c4090be6d8027e7ea..7d0237c9819f1a42561f5ec81512e1c4278d12fd 100644 (file)
@@ -8,7 +8,7 @@
 
 enum bkey_invalid_flags;
 
-int bch2_stripe_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_stripe_invalid(struct bch_fs *, struct bkey_s_c,
                        enum bkey_invalid_flags, struct printbuf *);
 void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
                         struct bkey_s_c);
@@ -199,7 +199,7 @@ struct ec_stripe_head {
        struct ec_stripe_new    *s;
 };
 
-int bch2_ec_read_extent(struct bch_fs *, struct bch_read_bio *);
+int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *);
 
 void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);
 
index 7cc083776a2e029a6ec2b11169867e4b02e1173d..68a1a96bb7caf526a148a988c12913151c57b6d5 100644 (file)
@@ -3,6 +3,8 @@
 #define _BCACHEFS_ERRCODE_H
 
 #define BCH_ERRCODES()                                                         \
+       x(ERANGE,                       ERANGE_option_too_small)                \
+       x(ERANGE,                       ERANGE_option_too_big)                  \
        x(ENOMEM,                       ENOMEM_stripe_buf)                      \
        x(ENOMEM,                       ENOMEM_replicas_table)                  \
        x(ENOMEM,                       ENOMEM_cpu_replicas)                    \
        x(BCH_ERR_invalid_sb,           invalid_sb_crypt)                       \
        x(BCH_ERR_invalid_sb,           invalid_sb_clean)                       \
        x(BCH_ERR_invalid_sb,           invalid_sb_quota)                       \
+       x(BCH_ERR_invalid_sb,           invalid_sb_errors)                      \
+       x(BCH_ERR_invalid_sb,           invalid_sb_opt_compression)             \
        x(BCH_ERR_invalid,              invalid_bkey)                           \
        x(BCH_ERR_operation_blocked,    nocow_lock_blocked)                     \
        x(EIO,                          btree_node_read_err)                    \
index 2a5af88726132bc936e4ddc404086b862f0666ef..7b28d37922fd0e47d82ac1d27403f031cc577c7b 100644 (file)
@@ -56,8 +56,9 @@ void bch2_io_error_work(struct work_struct *work)
        up_write(&c->state_lock);
 }
 
-void bch2_io_error(struct bch_dev *ca)
+void bch2_io_error(struct bch_dev *ca, enum bch_member_error_type type)
 {
+       atomic64_inc(&ca->errors[type]);
        //queue_work(system_long_wq, &ca->io_error_work);
 }
 
@@ -116,31 +117,34 @@ static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
        if (test_bit(BCH_FS_FSCK_DONE, &c->flags))
                return NULL;
 
-       list_for_each_entry(s, &c->fsck_errors, list)
+       list_for_each_entry(s, &c->fsck_error_msgs, list)
                if (s->fmt == fmt) {
                        /*
                         * move it to the head of the list: repeated fsck errors
                         * are common
                         */
-                       list_move(&s->list, &c->fsck_errors);
+                       list_move(&s->list, &c->fsck_error_msgs);
                        return s;
                }
 
        s = kzalloc(sizeof(*s), GFP_NOFS);
        if (!s) {
-               if (!c->fsck_alloc_err)
+               if (!c->fsck_alloc_msgs_err)
                        bch_err(c, "kmalloc err, cannot ratelimit fsck errs");
-               c->fsck_alloc_err = true;
+               c->fsck_alloc_msgs_err = true;
                return NULL;
        }
 
        INIT_LIST_HEAD(&s->list);
        s->fmt = fmt;
-       list_add(&s->list, &c->fsck_errors);
+       list_add(&s->list, &c->fsck_error_msgs);
        return s;
 }
 
-int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
+int bch2_fsck_err(struct bch_fs *c,
+                 enum bch_fsck_flags flags,
+                 enum bch_sb_error_id err,
+                 const char *fmt, ...)
 {
        struct fsck_err_state *s = NULL;
        va_list args;
@@ -148,11 +152,13 @@ int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
        struct printbuf buf = PRINTBUF, *out = &buf;
        int ret = -BCH_ERR_fsck_ignore;
 
+       bch2_sb_error_count(c, err);
+
        va_start(args, fmt);
        prt_vprintf(out, fmt, args);
        va_end(args);
 
-       mutex_lock(&c->fsck_error_lock);
+       mutex_lock(&c->fsck_error_msgs_lock);
        s = fsck_err_get(c, fmt);
        if (s) {
                /*
@@ -162,7 +168,7 @@ int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
                 */
                if (s->last_msg && !strcmp(buf.buf, s->last_msg)) {
                        ret = s->ret;
-                       mutex_unlock(&c->fsck_error_lock);
+                       mutex_unlock(&c->fsck_error_msgs_lock);
                        printbuf_exit(&buf);
                        return ret;
                }
@@ -257,7 +263,7 @@ int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
        if (s)
                s->ret = ret;
 
-       mutex_unlock(&c->fsck_error_lock);
+       mutex_unlock(&c->fsck_error_msgs_lock);
 
        printbuf_exit(&buf);
 
@@ -278,9 +284,9 @@ void bch2_flush_fsck_errs(struct bch_fs *c)
 {
        struct fsck_err_state *s, *n;
 
-       mutex_lock(&c->fsck_error_lock);
+       mutex_lock(&c->fsck_error_msgs_lock);
 
-       list_for_each_entry_safe(s, n, &c->fsck_errors, list) {
+       list_for_each_entry_safe(s, n, &c->fsck_error_msgs, list) {
                if (s->ratelimited && s->last_msg)
                        bch_err(c, "Saw %llu errors like:\n    %s", s->nr, s->last_msg);
 
@@ -289,5 +295,5 @@ void bch2_flush_fsck_errs(struct bch_fs *c)
                kfree(s);
        }
 
-       mutex_unlock(&c->fsck_error_lock);
+       mutex_unlock(&c->fsck_error_msgs_lock);
 }
index 7ce9540052e53df99a90d4ac958c671ad375bdc7..d167d65986e0425f2c2e8b2d5503b5d0a6526c6c 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/list.h>
 #include <linux/printk.h>
+#include "sb-errors.h"
 
 struct bch_dev;
 struct bch_fs;
@@ -101,18 +102,26 @@ struct fsck_err_state {
        char                    *last_msg;
 };
 
-#define FSCK_CAN_FIX           (1 << 0)
-#define FSCK_CAN_IGNORE                (1 << 1)
-#define FSCK_NEED_FSCK         (1 << 2)
-#define FSCK_NO_RATELIMIT      (1 << 3)
+enum bch_fsck_flags {
+       FSCK_CAN_FIX            = 1 << 0,
+       FSCK_CAN_IGNORE         = 1 << 1,
+       FSCK_NEED_FSCK          = 1 << 2,
+       FSCK_NO_RATELIMIT       = 1 << 3,
+};
+
+#define fsck_err_count(_c, _err)       bch2_sb_err_count(_c, BCH_FSCK_ERR_##_err)
 
-__printf(3, 4) __cold
-int bch2_fsck_err(struct bch_fs *, unsigned, const char *, ...);
+__printf(4, 5) __cold
+int bch2_fsck_err(struct bch_fs *,
+                 enum bch_fsck_flags,
+                 enum bch_sb_error_id,
+                 const char *, ...);
 void bch2_flush_fsck_errs(struct bch_fs *);
 
-#define __fsck_err(c, _flags, msg, ...)                                        \
+#define __fsck_err(c, _flags, _err_type, ...)                          \
 ({                                                                     \
-       int _ret = bch2_fsck_err(c, _flags, msg, ##__VA_ARGS__);        \
+       int _ret = bch2_fsck_err(c, _flags, BCH_FSCK_ERR_##_err_type,   \
+                                __VA_ARGS__);                          \
                                                                        \
        if (_ret != -BCH_ERR_fsck_fix &&                                \
            _ret != -BCH_ERR_fsck_ignore) {                             \
@@ -127,26 +136,53 @@ void bch2_flush_fsck_errs(struct bch_fs *);
 
 /* XXX: mark in superblock that filesystem contains errors, if we ignore: */
 
-#define __fsck_err_on(cond, c, _flags, ...)                            \
-       (unlikely(cond) ? __fsck_err(c, _flags, ##__VA_ARGS__) : false)
+#define __fsck_err_on(cond, c, _flags, _err_type, ...)                 \
+       (unlikely(cond) ? __fsck_err(c, _flags, _err_type, __VA_ARGS__) : false)
+
+#define need_fsck_err_on(cond, c, _err_type, ...)                              \
+       __fsck_err_on(cond, c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__)
+
+#define need_fsck_err(c, _err_type, ...)                               \
+       __fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, _err_type, __VA_ARGS__)
+
+#define mustfix_fsck_err(c, _err_type, ...)                            \
+       __fsck_err(c, FSCK_CAN_FIX, _err_type, __VA_ARGS__)
+
+#define mustfix_fsck_err_on(cond, c, _err_type, ...)                   \
+       __fsck_err_on(cond, c, FSCK_CAN_FIX, _err_type, __VA_ARGS__)
 
-#define need_fsck_err_on(cond, c, ...)                                 \
-       __fsck_err_on(cond, c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, ##__VA_ARGS__)
+#define fsck_err(c, _err_type, ...)                                    \
+       __fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__)
 
-#define need_fsck_err(c, ...)                                          \
-       __fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK, ##__VA_ARGS__)
+#define fsck_err_on(cond, c, _err_type, ...)                           \
+       __fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__)
 
-#define mustfix_fsck_err(c, ...)                                       \
-       __fsck_err(c, FSCK_CAN_FIX, ##__VA_ARGS__)
+static inline void bch2_bkey_fsck_err(struct bch_fs *c,
+                                    struct printbuf *err_msg,
+                                    enum bch_sb_error_id err_type,
+                                    const char *fmt, ...)
+{
+       va_list args;
 
-#define mustfix_fsck_err_on(cond, c, ...)                              \
-       __fsck_err_on(cond, c, FSCK_CAN_FIX, ##__VA_ARGS__)
+       va_start(args, fmt);
+       prt_vprintf(err_msg, fmt, args);
+       va_end(args);
 
-#define fsck_err(c, ...)                                               \
-       __fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, ##__VA_ARGS__)
+}
 
-#define fsck_err_on(cond, c, ...)                                      \
-       __fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, ##__VA_ARGS__)
+#define bkey_fsck_err(c, _err_msg, _err_type, ...)                     \
+do {                                                                   \
+       prt_printf(_err_msg, __VA_ARGS__);                              \
+       bch2_sb_error_count(c, BCH_FSCK_ERR_##_err_type);               \
+       ret = -BCH_ERR_invalid_bkey;                                    \
+       goto fsck_err;                                                  \
+} while (0)
+
+#define bkey_fsck_err_on(cond, ...)                                    \
+do {                                                                   \
+       if (unlikely(cond))                                             \
+               bkey_fsck_err(__VA_ARGS__);                             \
+} while (0)
 
 /*
  * Fatal errors: these don't indicate a bug, but we can't continue running in RW
@@ -179,26 +215,26 @@ do {                                                                      \
 void bch2_io_error_work(struct work_struct *);
 
 /* Does the error handling without logging a message */
-void bch2_io_error(struct bch_dev *);
+void bch2_io_error(struct bch_dev *, enum bch_member_error_type);
 
-#define bch2_dev_io_err_on(cond, ca, ...)                              \
+#define bch2_dev_io_err_on(cond, ca, _type, ...)                       \
 ({                                                                     \
        bool _ret = (cond);                                             \
                                                                        \
        if (_ret) {                                                     \
                bch_err_dev_ratelimited(ca, __VA_ARGS__);               \
-               bch2_io_error(ca);                                      \
+               bch2_io_error(ca, _type);                               \
        }                                                               \
        _ret;                                                           \
 })
 
-#define bch2_dev_inum_io_err_on(cond, ca, ...)                         \
+#define bch2_dev_inum_io_err_on(cond, ca, _type, ...)                  \
 ({                                                                     \
        bool _ret = (cond);                                             \
                                                                        \
        if (_ret) {                                                     \
                bch_err_inum_offset_ratelimited(ca, __VA_ARGS__);       \
-               bch2_io_error(ca);                                      \
+               bch2_io_error(ca, _type);                               \
        }                                                               \
        _ret;                                                           \
 })
index 1b25f84e4b9cb883fe36dd70bfe43a8df10484aa..a864de231b69e297e85491dfd285928152c467b8 100644 (file)
@@ -13,6 +13,7 @@
 #include "btree_iter.h"
 #include "buckets.h"
 #include "checksum.h"
+#include "compress.h"
 #include "debug.h"
 #include "disk_groups.h"
 #include "error.h"
@@ -162,17 +163,19 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
 
 /* KEY_TYPE_btree_ptr: */
 
-int bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k,
                           enum bkey_invalid_flags flags,
                           struct printbuf *err)
 {
-       if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX) {
-               prt_printf(err, "value too big (%zu > %u)",
-                      bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
-               return -BCH_ERR_invalid_bkey;
-       }
+       int ret = 0;
+
+       bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, c, err,
+                        btree_ptr_val_too_big,
+                        "value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
 
-       return bch2_bkey_ptrs_invalid(c, k, flags, err);
+       ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
+fsck_err:
+       return ret;
 }
 
 void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
@@ -181,17 +184,20 @@ void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
        bch2_bkey_ptrs_to_text(out, c, k);
 }
 
-int bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
                              enum bkey_invalid_flags flags,
                              struct printbuf *err)
 {
-       if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX) {
-               prt_printf(err, "value too big (%zu > %zu)",
-                      bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
-               return -BCH_ERR_invalid_bkey;
-       }
+       int ret = 0;
+
+       bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX, c, err,
+                        btree_ptr_v2_val_too_big,
+                        "value too big (%zu > %zu)",
+                        bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
 
-       return bch2_bkey_ptrs_invalid(c, k, flags, err);
+       ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
+fsck_err:
+       return ret;
 }
 
 void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
@@ -372,19 +378,18 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
 
 /* KEY_TYPE_reservation: */
 
-int bch2_reservation_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k,
                             enum bkey_invalid_flags flags,
                             struct printbuf *err)
 {
        struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
+       int ret = 0;
 
-       if (!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX) {
-               prt_printf(err, "invalid nr_replicas (%u)",
-                      r.v->nr_replicas);
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       return 0;
+       bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, c, err,
+                        reservation_key_nr_replicas_invalid,
+                        "invalid nr_replicas (%u)", r.v->nr_replicas);
+fsck_err:
+       return ret;
 }
 
 void bch2_reservation_to_text(struct printbuf *out, struct bch_fs *c,
@@ -757,18 +762,6 @@ static union bch_extent_entry *extent_entry_prev(struct bkey_ptrs ptrs,
        return i;
 }
 
-static void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
-{
-       union bch_extent_entry *next = extent_entry_next(entry);
-
-       /* stripes have ptrs, but their layout doesn't work with this code */
-       BUG_ON(k.k->type == KEY_TYPE_stripe);
-
-       memmove_u64s_down(entry, next,
-                         (u64 *) bkey_val_end(k) - (u64 *) next);
-       k.k->u64s -= (u64 *) next - (u64 *) entry;
-}
-
 /*
  * Returns pointer to the next entry after the one being dropped:
  */
@@ -992,10 +985,6 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
 {
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
-       struct bch_extent_crc_unpacked crc;
-       const struct bch_extent_ptr *ptr;
-       const struct bch_extent_stripe_ptr *ec;
-       struct bch_dev *ca;
        bool first = true;
 
        if (c)
@@ -1006,9 +995,9 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                        prt_printf(out, " ");
 
                switch (__extent_entry_type(entry)) {
-               case BCH_EXTENT_ENTRY_ptr:
-                       ptr = entry_to_ptr(entry);
-                       ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
+               case BCH_EXTENT_ENTRY_ptr: {
+                       const struct bch_extent_ptr *ptr = entry_to_ptr(entry);
+                       struct bch_dev *ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
                                ? bch_dev_bkey_exists(c, ptr->dev)
                                : NULL;
 
@@ -1030,10 +1019,12 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                                        prt_printf(out, " stale");
                        }
                        break;
+               }
                case BCH_EXTENT_ENTRY_crc32:
                case BCH_EXTENT_ENTRY_crc64:
-               case BCH_EXTENT_ENTRY_crc128:
-                       crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
+               case BCH_EXTENT_ENTRY_crc128: {
+                       struct bch_extent_crc_unpacked crc =
+                               bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
 
                        prt_printf(out, "crc: c_size %u size %u offset %u nonce %u csum %s compress %s",
                               crc.compressed_size,
@@ -1042,12 +1033,26 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
                               bch2_csum_types[crc.csum_type],
                               bch2_compression_types[crc.compression_type]);
                        break;
-               case BCH_EXTENT_ENTRY_stripe_ptr:
-                       ec = &entry->stripe_ptr;
+               }
+               case BCH_EXTENT_ENTRY_stripe_ptr: {
+                       const struct bch_extent_stripe_ptr *ec = &entry->stripe_ptr;
 
                        prt_printf(out, "ec: idx %llu block %u",
                               (u64) ec->idx, ec->block);
                        break;
+               }
+               case BCH_EXTENT_ENTRY_rebalance: {
+                       const struct bch_extent_rebalance *r = &entry->rebalance;
+
+                       prt_str(out, "rebalance: target ");
+                       if (c)
+                               bch2_target_to_text(out, c, r->target);
+                       else
+                               prt_printf(out, "%u", r->target);
+                       prt_str(out, " compression ");
+                       bch2_compression_opt_to_text(out, r->compression);
+                       break;
+               }
                default:
                        prt_printf(out, "(invalid extent entry %.16llx)", *((u64 *) entry));
                        return;
@@ -1057,7 +1062,7 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
        }
 }
 
-static int extent_ptr_invalid(const struct bch_fs *c,
+static int extent_ptr_invalid(struct bch_fs *c,
                              struct bkey_s_c k,
                              enum bkey_invalid_flags flags,
                              const struct bch_extent_ptr *ptr,
@@ -1070,6 +1075,7 @@ static int extent_ptr_invalid(const struct bch_fs *c,
        u64 bucket;
        u32 bucket_offset;
        struct bch_dev *ca;
+       int ret = 0;
 
        if (!bch2_dev_exists2(c, ptr->dev)) {
                /*
@@ -1080,41 +1086,33 @@ static int extent_ptr_invalid(const struct bch_fs *c,
                if (flags & BKEY_INVALID_WRITE)
                        return 0;
 
-               prt_printf(err, "pointer to invalid device (%u)", ptr->dev);
-               return -BCH_ERR_invalid_bkey;
+               bkey_fsck_err(c, err, ptr_to_invalid_device,
+                          "pointer to invalid device (%u)", ptr->dev);
        }
 
        ca = bch_dev_bkey_exists(c, ptr->dev);
        bkey_for_each_ptr(ptrs, ptr2)
-               if (ptr != ptr2 && ptr->dev == ptr2->dev) {
-                       prt_printf(err, "multiple pointers to same device (%u)", ptr->dev);
-                       return -BCH_ERR_invalid_bkey;
-               }
+               bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
+                                ptr_to_duplicate_device,
+                                "multiple pointers to same device (%u)", ptr->dev);
 
        bucket = sector_to_bucket_and_offset(ca, ptr->offset, &bucket_offset);
 
-       if (bucket >= ca->mi.nbuckets) {
-               prt_printf(err, "pointer past last bucket (%llu > %llu)",
-                      bucket, ca->mi.nbuckets);
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket)) {
-               prt_printf(err, "pointer before first bucket (%llu < %u)",
-                      bucket, ca->mi.first_bucket);
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (bucket_offset + size_ondisk > ca->mi.bucket_size) {
-               prt_printf(err, "pointer spans multiple buckets (%u + %u > %u)",
+       bkey_fsck_err_on(bucket >= ca->mi.nbuckets, c, err,
+                        ptr_after_last_bucket,
+                        "pointer past last bucket (%llu > %llu)", bucket, ca->mi.nbuckets);
+       bkey_fsck_err_on(ptr->offset < bucket_to_sector(ca, ca->mi.first_bucket), c, err,
+                        ptr_before_first_bucket,
+                        "pointer before first bucket (%llu < %u)", bucket, ca->mi.first_bucket);
+       bkey_fsck_err_on(bucket_offset + size_ondisk > ca->mi.bucket_size, c, err,
+                        ptr_spans_multiple_buckets,
+                        "pointer spans multiple buckets (%u + %u > %u)",
                       bucket_offset, size_ondisk, ca->mi.bucket_size);
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       return 0;
+fsck_err:
+       return ret;
 }
 
-int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
                           enum bkey_invalid_flags flags,
                           struct printbuf *err)
 {
@@ -1124,24 +1122,22 @@ int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
        unsigned size_ondisk = k.k->size;
        unsigned nonce = UINT_MAX;
        unsigned nr_ptrs = 0;
-       bool unwritten = false, have_ec = false, crc_since_last_ptr = false;
-       int ret;
+       bool have_written = false, have_unwritten = false, have_ec = false, crc_since_last_ptr = false;
+       int ret = 0;
 
        if (bkey_is_btree_ptr(k.k))
                size_ondisk = btree_sectors(c);
 
        bkey_extent_entry_for_each(ptrs, entry) {
-               if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX) {
-                       prt_printf(err, "invalid extent entry type (got %u, max %u)",
-                              __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
-                       return -BCH_ERR_invalid_bkey;
-               }
+               bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX, c, err,
+                       extent_ptrs_invalid_entry,
+                       "invalid extent entry type (got %u, max %u)",
+                       __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
 
-               if (bkey_is_btree_ptr(k.k) &&
-                   !extent_entry_is_ptr(entry)) {
-                       prt_printf(err, "has non ptr field");
-                       return -BCH_ERR_invalid_bkey;
-               }
+               bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
+                                !extent_entry_is_ptr(entry), c, err,
+                                btree_ptr_has_non_ptr,
+                                "has non ptr field");
 
                switch (extent_entry_type(entry)) {
                case BCH_EXTENT_ENTRY_ptr:
@@ -1150,22 +1146,15 @@ int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
                        if (ret)
                                return ret;
 
-                       if (nr_ptrs && unwritten != entry->ptr.unwritten) {
-                               prt_printf(err, "extent with unwritten and written ptrs");
-                               return -BCH_ERR_invalid_bkey;
-                       }
-
-                       if (k.k->type != KEY_TYPE_extent && entry->ptr.unwritten) {
-                               prt_printf(err, "has unwritten ptrs");
-                               return -BCH_ERR_invalid_bkey;
-                       }
+                       bkey_fsck_err_on(entry->ptr.cached && have_ec, c, err,
+                                        ptr_cached_and_erasure_coded,
+                                        "cached, erasure coded ptr");
 
-                       if (entry->ptr.cached && have_ec) {
-                               prt_printf(err, "cached, erasure coded ptr");
-                               return -BCH_ERR_invalid_bkey;
-                       }
+                       if (!entry->ptr.unwritten)
+                               have_written = true;
+                       else
+                               have_unwritten = true;
 
-                       unwritten = entry->ptr.unwritten;
                        have_ec = false;
                        crc_since_last_ptr = false;
                        nr_ptrs++;
@@ -1175,72 +1164,77 @@ int bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k,
                case BCH_EXTENT_ENTRY_crc128:
                        crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
 
-                       if (crc.offset + crc.live_size >
-                           crc.uncompressed_size) {
-                               prt_printf(err, "checksum offset + key size > uncompressed size");
-                               return -BCH_ERR_invalid_bkey;
-                       }
-
-                       size_ondisk = crc.compressed_size;
-
-                       if (!bch2_checksum_type_valid(c, crc.csum_type)) {
-                               prt_printf(err, "invalid checksum type");
-                               return -BCH_ERR_invalid_bkey;
-                       }
-
-                       if (crc.compression_type >= BCH_COMPRESSION_TYPE_NR) {
-                               prt_printf(err, "invalid compression type");
-                               return -BCH_ERR_invalid_bkey;
-                       }
+                       bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, c, err,
+                                        ptr_crc_uncompressed_size_too_small,
+                                        "checksum offset + key size > uncompressed size");
+                       bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, err,
+                                        ptr_crc_csum_type_unknown,
+                                        "invalid checksum type");
+                       bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR, c, err,
+                                        ptr_crc_compression_type_unknown,
+                                        "invalid compression type");
 
                        if (bch2_csum_type_is_encryption(crc.csum_type)) {
                                if (nonce == UINT_MAX)
                                        nonce = crc.offset + crc.nonce;
-                               else if (nonce != crc.offset + crc.nonce) {
-                                       prt_printf(err, "incorrect nonce");
-                                       return -BCH_ERR_invalid_bkey;
-                               }
+                               else if (nonce != crc.offset + crc.nonce)
+                                       bkey_fsck_err(c, err, ptr_crc_nonce_mismatch,
+                                                     "incorrect nonce");
                        }
 
-                       if (crc_since_last_ptr) {
-                               prt_printf(err, "redundant crc entry");
-                               return -BCH_ERR_invalid_bkey;
-                       }
+                       bkey_fsck_err_on(crc_since_last_ptr, c, err,
+                                        ptr_crc_redundant,
+                                        "redundant crc entry");
                        crc_since_last_ptr = true;
+
+                       bkey_fsck_err_on(crc_is_encoded(crc) &&
+                                        (crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
+                                        (flags & (BKEY_INVALID_WRITE|BKEY_INVALID_COMMIT)), c, err,
+                                        ptr_crc_uncompressed_size_too_big,
+                                        "too large encoded extent");
+
+                       size_ondisk = crc.compressed_size;
                        break;
                case BCH_EXTENT_ENTRY_stripe_ptr:
-                       if (have_ec) {
-                               prt_printf(err, "redundant stripe entry");
-                               return -BCH_ERR_invalid_bkey;
-                       }
+                       bkey_fsck_err_on(have_ec, c, err,
+                                        ptr_stripe_redundant,
+                                        "redundant stripe entry");
                        have_ec = true;
                        break;
-               case BCH_EXTENT_ENTRY_rebalance:
+               case BCH_EXTENT_ENTRY_rebalance: {
+                       const struct bch_extent_rebalance *r = &entry->rebalance;
+
+                       if (!bch2_compression_opt_valid(r->compression)) {
+                               struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
+                               prt_printf(err, "invalid compression opt %u:%u",
+                                          opt.type, opt.level);
+                               return -BCH_ERR_invalid_bkey;
+                       }
                        break;
                }
+               }
        }
 
-       if (!nr_ptrs) {
-               prt_str(err, "no ptrs");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (nr_ptrs >= BCH_BKEY_PTRS_MAX) {
-               prt_str(err, "too many ptrs");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (crc_since_last_ptr) {
-               prt_printf(err, "redundant crc entry");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (have_ec) {
-               prt_printf(err, "redundant stripe entry");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       return 0;
+       bkey_fsck_err_on(!nr_ptrs, c, err,
+                        extent_ptrs_no_ptrs,
+                        "no ptrs");
+       bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX, c, err,
+                        extent_ptrs_too_many_ptrs,
+                        "too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
+       bkey_fsck_err_on(have_written && have_unwritten, c, err,
+                        extent_ptrs_written_and_unwritten,
+                        "extent with unwritten and written ptrs");
+       bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, c, err,
+                        extent_ptrs_unwritten,
+                        "has unwritten ptrs");
+       bkey_fsck_err_on(crc_since_last_ptr, c, err,
+                        extent_ptrs_redundant_crc,
+                        "redundant crc entry");
+       bkey_fsck_err_on(have_ec, c, err,
+                        extent_ptrs_redundant_stripe,
+                        "redundant stripe entry");
+fsck_err:
+       return ret;
 }
 
 void bch2_ptr_swab(struct bkey_s k)
@@ -1281,6 +1275,125 @@ void bch2_ptr_swab(struct bkey_s k)
        }
 }
 
+const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c k)
+{
+       struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+       const union bch_extent_entry *entry;
+
+       bkey_extent_entry_for_each(ptrs, entry)
+               if (__extent_entry_type(entry) == BCH_EXTENT_ENTRY_rebalance)
+                       return &entry->rebalance;
+
+       return NULL;
+}
+
+unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *c, struct bkey_s_c k,
+                                      unsigned target, unsigned compression)
+{
+       struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+       unsigned rewrite_ptrs = 0;
+
+       if (compression) {
+               unsigned compression_type = bch2_compression_opt_to_type(compression);
+               const union bch_extent_entry *entry;
+               struct extent_ptr_decoded p;
+               unsigned i = 0;
+
+               bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
+                       if (p.crc.compression_type == BCH_COMPRESSION_TYPE_incompressible) {
+                               rewrite_ptrs = 0;
+                               goto incompressible;
+                       }
+
+                       if (!p.ptr.cached && p.crc.compression_type != compression_type)
+                               rewrite_ptrs |= 1U << i;
+                       i++;
+               }
+       }
+incompressible:
+       if (target && bch2_target_accepts_data(c, BCH_DATA_user, target)) {
+               const struct bch_extent_ptr *ptr;
+               unsigned i = 0;
+
+               bkey_for_each_ptr(ptrs, ptr) {
+                       if (!ptr->cached && !bch2_dev_in_target(c, ptr->dev, target))
+                               rewrite_ptrs |= 1U << i;
+                       i++;
+               }
+       }
+
+       return rewrite_ptrs;
+}
+
+bool bch2_bkey_needs_rebalance(struct bch_fs *c, struct bkey_s_c k)
+{
+       const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
+
+       /*
+        * If it's an indirect extent, we don't delete the rebalance entry when
+        * done so that we know what options were applied - check if it still
+        * needs work done:
+        */
+       if (r &&
+           k.k->type == KEY_TYPE_reflink_v &&
+           !bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression))
+               r = NULL;
+
+       return r != NULL;
+}
+
+int bch2_bkey_set_needs_rebalance(struct bch_fs *c, struct bkey_i *_k,
+                                 unsigned target, unsigned compression)
+{
+       struct bkey_s k = bkey_i_to_s(_k);
+       struct bch_extent_rebalance *r;
+       bool needs_rebalance;
+
+       if (!bkey_extent_is_direct_data(k.k))
+               return 0;
+
+       /* get existing rebalance entry: */
+       r = (struct bch_extent_rebalance *) bch2_bkey_rebalance_opts(k.s_c);
+       if (r) {
+               if (k.k->type == KEY_TYPE_reflink_v) {
+                       /*
+                        * indirect extents: existing options take precedence,
+                        * so that we don't move extents back and forth if
+                        * they're referenced by different inodes with different
+                        * options:
+                        */
+                       if (r->target)
+                               target = r->target;
+                       if (r->compression)
+                               compression = r->compression;
+               }
+
+               r->target       = target;
+               r->compression  = compression;
+       }
+
+       needs_rebalance = bch2_bkey_ptrs_need_rebalance(c, k.s_c, target, compression);
+
+       if (needs_rebalance && !r) {
+               union bch_extent_entry *new = bkey_val_end(k);
+
+               new->rebalance.type             = 1U << BCH_EXTENT_ENTRY_rebalance;
+               new->rebalance.compression      = compression;
+               new->rebalance.target           = target;
+               new->rebalance.unused           = 0;
+               k.k->u64s += extent_entry_u64s(new);
+       } else if (!needs_rebalance && r && k.k->type != KEY_TYPE_reflink_v) {
+               /*
+                * For indirect extents, don't delete the rebalance entry when
+                * we're finished so that we know we specifically moved it or
+                * compressed it to its current location/compression type
+                */
+               extent_entry_drop(k, (union bch_extent_entry *) r);
+       }
+
+       return 0;
+}
+
 /* Generic extent code: */
 
 int bch2_cut_front_s(struct bpos where, struct bkey_s k)
index 879e7d218b6a4baf58b7a567266d1941f72de1fe..a2ce8a3be13ca418a001d8ff93d9091565aed800 100644 (file)
@@ -89,6 +89,18 @@ static inline void __extent_entry_insert(struct bkey_i *k,
        memcpy_u64s_small(dst, new, extent_entry_u64s(new));
 }
 
+static inline void extent_entry_drop(struct bkey_s k, union bch_extent_entry *entry)
+{
+       union bch_extent_entry *next = extent_entry_next(entry);
+
+       /* stripes have ptrs, but their layout doesn't work with this code */
+       BUG_ON(k.k->type == KEY_TYPE_stripe);
+
+       memmove_u64s_down(entry, next,
+                         (u64 *) bkey_val_end(k) - (u64 *) next);
+       k.k->u64s -= (u64 *) next - (u64 *) entry;
+}
+
 static inline bool extent_entry_is_ptr(const union bch_extent_entry *e)
 {
        return extent_entry_type(e) == BCH_EXTENT_ENTRY_ptr;
@@ -190,6 +202,11 @@ static inline bool crc_is_compressed(struct bch_extent_crc_unpacked crc)
                crc.compression_type != BCH_COMPRESSION_TYPE_incompressible);
 }
 
+static inline bool crc_is_encoded(struct bch_extent_crc_unpacked crc)
+{
+       return crc.csum_type != BCH_CSUM_none || crc_is_compressed(crc);
+}
+
 /* bkey_ptrs: generically over any key type that has ptrs */
 
 struct bkey_ptrs_c {
@@ -383,12 +400,12 @@ int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
 
 /* KEY_TYPE_btree_ptr: */
 
-int bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_btree_ptr_invalid(struct bch_fs *, struct bkey_s_c,
                           enum bkey_invalid_flags, struct printbuf *);
 void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
                            struct bkey_s_c);
 
-int bch2_btree_ptr_v2_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_btree_ptr_v2_invalid(struct bch_fs *, struct bkey_s_c,
                              enum bkey_invalid_flags, struct printbuf *);
 void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
@@ -428,7 +445,7 @@ bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
 
 /* KEY_TYPE_reservation: */
 
-int bch2_reservation_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_reservation_invalid(struct bch_fs *, struct bkey_s_c,
                             enum bkey_invalid_flags, struct printbuf *);
 void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
@@ -688,11 +705,19 @@ void bch2_extent_ptr_set_cached(struct bkey_s, struct bch_extent_ptr *);
 bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
 void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
                            struct bkey_s_c);
-int bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_bkey_ptrs_invalid(struct bch_fs *, struct bkey_s_c,
                           enum bkey_invalid_flags, struct printbuf *);
 
 void bch2_ptr_swab(struct bkey_s);
 
+const struct bch_extent_rebalance *bch2_bkey_rebalance_opts(struct bkey_s_c);
+unsigned bch2_bkey_ptrs_need_rebalance(struct bch_fs *, struct bkey_s_c,
+                                      unsigned, unsigned);
+bool bch2_bkey_needs_rebalance(struct bch_fs *, struct bkey_s_c);
+
+int bch2_bkey_set_needs_rebalance(struct bch_fs *, struct bkey_i *,
+                                 unsigned, unsigned);
+
 /* Generic extent code: */
 
 enum bch_extent_overlap {
@@ -737,22 +762,4 @@ static inline void bch2_key_resize(struct bkey *k, unsigned new_size)
        k->size = new_size;
 }
 
-/*
- * In extent_sort_fix_overlapping(), insert_fixup_extent(),
- * extent_merge_inline() - we're modifying keys in place that are packed. To do
- * that we have to unpack the key, modify the unpacked key - then this
- * copies/repacks the unpacked to the original as necessary.
- */
-static inline void extent_save(struct btree *b, struct bkey_packed *dst,
-                              struct bkey *src)
-{
-       struct bkey_format *f = &b->format;
-       struct bkey_i *dst_unpacked;
-
-       if ((dst_unpacked = packed_to_bkey(dst)))
-               dst_unpacked->k = *src;
-       else
-               BUG_ON(!bch2_bkey_pack_key(dst, src, f));
-}
-
 #endif /* _BCACHEFS_EXTENTS_H */
index bb5305441f275938d4f76535a9b9bf1aaccf7934..4496cf91a4c17bcde4e4a934eb0475007ff1311c 100644 (file)
@@ -51,7 +51,7 @@ int bch2_create_trans(struct btree_trans *trans,
                bch2_inode_init_late(new_inode, now, uid, gid, mode, rdev, dir_u);
 
                if (flags & BCH_CREATE_TMPFILE)
-                       new_inode->bi_flags |= BCH_INODE_UNLINKED;
+                       new_inode->bi_flags |= BCH_INODE_unlinked;
 
                ret = bch2_inode_create(trans, &inode_iter, new_inode, snapshot, cpu);
                if (ret)
index 58ccc7b91ac79c7343828205e37edfdca56c0fab..52f0e7acda3d81ce043672b428db4432cdcebeb2 100644 (file)
@@ -389,6 +389,21 @@ static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs
        return ret;
 }
 
+/*
+ * Determine when a writepage io is full. We have to limit writepage bios to a
+ * single page per bvec (i.e. 1MB with 4k pages) because that is the limit to
+ * what the bounce path in bch2_write_extent() can handle. In theory we could
+ * loosen this restriction for non-bounce I/O, but we don't have that context
+ * here. Ideally, we can up this limit and make it configurable in the future
+ * when the bounce path can be enhanced to accommodate larger source bios.
+ */
+static inline bool bch_io_full(struct bch_writepage_io *io, unsigned len)
+{
+       struct bio *bio = &io->op.wbio.bio;
+       return bio_full(bio, len) ||
+               (bio->bi_iter.bi_size + len > BIO_MAX_VECS * PAGE_SIZE);
+}
+
 static void bch2_writepage_io_done(struct bch_write_op *op)
 {
        struct bch_writepage_io *io =
@@ -606,9 +621,7 @@ do_io:
 
                if (w->io &&
                    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
-                    bio_full(&w->io->op.wbio.bio, sectors << 9) ||
-                    w->io->op.wbio.bio.bi_iter.bi_size + (sectors << 9) >=
-                    (BIO_MAX_VECS * PAGE_SIZE) ||
+                    bch_io_full(w->io, sectors << 9) ||
                     bio_end_sector(&w->io->op.wbio.bio) != sector))
                        bch2_writepage_do_io(w);
 
index 6a9557e7ecabb47d1a30b7c665367decb3fecbe4..5b42a76c4796f90062bb86e2914d0301e52cf7d0 100644 (file)
@@ -113,6 +113,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
        } else {
                atomic_set(&dio->cl.remaining,
                           CLOSURE_REMAINING_INITIALIZER + 1);
+               dio->cl.closure_get_happened = true;
        }
 
        dio->req        = req;
index 6040bd3f077819b41accbc08ac37fa5928d0592d..5a39bcb597a33d42826a16a98da394de3fe23660 100644 (file)
@@ -45,13 +45,13 @@ static int bch2_inode_flags_set(struct btree_trans *trans,
        unsigned newflags = s->flags;
        unsigned oldflags = bi->bi_flags & s->mask;
 
-       if (((newflags ^ oldflags) & (BCH_INODE_APPEND|BCH_INODE_IMMUTABLE)) &&
+       if (((newflags ^ oldflags) & (BCH_INODE_append|BCH_INODE_immutable)) &&
            !capable(CAP_LINUX_IMMUTABLE))
                return -EPERM;
 
        if (!S_ISREG(bi->bi_mode) &&
            !S_ISDIR(bi->bi_mode) &&
-           (newflags & (BCH_INODE_NODUMP|BCH_INODE_NOATIME)) != newflags)
+           (newflags & (BCH_INODE_nodump|BCH_INODE_noatime)) != newflags)
                return -EINVAL;
 
        if (s->set_projinherit) {
index 54a9c21a3b832ba9ad5c0281abe7363ec6f9cd9a..d30f9bb056fd9790f97c4b08f839b480bf46397c 100644 (file)
@@ -6,28 +6,28 @@
 
 /* bcachefs inode flags -> vfs inode flags: */
 static const __maybe_unused unsigned bch_flags_to_vfs[] = {
-       [__BCH_INODE_SYNC]      = S_SYNC,
-       [__BCH_INODE_IMMUTABLE] = S_IMMUTABLE,
-       [__BCH_INODE_APPEND]    = S_APPEND,
-       [__BCH_INODE_NOATIME]   = S_NOATIME,
+       [__BCH_INODE_sync]      = S_SYNC,
+       [__BCH_INODE_immutable] = S_IMMUTABLE,
+       [__BCH_INODE_append]    = S_APPEND,
+       [__BCH_INODE_noatime]   = S_NOATIME,
 };
 
 /* bcachefs inode flags -> FS_IOC_GETFLAGS: */
 static const __maybe_unused unsigned bch_flags_to_uflags[] = {
-       [__BCH_INODE_SYNC]      = FS_SYNC_FL,
-       [__BCH_INODE_IMMUTABLE] = FS_IMMUTABLE_FL,
-       [__BCH_INODE_APPEND]    = FS_APPEND_FL,
-       [__BCH_INODE_NODUMP]    = FS_NODUMP_FL,
-       [__BCH_INODE_NOATIME]   = FS_NOATIME_FL,
+       [__BCH_INODE_sync]      = FS_SYNC_FL,
+       [__BCH_INODE_immutable] = FS_IMMUTABLE_FL,
+       [__BCH_INODE_append]    = FS_APPEND_FL,
+       [__BCH_INODE_nodump]    = FS_NODUMP_FL,
+       [__BCH_INODE_noatime]   = FS_NOATIME_FL,
 };
 
 /* bcachefs inode flags -> FS_IOC_FSGETXATTR: */
 static const __maybe_unused unsigned bch_flags_to_xflags[] = {
-       [__BCH_INODE_SYNC]      = FS_XFLAG_SYNC,
-       [__BCH_INODE_IMMUTABLE] = FS_XFLAG_IMMUTABLE,
-       [__BCH_INODE_APPEND]    = FS_XFLAG_APPEND,
-       [__BCH_INODE_NODUMP]    = FS_XFLAG_NODUMP,
-       [__BCH_INODE_NOATIME]   = FS_XFLAG_NOATIME,
+       [__BCH_INODE_sync]      = FS_XFLAG_SYNC,
+       [__BCH_INODE_immutable] = FS_XFLAG_IMMUTABLE,
+       [__BCH_INODE_append]    = FS_XFLAG_APPEND,
+       [__BCH_INODE_nodump]    = FS_XFLAG_NODUMP,
+       [__BCH_INODE_noatime]   = FS_XFLAG_NOATIME,
        //[__BCH_INODE_PROJINHERIT] = FS_XFLAG_PROJINHERIT;
 };
 
index a2a5133fb6b5aec8a3023dacf24c717d416c745f..166d8d8abe683f1b05ddd0115763c79015208fa1 100644 (file)
@@ -764,15 +764,15 @@ static int bch2_getattr(struct mnt_idmap *idmap,
                stat->btime = bch2_time_to_timespec(c, inode->ei_inode.bi_otime);
        }
 
-       if (inode->ei_inode.bi_flags & BCH_INODE_IMMUTABLE)
+       if (inode->ei_inode.bi_flags & BCH_INODE_immutable)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        stat->attributes_mask    |= STATX_ATTR_IMMUTABLE;
 
-       if (inode->ei_inode.bi_flags & BCH_INODE_APPEND)
+       if (inode->ei_inode.bi_flags & BCH_INODE_append)
                stat->attributes |= STATX_ATTR_APPEND;
        stat->attributes_mask    |= STATX_ATTR_APPEND;
 
-       if (inode->ei_inode.bi_flags & BCH_INODE_NODUMP)
+       if (inode->ei_inode.bi_flags & BCH_INODE_nodump)
                stat->attributes |= STATX_ATTR_NODUMP;
        stat->attributes_mask    |= STATX_ATTR_NODUMP;
 
@@ -1213,9 +1213,6 @@ static struct dentry *bch2_get_parent(struct dentry *child)
                .inum = inode->ei_inode.bi_dir,
        };
 
-       if (!parent_inum.inum)
-               return NULL;
-
        return d_obtain_alias(bch2_vfs_inode_get(c, parent_inum));
 }
 
index b8f9e7475dc5f744ba9545c3499b627f5f122df0..9f3e9bd3d767a75fb1a0734c0413193a671f3206 100644 (file)
@@ -2,6 +2,7 @@
 
 #include "bcachefs.h"
 #include "bkey_buf.h"
+#include "btree_cache.h"
 #include "btree_update.h"
 #include "buckets.h"
 #include "darray.h"
@@ -444,9 +445,10 @@ static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
                if (i->equiv == n.equiv) {
                        bch_err(c, "snapshot deletion did not finish:\n"
                                "  duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
-                               bch2_btree_ids[btree_id],
+                               bch2_btree_id_str(btree_id),
                                pos.inode, pos.offset,
                                i->id, n.id, n.equiv);
+                       set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
                        return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots);
                }
        }
@@ -719,8 +721,9 @@ static int check_key_has_snapshot(struct btree_trans *trans,
        int ret = 0;
 
        if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
-                       "key in missing snapshot: %s",
-                       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+                               bkey_in_missing_snapshot,
+                               "key in missing snapshot: %s",
+                               (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
                ret = bch2_btree_delete_at(trans, iter,
                                            BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
 fsck_err:
@@ -789,6 +792,7 @@ static int hash_check_key(struct btree_trans *trans,
 
                if (fsck_err_on(k.k->type == desc.key_type &&
                                !desc.cmp_bkey(k, hash_k), c,
+                               hash_table_key_duplicate,
                                "duplicate hash table keys:\n%s",
                                (printbuf_reset(&buf),
                                 bch2_bkey_val_to_text(&buf, c, hash_k),
@@ -807,8 +811,9 @@ out:
        printbuf_exit(&buf);
        return ret;
 bad_hash:
-       if (fsck_err(c, "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
-                    bch2_btree_ids[desc.btree_id], hash_k.k->p.inode, hash_k.k->p.offset, hash,
+       if (fsck_err(c, hash_table_key_wrong_offset,
+                    "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
+                    bch2_btree_id_str(desc.btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash,
                     (printbuf_reset(&buf),
                      bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
                ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
@@ -849,22 +854,23 @@ static int check_inode(struct btree_trans *trans,
        BUG_ON(bch2_inode_unpack(k, &u));
 
        if (!full &&
-           !(u.bi_flags & (BCH_INODE_I_SIZE_DIRTY|
-                           BCH_INODE_I_SECTORS_DIRTY|
-                           BCH_INODE_UNLINKED)))
+           !(u.bi_flags & (BCH_INODE_i_size_dirty|
+                           BCH_INODE_i_sectors_dirty|
+                           BCH_INODE_unlinked)))
                return 0;
 
        if (prev->bi_inum != u.bi_inum)
                *prev = u;
 
        if (fsck_err_on(prev->bi_hash_seed      != u.bi_hash_seed ||
-                       inode_d_type(prev)      != inode_d_type(&u), c,
+                       inode_d_type(prev)      != inode_d_type(&u),
+                       c, inode_snapshot_mismatch,
                        "inodes in different snapshots don't match")) {
                bch_err(c, "repair not implemented yet");
                return -EINVAL;
        }
 
-       if ((u.bi_flags & (BCH_INODE_I_SIZE_DIRTY|BCH_INODE_UNLINKED)) &&
+       if ((u.bi_flags & (BCH_INODE_i_size_dirty|BCH_INODE_unlinked)) &&
            bch2_key_has_snapshot_overwrites(trans, BTREE_ID_inodes, k.k->p)) {
                struct bpos new_min_pos;
 
@@ -872,7 +878,7 @@ static int check_inode(struct btree_trans *trans,
                if (ret)
                        goto err;
 
-               u.bi_flags &= ~BCH_INODE_I_SIZE_DIRTY|BCH_INODE_UNLINKED;
+               u.bi_flags &= ~BCH_INODE_i_size_dirty|BCH_INODE_unlinked;
 
                ret = __write_inode(trans, &u, iter->pos.snapshot);
                bch_err_msg(c, ret, "in fsck updating inode");
@@ -884,9 +890,10 @@ static int check_inode(struct btree_trans *trans,
                return 0;
        }
 
-       if (u.bi_flags & BCH_INODE_UNLINKED &&
+       if (u.bi_flags & BCH_INODE_unlinked &&
            (!c->sb.clean ||
-            fsck_err(c, "filesystem marked clean, but inode %llu unlinked",
+            fsck_err(c, inode_unlinked_but_clean,
+                     "filesystem marked clean, but inode %llu unlinked",
                      u.bi_inum))) {
                bch2_trans_unlock(trans);
                bch2_fs_lazy_rw(c);
@@ -896,9 +903,10 @@ static int check_inode(struct btree_trans *trans,
                return ret;
        }
 
-       if (u.bi_flags & BCH_INODE_I_SIZE_DIRTY &&
+       if (u.bi_flags & BCH_INODE_i_size_dirty &&
            (!c->sb.clean ||
-            fsck_err(c, "filesystem marked clean, but inode %llu has i_size dirty",
+            fsck_err(c, inode_i_size_dirty_but_clean,
+                     "filesystem marked clean, but inode %llu has i_size dirty",
                      u.bi_inum))) {
                bch_verbose(c, "truncating inode %llu", u.bi_inum);
 
@@ -922,15 +930,16 @@ static int check_inode(struct btree_trans *trans,
                 * We truncated without our normal sector accounting hook, just
                 * make sure we recalculate it:
                 */
-               u.bi_flags |= BCH_INODE_I_SECTORS_DIRTY;
+               u.bi_flags |= BCH_INODE_i_sectors_dirty;
 
-               u.bi_flags &= ~BCH_INODE_I_SIZE_DIRTY;
+               u.bi_flags &= ~BCH_INODE_i_size_dirty;
                do_update = true;
        }
 
-       if (u.bi_flags & BCH_INODE_I_SECTORS_DIRTY &&
+       if (u.bi_flags & BCH_INODE_i_sectors_dirty &&
            (!c->sb.clean ||
-            fsck_err(c, "filesystem marked clean, but inode %llu has i_sectors dirty",
+            fsck_err(c, inode_i_sectors_dirty_but_clean,
+                     "filesystem marked clean, but inode %llu has i_sectors dirty",
                      u.bi_inum))) {
                s64 sectors;
 
@@ -944,14 +953,14 @@ static int check_inode(struct btree_trans *trans,
                }
 
                u.bi_sectors = sectors;
-               u.bi_flags &= ~BCH_INODE_I_SECTORS_DIRTY;
+               u.bi_flags &= ~BCH_INODE_i_sectors_dirty;
                do_update = true;
        }
 
-       if (u.bi_flags & BCH_INODE_BACKPTR_UNTRUSTED) {
+       if (u.bi_flags & BCH_INODE_backptr_untrusted) {
                u.bi_dir = 0;
                u.bi_dir_offset = 0;
-               u.bi_flags &= ~BCH_INODE_BACKPTR_UNTRUSTED;
+               u.bi_flags &= ~BCH_INODE_backptr_untrusted;
                do_update = true;
        }
 
@@ -1056,10 +1065,11 @@ static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
                        return -BCH_ERR_internal_fsck_err;
                }
 
-               if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), c,
-                           "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
-                           w->last_pos.inode, i->snapshot,
-                           i->inode.bi_sectors, i->count)) {
+               if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty),
+                               c, inode_i_sectors_wrong,
+                               "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
+                               w->last_pos.inode, i->snapshot,
+                               i->inode.bi_sectors, i->count)) {
                        i->inode.bi_sectors = i->count;
                        ret = fsck_write_inode(trans, &i->inode, i->snapshot);
                        if (ret)
@@ -1200,7 +1210,8 @@ static int overlapping_extents_found(struct btree_trans *trans,
        prt_printf(&buf, "\n  overwriting %s extent",
                   pos1.snapshot >= pos2.p.snapshot ? "first" : "second");
 
-       if (fsck_err(c, "overlapping extents%s", buf.buf)) {
+       if (fsck_err(c, extent_overlapping,
+                    "overlapping extents%s", buf.buf)) {
                struct btree_iter *old_iter = &iter1;
                struct disk_reservation res = { 0 };
 
@@ -1297,6 +1308,28 @@ err:
        return ret;
 }
 
+static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *iter,
+                               struct bkey_s_c k)
+{
+       struct bch_fs *c = trans->c;
+       struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+       struct bch_extent_crc_unpacked crc;
+       const union bch_extent_entry *i;
+       unsigned encoded_extent_max_sectors = c->opts.encoded_extent_max >> 9;
+
+       bkey_for_each_crc(k.k, ptrs, crc, i)
+               if (crc_is_encoded(crc) &&
+                   crc.uncompressed_size > encoded_extent_max_sectors) {
+                       struct printbuf buf = PRINTBUF;
+
+                       bch2_bkey_val_to_text(&buf, c, k);
+                       bch_err(c, "overbig encoded extent, please report this:\n  %s", buf.buf);
+                       printbuf_exit(&buf);
+               }
+
+       return 0;
+}
+
 static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
                        struct bkey_s_c k,
                        struct inode_walker *inode,
@@ -1333,7 +1366,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
                goto err;
 
        if (k.k->type != KEY_TYPE_whiteout) {
-               if (fsck_err_on(!i, c,
+               if (fsck_err_on(!i, c, extent_in_missing_inode,
                                "extent in missing inode:\n  %s",
                                (printbuf_reset(&buf),
                                 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
@@ -1341,7 +1374,8 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 
                if (fsck_err_on(i &&
                                !S_ISREG(i->inode.bi_mode) &&
-                               !S_ISLNK(i->inode.bi_mode), c,
+                               !S_ISLNK(i->inode.bi_mode),
+                               c, extent_in_non_reg_inode,
                                "extent in non regular inode mode %o:\n  %s",
                                i->inode.bi_mode,
                                (printbuf_reset(&buf),
@@ -1371,9 +1405,10 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
                        continue;
 
                if (k.k->type != KEY_TYPE_whiteout) {
-                       if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
+                       if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_size_dirty) &&
                                        k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
-                                       !bkey_extent_is_reservation(k), c,
+                                       !bkey_extent_is_reservation(k),
+                                       c, extent_past_end_of_inode,
                                        "extent type past end of inode %llu:%u, i_size %llu\n  %s",
                                        i->inode.bi_inum, i->snapshot, i->inode.bi_size,
                                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
@@ -1432,7 +1467,8 @@ int bch2_check_extents(struct bch_fs *c)
                        &res, NULL,
                        BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, ({
                bch2_disk_reservation_put(c, &res);
-               check_extent(trans, &iter, k, &w, &s, &extent_ends);
+               check_extent(trans, &iter, k, &w, &s, &extent_ends) ?:
+               check_extent_overbig(trans, &iter, k);
        })) ?:
        check_i_sectors(trans, &w);
 
@@ -1446,6 +1482,30 @@ int bch2_check_extents(struct bch_fs *c)
        return ret;
 }
 
+int bch2_check_indirect_extents(struct bch_fs *c)
+{
+       struct btree_trans *trans = bch2_trans_get(c);
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       struct disk_reservation res = { 0 };
+       int ret = 0;
+
+       ret = for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
+                       POS_MIN,
+                       BTREE_ITER_PREFETCH, k,
+                       &res, NULL,
+                       BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, ({
+               bch2_disk_reservation_put(c, &res);
+               check_extent_overbig(trans, &iter, k);
+       }));
+
+       bch2_disk_reservation_put(c, &res);
+       bch2_trans_put(trans);
+
+       bch_err_fn(c, ret);
+       return ret;
+}
+
 static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
 {
        struct bch_fs *c = trans->c;
@@ -1470,7 +1530,8 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
                                continue;
                }
 
-               if (fsck_err_on(i->inode.bi_nlink != i->count, c,
+               if (fsck_err_on(i->inode.bi_nlink != i->count,
+                               c, inode_dir_wrong_nlink,
                                "directory %llu:%u with wrong i_nlink: got %u, should be %llu",
                                w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) {
                        i->inode.bi_nlink = i->count;
@@ -1514,27 +1575,28 @@ static int check_dirent_target(struct btree_trans *trans,
                backpointer_exists = ret;
                ret = 0;
 
-               if (fsck_err_on(S_ISDIR(target->bi_mode) &&
-                               backpointer_exists, c,
+               if (fsck_err_on(S_ISDIR(target->bi_mode) && backpointer_exists,
+                               c, inode_dir_multiple_links,
                                "directory %llu with multiple links",
                                target->bi_inum)) {
                        ret = __remove_dirent(trans, d.k->p);
                        goto out;
                }
 
-               if (fsck_err_on(backpointer_exists &&
-                               !target->bi_nlink, c,
+               if (fsck_err_on(backpointer_exists && !target->bi_nlink,
+                               c, inode_multiple_links_but_nlink_0,
                                "inode %llu type %s has multiple links but i_nlink 0",
                                target->bi_inum, bch2_d_types[d.v->d_type])) {
                        target->bi_nlink++;
-                       target->bi_flags &= ~BCH_INODE_UNLINKED;
+                       target->bi_flags &= ~BCH_INODE_unlinked;
 
                        ret = __write_inode(trans, target, target_snapshot);
                        if (ret)
                                goto err;
                }
 
-               if (fsck_err_on(!backpointer_exists, c,
+               if (fsck_err_on(!backpointer_exists,
+                               c, inode_wrong_backpointer,
                                "inode %llu:%u has wrong backpointer:\n"
                                "got       %llu:%llu\n"
                                "should be %llu:%llu",
@@ -1552,7 +1614,8 @@ static int check_dirent_target(struct btree_trans *trans,
                }
        }
 
-       if (fsck_err_on(d.v->d_type != inode_d_type(target), c,
+       if (fsck_err_on(d.v->d_type != inode_d_type(target),
+                       c, dirent_d_type_wrong,
                        "incorrect d_type: got %s, should be %s:\n%s",
                        bch2_d_type_str(d.v->d_type),
                        bch2_d_type_str(inode_d_type(target)),
@@ -1576,7 +1639,8 @@ static int check_dirent_target(struct btree_trans *trans,
        if (d.v->d_type == DT_SUBVOL &&
            target->bi_parent_subvol != le32_to_cpu(d.v->d_parent_subvol) &&
            (c->sb.version < bcachefs_metadata_version_subvol_dirent ||
-            fsck_err(c, "dirent has wrong d_parent_subvol field: got %u, should be %u",
+            fsck_err(c, dirent_d_parent_subvol_wrong,
+                     "dirent has wrong d_parent_subvol field: got %u, should be %u",
                      le32_to_cpu(d.v->d_parent_subvol),
                      target->bi_parent_subvol))) {
                n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
@@ -1648,7 +1712,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
                *hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
        dir->first_this_inode = false;
 
-       if (fsck_err_on(!i, c,
+       if (fsck_err_on(!i, c, dirent_in_missing_dir_inode,
                        "dirent in nonexisting directory:\n%s",
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
@@ -1660,7 +1724,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
        if (!i)
                goto out;
 
-       if (fsck_err_on(!S_ISDIR(i->inode.bi_mode), c,
+       if (fsck_err_on(!S_ISDIR(i->inode.bi_mode),
+                       c, dirent_in_non_dir_inode,
                        "dirent in non directory inode type %s:\n%s",
                        bch2_d_type_str(inode_d_type(&i->inode)),
                        (printbuf_reset(&buf),
@@ -1694,7 +1759,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
                if (ret && !bch2_err_matches(ret, ENOENT))
                        goto err;
 
-               if (fsck_err_on(ret, c,
+               if (fsck_err_on(ret, c, dirent_to_missing_subvol,
                                "dirent points to missing subvolume %u",
                                le32_to_cpu(d.v->d_child_subvol))) {
                        ret = __remove_dirent(trans, d.k->p);
@@ -1706,7 +1771,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
                if (ret && !bch2_err_matches(ret, ENOENT))
                        goto err;
 
-               if (fsck_err_on(ret, c,
+               if (fsck_err_on(ret, c, subvol_to_missing_root,
                                "subvolume %u points to missing subvolume root %llu",
                                target_subvol,
                                target_inum)) {
@@ -1715,7 +1780,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
                        goto err;
                }
 
-               if (fsck_err_on(subvol_root.bi_subvol != target_subvol, c,
+               if (fsck_err_on(subvol_root.bi_subvol != target_subvol,
+                               c, subvol_root_wrong_bi_subvol,
                                "subvol root %llu has wrong bi_subvol field: got %u, should be %u",
                                target_inum,
                                subvol_root.bi_subvol, target_subvol)) {
@@ -1734,7 +1800,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
                if (ret)
                        goto err;
 
-               if (fsck_err_on(!target->inodes.nr, c,
+               if (fsck_err_on(!target->inodes.nr,
+                               c, dirent_to_missing_inode,
                                "dirent points to missing inode: (equiv %u)\n%s",
                                equiv.snapshot,
                                (printbuf_reset(&buf),
@@ -1820,7 +1887,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
                *hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode);
        inode->first_this_inode = false;
 
-       if (fsck_err_on(!i, c,
+       if (fsck_err_on(!i, c, xattr_in_missing_inode,
                        "xattr for missing inode %llu",
                        k.k->p.inode))
                return bch2_btree_delete_at(trans, iter, 0);
@@ -1869,7 +1936,8 @@ static int check_root_trans(struct btree_trans *trans)
        if (ret && !bch2_err_matches(ret, ENOENT))
                return ret;
 
-       if (mustfix_fsck_err_on(ret, c, "root subvol missing")) {
+       if (mustfix_fsck_err_on(ret, c, root_subvol_missing,
+                               "root subvol missing")) {
                struct bkey_i_subvolume root_subvol;
 
                snapshot        = U32_MAX;
@@ -1895,8 +1963,10 @@ static int check_root_trans(struct btree_trans *trans)
        if (ret && !bch2_err_matches(ret, ENOENT))
                return ret;
 
-       if (mustfix_fsck_err_on(ret, c, "root directory missing") ||
-           mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode), c,
+       if (mustfix_fsck_err_on(ret, c, root_dir_missing,
+                               "root directory missing") ||
+           mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode),
+                               c, root_inode_not_dir,
                                "root inode not a directory")) {
                bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
                                0, NULL);
@@ -2000,7 +2070,8 @@ static int check_path(struct btree_trans *trans,
                }
 
                if (bch2_err_matches(ret, ENOENT)) {
-                       if (fsck_err(c,  "unreachable inode %llu:%u, type %s nlink %u backptr %llu:%llu",
+                       if (fsck_err(c,  inode_unreachable,
+                                    "unreachable inode %llu:%u, type %s nlink %u backptr %llu:%llu",
                                     inode->bi_inum, snapshot,
                                     bch2_d_type_str(inode_d_type(inode)),
                                     inode->bi_nlink,
@@ -2040,7 +2111,8 @@ static int check_path(struct btree_trans *trans,
                                pr_err("%llu:%u", i->inum, i->snapshot);
                        pr_err("%llu:%u", inode->bi_inum, snapshot);
 
-                       if (!fsck_err(c, "directory structure loop"))
+                       if (!fsck_err(c, dir_loop,
+                                     "directory structure loop"))
                                return 0;
 
                        ret = commit_do(trans, NULL, NULL,
@@ -2088,7 +2160,7 @@ int bch2_check_directory_structure(struct bch_fs *c)
                        break;
                }
 
-               if (u.bi_flags & BCH_INODE_UNLINKED)
+               if (u.bi_flags & BCH_INODE_unlinked)
                        continue;
 
                ret = check_path(trans, &path, &u, iter.pos.snapshot);
@@ -2300,7 +2372,8 @@ static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_ite
                link = &links->d[++*idx];
        }
 
-       if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count, c,
+       if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count,
+                       c, inode_wrong_nlink,
                        "inode %llu type %s has wrong i_nlink (%u, should be %u)",
                        u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
                        bch2_inode_nlink_get(&u), link->count)) {
index 90c87b5089a01403bceabeeed390fa3adfea760e..da991e8cf27eb493ed5aac5a3e3da606ae089968 100644 (file)
@@ -4,6 +4,7 @@
 
 int bch2_check_inodes(struct bch_fs *);
 int bch2_check_extents(struct bch_fs *);
+int bch2_check_indirect_extents(struct bch_fs *);
 int bch2_check_dirents(struct bch_fs *);
 int bch2_check_xattrs(struct bch_fs *);
 int bch2_check_root(struct bch_fs *);
index bb3f443d8381cc1dd087e961a593a989146e6014..def77f2d88024b788b6ce7c03bda07827f73a621 100644 (file)
@@ -6,6 +6,7 @@
 #include "bkey_methods.h"
 #include "btree_update.h"
 #include "buckets.h"
+#include "compress.h"
 #include "error.h"
 #include "extents.h"
 #include "extent_update.h"
 
 #include <asm/unaligned.h>
 
-const char * const bch2_inode_opts[] = {
 #define x(name, ...)   #name,
+const char * const bch2_inode_opts[] = {
        BCH_INODE_OPTS()
-#undef  x
        NULL,
 };
 
+static const char * const bch2_inode_flag_strs[] = {
+       BCH_INODE_FLAGS()
+       NULL
+};
+#undef  x
+
 static const u8 byte_table[8] = { 1, 2, 3, 4, 6, 8, 10, 13 };
 
 static int inode_decode_field(const u8 *in, const u8 *end,
@@ -361,9 +367,10 @@ int bch2_inode_peek(struct btree_trans *trans,
        return ret;
 }
 
-int bch2_inode_write(struct btree_trans *trans,
+int bch2_inode_write_flags(struct btree_trans *trans,
                     struct btree_iter *iter,
-                    struct bch_inode_unpacked *inode)
+                    struct bch_inode_unpacked *inode,
+                    enum btree_update_flags flags)
 {
        struct bkey_inode_buf *inode_p;
 
@@ -373,7 +380,7 @@ int bch2_inode_write(struct btree_trans *trans,
 
        bch2_inode_pack_inlined(inode_p, inode);
        inode_p->inode.k.p.snapshot = iter->snapshot;
-       return bch2_trans_update(trans, iter, &inode_p->inode.k_i, 0);
+       return bch2_trans_update(trans, iter, &inode_p->inode.k_i, flags);
 }
 
 struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k)
@@ -397,117 +404,121 @@ struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k)
        return &inode_p->inode.k_i;
 }
 
-static int __bch2_inode_invalid(struct bkey_s_c k, struct printbuf *err)
+static int __bch2_inode_invalid(struct bch_fs *c, struct bkey_s_c k, struct printbuf *err)
 {
        struct bch_inode_unpacked unpacked;
+       int ret = 0;
 
-       if (k.k->p.inode) {
-               prt_printf(err, "nonzero k.p.inode");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (k.k->p.offset < BLOCKDEV_INODE_MAX) {
-               prt_printf(err, "fs inode in blockdev range");
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(k.k->p.inode, c, err,
+                        inode_pos_inode_nonzero,
+                        "nonzero k.p.inode");
 
-       if (bch2_inode_unpack(k, &unpacked)) {
-               prt_printf(err, "invalid variable length fields");
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(k.k->p.offset < BLOCKDEV_INODE_MAX, c, err,
+                        inode_pos_blockdev_range,
+                        "fs inode in blockdev range");
 
-       if (unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1) {
-               prt_printf(err, "invalid data checksum type (%u >= %u",
-                       unpacked.bi_data_checksum, BCH_CSUM_OPT_NR + 1);
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(bch2_inode_unpack(k, &unpacked), c, err,
+                        inode_unpack_error,
+                        "invalid variable length fields");
 
-       if (unpacked.bi_compression >= BCH_COMPRESSION_OPT_NR + 1) {
-               prt_printf(err, "invalid data checksum type (%u >= %u)",
-                      unpacked.bi_compression, BCH_COMPRESSION_OPT_NR + 1);
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1, c, err,
+                        inode_checksum_type_invalid,
+                        "invalid data checksum type (%u >= %u",
+                        unpacked.bi_data_checksum, BCH_CSUM_OPT_NR + 1);
 
-       if ((unpacked.bi_flags & BCH_INODE_UNLINKED) &&
-           unpacked.bi_nlink != 0) {
-               prt_printf(err, "flagged as unlinked but bi_nlink != 0");
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(unpacked.bi_compression &&
+                        !bch2_compression_opt_valid(unpacked.bi_compression - 1), c, err,
+                        inode_compression_type_invalid,
+                        "invalid compression opt %u", unpacked.bi_compression - 1);
 
-       if (unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode)) {
-               prt_printf(err, "subvolume root but not a directory");
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on((unpacked.bi_flags & BCH_INODE_unlinked) &&
+                        unpacked.bi_nlink != 0, c, err,
+                        inode_unlinked_but_nlink_nonzero,
+                        "flagged as unlinked but bi_nlink != 0");
 
-       return 0;
+       bkey_fsck_err_on(unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode), c, err,
+                        inode_subvol_root_but_not_dir,
+                        "subvolume root but not a directory");
+fsck_err:
+       return ret;
 }
 
-int bch2_inode_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_inode_invalid(struct bch_fs *c, struct bkey_s_c k,
                       enum bkey_invalid_flags flags,
                       struct printbuf *err)
 {
        struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
+       int ret = 0;
 
-       if (INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR) {
-               prt_printf(err, "invalid str hash type (%llu >= %u)",
-                      INODE_STR_HASH(inode.v), BCH_STR_HASH_NR);
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err,
+                        inode_str_hash_invalid,
+                        "invalid str hash type (%llu >= %u)",
+                        INODE_STR_HASH(inode.v), BCH_STR_HASH_NR);
 
-       return __bch2_inode_invalid(k, err);
+       ret = __bch2_inode_invalid(c, k, err);
+fsck_err:
+       return ret;
 }
 
-int bch2_inode_v2_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_inode_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
                          enum bkey_invalid_flags flags,
                          struct printbuf *err)
 {
        struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k);
+       int ret = 0;
 
-       if (INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR) {
-               prt_printf(err, "invalid str hash type (%llu >= %u)",
-                      INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR);
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err,
+                        inode_str_hash_invalid,
+                        "invalid str hash type (%llu >= %u)",
+                        INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR);
 
-       return __bch2_inode_invalid(k, err);
+       ret = __bch2_inode_invalid(c, k, err);
+fsck_err:
+       return ret;
 }
 
-int bch2_inode_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_inode_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
                          enum bkey_invalid_flags flags,
                          struct printbuf *err)
 {
        struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k);
+       int ret = 0;
 
-       if (INODEv3_FIELDS_START(inode.v) < INODEv3_FIELDS_START_INITIAL ||
-           INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k)) {
-               prt_printf(err, "invalid fields_start (got %llu, min %u max %zu)",
-                      INODEv3_FIELDS_START(inode.v),
-                      INODEv3_FIELDS_START_INITIAL,
-                      bkey_val_u64s(inode.k));
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(INODEv3_FIELDS_START(inode.v) < INODEv3_FIELDS_START_INITIAL ||
+                        INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k), c, err,
+                        inode_v3_fields_start_bad,
+                        "invalid fields_start (got %llu, min %u max %zu)",
+                        INODEv3_FIELDS_START(inode.v),
+                        INODEv3_FIELDS_START_INITIAL,
+                        bkey_val_u64s(inode.k));
 
-       if (INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR) {
-               prt_printf(err, "invalid str hash type (%llu >= %u)",
-                      INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR);
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err,
+                        inode_str_hash_invalid,
+                        "invalid str hash type (%llu >= %u)",
+                        INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR);
 
-       return __bch2_inode_invalid(k, err);
+       ret = __bch2_inode_invalid(c, k, err);
+fsck_err:
+       return ret;
 }
 
 static void __bch2_inode_unpacked_to_text(struct printbuf *out,
                                          struct bch_inode_unpacked *inode)
 {
-       prt_printf(out, "mode %o flags %x journal_seq %llu bi_size %llu bi_sectors %llu bi_version %llu",
-              inode->bi_mode, inode->bi_flags,
+       prt_printf(out, "mode=%o ", inode->bi_mode);
+
+       prt_str(out, "flags=");
+       prt_bitflags(out, bch2_inode_flag_strs, inode->bi_flags & ((1U << 20) - 1));
+       prt_printf(out, " (%x)", inode->bi_flags);
+
+       prt_printf(out, " journal_seq=%llu bi_size=%llu bi_sectors=%llu bi_version=%llu",
               inode->bi_journal_seq,
               inode->bi_size,
               inode->bi_sectors,
               inode->bi_version);
 
 #define x(_name, _bits)                                                \
-       prt_printf(out, " "#_name " %llu", (u64) inode->_name);
+       prt_printf(out, " "#_name "=%llu", (u64) inode->_name);
        BCH_INODE_FIELDS_v3()
 #undef  x
 }
@@ -546,7 +557,7 @@ static inline u64 bkey_inode_flags(struct bkey_s_c k)
 
 static inline bool bkey_is_deleted_inode(struct bkey_s_c k)
 {
-       return bkey_inode_flags(k) & BCH_INODE_UNLINKED;
+       return bkey_inode_flags(k) & BCH_INODE_unlinked;
 }
 
 int bch2_trans_mark_inode(struct btree_trans *trans,
@@ -610,16 +621,17 @@ int bch2_mark_inode(struct btree_trans *trans,
        return 0;
 }
 
-int bch2_inode_generation_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_inode_generation_invalid(struct bch_fs *c, struct bkey_s_c k,
                                  enum bkey_invalid_flags flags,
                                  struct printbuf *err)
 {
-       if (k.k->p.inode) {
-               prt_printf(err, "nonzero k.p.inode");
-               return -BCH_ERR_invalid_bkey;
-       }
+       int ret = 0;
 
-       return 0;
+       bkey_fsck_err_on(k.k->p.inode, c, err,
+                        inode_pos_inode_nonzero,
+                        "nonzero k.p.inode");
+fsck_err:
+       return ret;
 }
 
 void bch2_inode_generation_to_text(struct printbuf *out, struct bch_fs *c,
@@ -926,8 +938,8 @@ int bch2_inode_find_by_inum(struct bch_fs *c, subvol_inum inum,
 
 int bch2_inode_nlink_inc(struct bch_inode_unpacked *bi)
 {
-       if (bi->bi_flags & BCH_INODE_UNLINKED)
-               bi->bi_flags &= ~BCH_INODE_UNLINKED;
+       if (bi->bi_flags & BCH_INODE_unlinked)
+               bi->bi_flags &= ~BCH_INODE_unlinked;
        else {
                if (bi->bi_nlink == U32_MAX)
                        return -EINVAL;
@@ -940,13 +952,13 @@ int bch2_inode_nlink_inc(struct bch_inode_unpacked *bi)
 
 void bch2_inode_nlink_dec(struct btree_trans *trans, struct bch_inode_unpacked *bi)
 {
-       if (bi->bi_nlink && (bi->bi_flags & BCH_INODE_UNLINKED)) {
+       if (bi->bi_nlink && (bi->bi_flags & BCH_INODE_unlinked)) {
                bch2_trans_inconsistent(trans, "inode %llu unlinked but link count nonzero",
                                        bi->bi_inum);
                return;
        }
 
-       if (bi->bi_flags & BCH_INODE_UNLINKED) {
+       if (bi->bi_flags & BCH_INODE_unlinked) {
                bch2_trans_inconsistent(trans, "inode %llu link count underflow", bi->bi_inum);
                return;
        }
@@ -954,7 +966,7 @@ void bch2_inode_nlink_dec(struct btree_trans *trans, struct bch_inode_unpacked *
        if (bi->bi_nlink)
                bi->bi_nlink--;
        else
-               bi->bi_flags |= BCH_INODE_UNLINKED;
+               bi->bi_flags |= BCH_INODE_unlinked;
 }
 
 struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *inode)
@@ -979,6 +991,18 @@ void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
                opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0;
 }
 
+int bch2_inum_opts_get(struct btree_trans *trans, subvol_inum inum, struct bch_io_opts *opts)
+{
+       struct bch_inode_unpacked inode;
+       int ret = lockrestart_do(trans, bch2_inode_find_by_inum_trans(trans, inum, &inode));
+
+       if (ret)
+               return ret;
+
+       bch2_inode_opts_get(opts, trans->c, &inode);
+       return 0;
+}
+
 int bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
 {
        struct bch_fs *c = trans->c;
@@ -1042,53 +1066,85 @@ err:
        return ret ?: -BCH_ERR_transaction_restart_nested;
 }
 
-static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos)
+static int may_delete_deleted_inode(struct btree_trans *trans,
+                                   struct btree_iter *iter,
+                                   struct bpos pos,
+                                   bool *need_another_pass)
 {
        struct bch_fs *c = trans->c;
-       struct btree_iter iter;
+       struct btree_iter inode_iter;
        struct bkey_s_c k;
        struct bch_inode_unpacked inode;
        int ret;
 
-       if (bch2_snapshot_is_internal_node(c, pos.snapshot))
-               return 0;
-
-       if (!fsck_err_on(c->sb.clean, c,
-                        "filesystem marked as clean but have deleted inode %llu:%u",
-                        pos.offset, pos.snapshot))
-               return 0;
-
-       k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes, pos, BTREE_ITER_CACHED);
+       k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_CACHED);
        ret = bkey_err(k);
        if (ret)
                return ret;
 
        ret = bkey_is_inode(k.k) ? 0 : -BCH_ERR_ENOENT_inode;
        if (fsck_err_on(!bkey_is_inode(k.k), c,
+                       deleted_inode_missing,
                        "nonexistent inode %llu:%u in deleted_inodes btree",
                        pos.offset, pos.snapshot))
                goto delete;
 
        ret = bch2_inode_unpack(k, &inode);
        if (ret)
-               goto err;
+               goto out;
 
        if (fsck_err_on(S_ISDIR(inode.bi_mode), c,
+                       deleted_inode_is_dir,
                        "directory %llu:%u in deleted_inodes btree",
                        pos.offset, pos.snapshot))
                goto delete;
 
-       if (fsck_err_on(!(inode.bi_flags & BCH_INODE_UNLINKED), c,
+       if (fsck_err_on(!(inode.bi_flags & BCH_INODE_unlinked), c,
+                       deleted_inode_not_unlinked,
                        "non-deleted inode %llu:%u in deleted_inodes btree",
                        pos.offset, pos.snapshot))
                goto delete;
 
-       return 1;
-err:
+       if (c->sb.clean &&
+           !fsck_err(c,
+                     deleted_inode_but_clean,
+                     "filesystem marked as clean but have deleted inode %llu:%u",
+                     pos.offset, pos.snapshot)) {
+               ret = 0;
+               goto out;
+       }
+
+       if (bch2_snapshot_is_internal_node(c, pos.snapshot)) {
+               struct bpos new_min_pos;
+
+               ret = bch2_propagate_key_to_snapshot_leaves(trans, inode_iter.btree_id, k, &new_min_pos);
+               if (ret)
+                       goto out;
+
+               inode.bi_flags &= ~BCH_INODE_unlinked;
+
+               ret = bch2_inode_write_flags(trans, &inode_iter, &inode,
+                                            BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
+               bch_err_msg(c, ret, "clearing inode unlinked flag");
+               if (ret)
+                       goto out;
+
+               /*
+                * We'll need another write buffer flush to pick up the new
+                * unlinked inodes in the snapshot leaves:
+                */
+               *need_another_pass = true;
+               return 0;
+       }
+
+       ret = 1;
+out:
 fsck_err:
+       bch2_trans_iter_exit(trans, &inode_iter);
        return ret;
 delete:
-       return bch2_btree_bit_mod(trans, BTREE_ID_deleted_inodes, pos, false);
+       ret = bch2_btree_bit_mod(trans, BTREE_ID_deleted_inodes, pos, false);
+       goto out;
 }
 
 int bch2_delete_dead_inodes(struct bch_fs *c)
@@ -1096,7 +1152,10 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
        struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_s_c k;
+       bool need_another_pass;
        int ret;
+again:
+       need_another_pass = false;
 
        ret = bch2_btree_write_buffer_flush_sync(trans);
        if (ret)
@@ -1110,7 +1169,8 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
         */
        for_each_btree_key(trans, iter, BTREE_ID_deleted_inodes, POS_MIN,
                           BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
-               ret = lockrestart_do(trans, may_delete_deleted_inode(trans, k.k->p));
+               ret = lockrestart_do(trans, may_delete_deleted_inode(trans, &iter, k.k->p,
+                                                                    &need_another_pass));
                if (ret < 0)
                        break;
 
@@ -1120,12 +1180,17 @@ int bch2_delete_dead_inodes(struct bch_fs *c)
                                bch2_fs_lazy_rw(c);
                        }
 
+                       bch_verbose(c, "deleting unlinked inode %llu:%u", k.k->p.offset, k.k->p.snapshot);
+
                        ret = bch2_inode_rm_snapshot(trans, k.k->p.offset, k.k->p.snapshot);
                        if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
                                break;
                }
        }
        bch2_trans_iter_exit(trans, &iter);
+
+       if (!ret && need_another_pass)
+               goto again;
 err:
        bch2_trans_put(trans);
 
index a7464e1b696046a074f10f2a72cc38163718f041..88818a332b1e5fcaa5fd9b350d958ef582c05161 100644 (file)
@@ -3,16 +3,17 @@
 #define _BCACHEFS_INODE_H
 
 #include "bkey.h"
+#include "bkey_methods.h"
 #include "opts.h"
 
 enum bkey_invalid_flags;
 extern const char * const bch2_inode_opts[];
 
-int bch2_inode_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_inode_invalid(struct bch_fs *, struct bkey_s_c,
                       enum bkey_invalid_flags, struct printbuf *);
-int bch2_inode_v2_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_inode_v2_invalid(struct bch_fs *, struct bkey_s_c,
                          enum bkey_invalid_flags, struct printbuf *);
-int bch2_inode_v3_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_inode_v3_invalid(struct bch_fs *, struct bkey_s_c,
                          enum bkey_invalid_flags, struct printbuf *);
 void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
@@ -52,7 +53,7 @@ static inline bool bkey_is_inode(const struct bkey *k)
                k->type == KEY_TYPE_inode_v3;
 }
 
-int bch2_inode_generation_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_inode_generation_invalid(struct bch_fs *, struct bkey_s_c,
                                  enum bkey_invalid_flags, struct printbuf *);
 void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
@@ -101,8 +102,16 @@ void bch2_inode_unpacked_to_text(struct printbuf *, struct bch_inode_unpacked *)
 
 int bch2_inode_peek(struct btree_trans *, struct btree_iter *,
                    struct bch_inode_unpacked *, subvol_inum, unsigned);
-int bch2_inode_write(struct btree_trans *, struct btree_iter *,
-                    struct bch_inode_unpacked *);
+
+int bch2_inode_write_flags(struct btree_trans *, struct btree_iter *,
+                    struct bch_inode_unpacked *, enum btree_update_flags);
+
+static inline int bch2_inode_write(struct btree_trans *trans,
+                    struct btree_iter *iter,
+                    struct bch_inode_unpacked *inode)
+{
+       return bch2_inode_write_flags(trans, iter, inode, 0);
+}
 
 void bch2_inode_init_early(struct bch_fs *,
                           struct bch_inode_unpacked *);
@@ -177,7 +186,7 @@ static inline unsigned nlink_bias(umode_t mode)
 
 static inline unsigned bch2_inode_nlink_get(struct bch_inode_unpacked *bi)
 {
-       return bi->bi_flags & BCH_INODE_UNLINKED
+       return bi->bi_flags & BCH_INODE_unlinked
                  ? 0
                  : bi->bi_nlink + nlink_bias(bi->bi_mode);
 }
@@ -187,10 +196,10 @@ static inline void bch2_inode_nlink_set(struct bch_inode_unpacked *bi,
 {
        if (nlink) {
                bi->bi_nlink = nlink - nlink_bias(bi->bi_mode);
-               bi->bi_flags &= ~BCH_INODE_UNLINKED;
+               bi->bi_flags &= ~BCH_INODE_unlinked;
        } else {
                bi->bi_nlink = 0;
-               bi->bi_flags |= BCH_INODE_UNLINKED;
+               bi->bi_flags |= BCH_INODE_unlinked;
        }
 }
 
@@ -200,6 +209,7 @@ void bch2_inode_nlink_dec(struct btree_trans *, struct bch_inode_unpacked *);
 struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *);
 void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *,
                         struct bch_inode_unpacked *);
+int bch2_inum_opts_get(struct btree_trans*, subvol_inum, struct bch_io_opts *);
 
 int bch2_inode_rm_snapshot(struct btree_trans *, u64, u32);
 int bch2_delete_dead_inodes(struct bch_fs *);
index 119834cb8f9ee7f80c10b504ca8602ae5614ac37..bebc11444ef5ec598ef83c475716ea789b33bf69 100644 (file)
 #include "io_misc.h"
 #include "io_write.h"
 #include "logged_ops.h"
+#include "rebalance.h"
 #include "subvolume.h"
 
 /* Overwrites whatever was present with zeroes: */
 int bch2_extent_fallocate(struct btree_trans *trans,
                          subvol_inum inum,
                          struct btree_iter *iter,
-                         unsigned sectors,
+                         u64 sectors,
                          struct bch_io_opts opts,
                          s64 *i_sectors_delta,
                          struct write_point_specifier write_point)
@@ -104,7 +105,7 @@ int bch2_extent_fallocate(struct btree_trans *trans,
                if (ret)
                        goto err;
 
-               sectors = min(sectors, wp->sectors_free);
+               sectors = min_t(u64, sectors, wp->sectors_free);
                sectors_allocated = sectors;
 
                bch2_key_resize(&e->k, sectors);
@@ -355,6 +356,7 @@ static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
        struct btree_iter iter;
        struct bkey_i_logged_op_finsert *op = bkey_i_to_logged_op_finsert(op_k);
        subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
+       struct bch_io_opts opts;
        u64 dst_offset = le64_to_cpu(op->v.dst_offset);
        u64 src_offset = le64_to_cpu(op->v.src_offset);
        s64 shift = dst_offset - src_offset;
@@ -363,6 +365,10 @@ static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
        bool insert = shift > 0;
        int ret = 0;
 
+       ret = bch2_inum_opts_get(trans, inum, &opts);
+       if (ret)
+               return ret;
+
        bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
                             POS(inum.inum, 0),
                             BTREE_ITER_INTENT);
@@ -443,7 +449,10 @@ case LOGGED_OP_FINSERT_shift_extents:
 
                op->v.pos = cpu_to_le64(insert ? bkey_start_offset(&delete.k) : delete.k.p.offset);
 
-               ret =   bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
+               ret =   bch2_bkey_set_needs_rebalance(c, copy,
+                                       opts.background_target,
+                                       opts.background_compression) ?:
+                       bch2_btree_insert_trans(trans, BTREE_ID_extents, &delete, 0) ?:
                        bch2_btree_insert_trans(trans, BTREE_ID_extents, copy, 0) ?:
                        bch2_logged_op_update(trans, &op->k_i) ?:
                        bch2_trans_commit(trans, &disk_res, NULL, BTREE_INSERT_NOFAIL);
index c9e6ed40e1b80c6582d40d0db40b9c1e3eef807d..9cb44a7c43c1714678ef27d4ead33b29ac567849 100644 (file)
@@ -3,7 +3,7 @@
 #define _BCACHEFS_IO_MISC_H
 
 int bch2_extent_fallocate(struct btree_trans *, subvol_inum, struct btree_iter *,
-                         unsigned, struct bch_io_opts, s64 *,
+                         u64, struct bch_io_opts, s64 *,
                          struct write_point_specifier);
 int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
                   subvol_inum, u64, s64 *);
index 443c3ea655271efc649bb425d752ef9d447f4dd4..a56ed553dc15e6c709c5fed992d0a5b097170703 100644 (file)
@@ -643,7 +643,7 @@ csum_err:
                "data checksum error: expected %0llx:%0llx got %0llx:%0llx (type %s)",
                rbio->pick.crc.csum.hi, rbio->pick.crc.csum.lo,
                csum.hi, csum.lo, bch2_csum_types[crc.csum_type]);
-       bch2_io_error(ca);
+       bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
        bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
        goto out;
 decompression_err:
@@ -677,7 +677,7 @@ static void bch2_read_endio(struct bio *bio)
        if (!rbio->split)
                rbio->bio.bi_end_io = rbio->end_io;
 
-       if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
+       if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_read,
                                    rbio->read_pos.inode,
                                    rbio->read_pos.offset,
                                    "data read error: %s",
@@ -1025,7 +1025,7 @@ get_bio:
                trans->notrace_relock_fail = true;
        } else {
                /* Attempting reconstruct read: */
-               if (bch2_ec_read_extent(c, rbio)) {
+               if (bch2_ec_read_extent(trans, rbio)) {
                        bch2_rbio_error(rbio, READ_RETRY_AVOID, BLK_STS_IOERR);
                        goto out;
                }
index 6e4f85eb6ec8ddbaa81b4201d7660712f65474a0..f02b3f7d26a016594c3de2a8f25b0006638b91c2 100644 (file)
@@ -202,6 +202,17 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
        struct btree_iter iter;
        struct bkey_i *k;
        struct bkey_i_inode_v3 *inode;
+       /*
+        * Crazy performance optimization:
+        * Every extent update needs to also update the inode: the inode trigger
+        * will set bi->journal_seq to the journal sequence number of this
+        * transaction - for fsync.
+        *
+        * But if that's the only reason we're updating the inode (we're not
+        * updating bi_size or bi_sectors), then we don't need the inode update
+        * to be journalled - if we crash, the bi_journal_seq update will be
+        * lost, but that's fine.
+        */
        unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
        int ret;
 
@@ -223,7 +234,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
 
        inode = bkey_i_to_inode_v3(k);
 
-       if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
+       if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_i_size_dirty) &&
            new_i_size > le64_to_cpu(inode->v.bi_size)) {
                inode->v.bi_size = cpu_to_le64(new_i_size);
                inode_update_flags = 0;
@@ -351,10 +362,13 @@ static int bch2_write_index_default(struct bch_write_op *op)
                                     bkey_start_pos(&sk.k->k),
                                     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
-               ret = bch2_extent_update(trans, inum, &iter, sk.k,
-                                        &op->res,
-                                        op->new_i_size, &op->i_sectors_delta,
-                                        op->flags & BCH_WRITE_CHECK_ENOSPC);
+               ret =   bch2_bkey_set_needs_rebalance(c, sk.k,
+                                       op->opts.background_target,
+                                       op->opts.background_compression) ?:
+                       bch2_extent_update(trans, inum, &iter, sk.k,
+                                       &op->res,
+                                       op->new_i_size, &op->i_sectors_delta,
+                                       op->flags & BCH_WRITE_CHECK_ENOSPC);
                bch2_trans_iter_exit(trans, &iter);
 
                if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -495,7 +509,6 @@ static void __bch2_write_index(struct bch_write_op *op)
 {
        struct bch_fs *c = op->c;
        struct keylist *keys = &op->insert_keys;
-       struct bkey_i *k;
        unsigned dev;
        int ret = 0;
 
@@ -505,14 +518,6 @@ static void __bch2_write_index(struct bch_write_op *op)
                        goto err;
        }
 
-       /*
-        * probably not the ideal place to hook this in, but I don't
-        * particularly want to plumb io_opts all the way through the btree
-        * update stack right now
-        */
-       for_each_keylist_key(keys, k)
-               bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
-
        if (!bch2_keylist_empty(keys)) {
                u64 sectors_start = keylist_sectors(keys);
 
@@ -643,7 +648,7 @@ static void bch2_write_endio(struct bio *bio)
        struct bch_fs *c                = wbio->c;
        struct bch_dev *ca              = bch_dev_bkey_exists(c, wbio->dev);
 
-       if (bch2_dev_inum_io_err_on(bio->bi_status, ca,
+       if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
                                    op->pos.inode,
                                    wbio->inode_offset << 9,
                                    "data write error: %s",
@@ -816,6 +821,7 @@ static enum prep_encoded_ret {
 
        /* Can we just write the entire extent as is? */
        if (op->crc.uncompressed_size == op->crc.live_size &&
+           op->crc.uncompressed_size <= c->opts.encoded_extent_max >> 9 &&
            op->crc.compressed_size <= wp->sectors_free &&
            (op->crc.compression_type == bch2_compression_opt_to_type(op->compression_opt) ||
             op->incompressible)) {
@@ -1091,9 +1097,7 @@ static bool bch2_extent_is_writeable(struct bch_write_op *op,
 
        e = bkey_s_c_to_extent(k);
        extent_for_each_ptr_decode(e, p, entry) {
-               if (p.crc.csum_type ||
-                   crc_is_compressed(p.crc) ||
-                   p.has_ec)
+               if (crc_is_encoded(p.crc) || p.has_ec)
                        return false;
 
                replicas += bch2_extent_ptr_durability(c, &p);
index 0e7a9ffa3671f729459a3f1a6032021e09937925..5b5d69f2316b216746c0c08db2346c2c8c95ff16 100644 (file)
@@ -1019,6 +1019,25 @@ err:
        return ret;
 }
 
+int bch2_fs_journal_alloc(struct bch_fs *c)
+{
+       struct bch_dev *ca;
+       unsigned i;
+
+       for_each_online_member(ca, c, i) {
+               if (ca->journal.nr)
+                       continue;
+
+               int ret = bch2_dev_journal_alloc(ca);
+               if (ret) {
+                       percpu_ref_put(&ca->io_ref);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
 /* startup/shutdown: */
 
 static bool bch2_journal_writing_to_device(struct journal *j, unsigned dev_idx)
index 491133cc52f3bf38f9c80cf94daf7d0d5b0cc6c1..011711e99c8d825ec968cf513f82c08a66ecabc5 100644 (file)
@@ -534,6 +534,7 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *);
 int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *,
                                unsigned nr);
 int bch2_dev_journal_alloc(struct bch_dev *);
+int bch2_fs_journal_alloc(struct bch_fs *);
 
 void bch2_dev_journal_stop(struct journal *, struct bch_dev *);
 
index 6a3d6a374e9cc4385547eaf8a32894cfda15b49e..f4bc2cdbfdd7921b4d562cf0df7d29b1f8c51c87 100644 (file)
@@ -140,7 +140,8 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
                if (!dup->csum_good)
                        goto replace;
 
-               fsck_err(c, "found duplicate but non identical journal entries (seq %llu)",
+               fsck_err(c, journal_entry_replicas_data_mismatch,
+                        "found duplicate but non identical journal entries (seq %llu)",
                         le64_to_cpu(j->seq));
                i = dup;
                goto found;
@@ -235,7 +236,7 @@ static void journal_entry_err_msg(struct printbuf *out,
        prt_str(out, ": ");
 }
 
-#define journal_entry_err(c, version, jset, entry, msg, ...)           \
+#define journal_entry_err(c, version, jset, entry, _err, msg, ...)     \
 ({                                                                     \
        struct printbuf _buf = PRINTBUF;                                \
                                                                        \
@@ -244,9 +245,10 @@ static void journal_entry_err_msg(struct printbuf *out,
                                                                        \
        switch (flags & BKEY_INVALID_WRITE) {                           \
        case READ:                                                      \
-               mustfix_fsck_err(c, "%s", _buf.buf);                    \
+               mustfix_fsck_err(c, _err, "%s", _buf.buf);              \
                break;                                                  \
        case WRITE:                                                     \
+               bch2_sb_error_count(c, BCH_FSCK_ERR_##_err);            \
                bch_err(c, "corrupt metadata before write: %s\n", _buf.buf);\
                if (bch2_fs_inconsistent(c)) {                          \
                        ret = -BCH_ERR_fsck_errors_not_fixed;           \
@@ -259,8 +261,8 @@ static void journal_entry_err_msg(struct printbuf *out,
        true;                                                           \
 })
 
-#define journal_entry_err_on(cond, c, version, jset, entry, msg, ...)  \
-       ((cond) ? journal_entry_err(c, version, jset, entry, msg, ##__VA_ARGS__) : false)
+#define journal_entry_err_on(cond, ...)                                        \
+       ((cond) ? journal_entry_err(__VA_ARGS__) : false)
 
 #define FSCK_DELETED_KEY       5
 
@@ -277,7 +279,10 @@ static int journal_validate_key(struct bch_fs *c,
        struct printbuf buf = PRINTBUF;
        int ret = 0;
 
-       if (journal_entry_err_on(!k->k.u64s, c, version, jset, entry, "k->u64s 0")) {
+       if (journal_entry_err_on(!k->k.u64s,
+                                c, version, jset, entry,
+                                journal_entry_bkey_u64s_0,
+                                "k->u64s 0")) {
                entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
                journal_entry_null_range(vstruct_next(entry), next);
                return FSCK_DELETED_KEY;
@@ -286,6 +291,7 @@ static int journal_validate_key(struct bch_fs *c,
        if (journal_entry_err_on((void *) bkey_next(k) >
                                 (void *) vstruct_next(entry),
                                 c, version, jset, entry,
+                                journal_entry_bkey_past_end,
                                 "extends past end of journal entry")) {
                entry->u64s = cpu_to_le16((u64 *) k - entry->_data);
                journal_entry_null_range(vstruct_next(entry), next);
@@ -294,6 +300,7 @@ static int journal_validate_key(struct bch_fs *c,
 
        if (journal_entry_err_on(k->k.format != KEY_FORMAT_CURRENT,
                                 c, version, jset, entry,
+                                journal_entry_bkey_bad_format,
                                 "bad format %u", k->k.format)) {
                le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
                memmove(k, bkey_next(k), next - (void *) bkey_next(k));
@@ -317,7 +324,8 @@ static int journal_validate_key(struct bch_fs *c,
                bch2_bkey_invalid(c, bkey_i_to_s_c(k),
                                  __btree_node_type(level, btree_id), write, &buf);
 
-               mustfix_fsck_err(c, "%s", buf.buf);
+               mustfix_fsck_err(c, journal_entry_bkey_invalid,
+                                "%s", buf.buf);
 
                le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
                memmove(k, bkey_next(k), next - (void *) bkey_next(k));
@@ -369,7 +377,7 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
                        prt_newline(out);
                        prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
                }
-               prt_printf(out, "btree=%s l=%u ", bch2_btree_ids[entry->btree_id], entry->level);
+               prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
                bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
                first = false;
        }
@@ -387,6 +395,7 @@ static int journal_entry_btree_root_validate(struct bch_fs *c,
        if (journal_entry_err_on(!entry->u64s ||
                                 le16_to_cpu(entry->u64s) != k->k.u64s,
                                 c, version, jset, entry,
+                                journal_entry_btree_root_bad_size,
                                 "invalid btree root journal entry: wrong number of keys")) {
                void *next = vstruct_next(entry);
                /*
@@ -436,6 +445,7 @@ static int journal_entry_blacklist_validate(struct bch_fs *c,
 
        if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 1,
                                 c, version, jset, entry,
+                                journal_entry_blacklist_bad_size,
                "invalid journal seq blacklist entry: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
        }
@@ -463,6 +473,7 @@ static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
 
        if (journal_entry_err_on(le16_to_cpu(entry->u64s) != 2,
                                 c, version, jset, entry,
+                                journal_entry_blacklist_v2_bad_size,
                "invalid journal seq blacklist entry: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                goto out;
@@ -473,6 +484,7 @@ static int journal_entry_blacklist_v2_validate(struct bch_fs *c,
        if (journal_entry_err_on(le64_to_cpu(bl_entry->start) >
                                 le64_to_cpu(bl_entry->end),
                                 c, version, jset, entry,
+                                journal_entry_blacklist_v2_start_past_end,
                "invalid journal seq blacklist entry: start > end")) {
                journal_entry_null_range(entry, vstruct_next(entry));
        }
@@ -505,6 +517,7 @@ static int journal_entry_usage_validate(struct bch_fs *c,
 
        if (journal_entry_err_on(bytes < sizeof(*u),
                                 c, version, jset, entry,
+                                journal_entry_usage_bad_size,
                                 "invalid journal entry usage: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
@@ -539,6 +552,7 @@ static int journal_entry_data_usage_validate(struct bch_fs *c,
        if (journal_entry_err_on(bytes < sizeof(*u) ||
                                 bytes < sizeof(*u) + u->r.nr_devs,
                                 c, version, jset, entry,
+                                journal_entry_data_usage_bad_size,
                                 "invalid journal entry usage: bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
@@ -570,13 +584,17 @@ static int journal_entry_clock_validate(struct bch_fs *c,
        int ret = 0;
 
        if (journal_entry_err_on(bytes != sizeof(*clock),
-                                c, version, jset, entry, "bad size")) {
+                                c, version, jset, entry,
+                                journal_entry_clock_bad_size,
+                                "bad size")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }
 
        if (journal_entry_err_on(clock->rw > 1,
-                                c, version, jset, entry, "bad rw")) {
+                                c, version, jset, entry,
+                                journal_entry_clock_bad_rw,
+                                "bad rw")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }
@@ -608,7 +626,9 @@ static int journal_entry_dev_usage_validate(struct bch_fs *c,
        int ret = 0;
 
        if (journal_entry_err_on(bytes < expected,
-                                c, version, jset, entry, "bad size (%u < %u)",
+                                c, version, jset, entry,
+                                journal_entry_dev_usage_bad_size,
+                                "bad size (%u < %u)",
                                 bytes, expected)) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
@@ -617,13 +637,17 @@ static int journal_entry_dev_usage_validate(struct bch_fs *c,
        dev = le32_to_cpu(u->dev);
 
        if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
-                                c, version, jset, entry, "bad dev")) {
+                                c, version, jset, entry,
+                                journal_entry_dev_usage_bad_dev,
+                                "bad dev")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }
 
        if (journal_entry_err_on(u->pad,
-                                c, version, jset, entry, "bad pad")) {
+                                c, version, jset, entry,
+                                journal_entry_dev_usage_bad_pad,
+                                "bad pad")) {
                journal_entry_null_range(entry, vstruct_next(entry));
                return ret;
        }
@@ -738,7 +762,8 @@ static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
 
        vstruct_for_each(jset, entry) {
                if (journal_entry_err_on(vstruct_next(entry) > vstruct_last(jset),
-                                        c, version, jset, entry,
+                               c, version, jset, entry,
+                               journal_entry_past_jset_end,
                                "journal entry extends past end of jset")) {
                        jset->u64s = cpu_to_le32((u64 *) entry - jset->_data);
                        break;
@@ -767,6 +792,7 @@ static int jset_validate(struct bch_fs *c,
        version = le32_to_cpu(jset->version);
        if (journal_entry_err_on(!bch2_version_compatible(version),
                        c, version, jset, NULL,
+                       jset_unsupported_version,
                        "%s sector %llu seq %llu: incompatible journal entry version %u.%u",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq),
@@ -777,7 +803,8 @@ static int jset_validate(struct bch_fs *c,
        }
 
        if (journal_entry_err_on(!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(jset)),
-                                c, version, jset, NULL,
+                       c, version, jset, NULL,
+                       jset_unknown_csum,
                        "%s sector %llu seq %llu: journal entry with unknown csum type %llu",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq),
@@ -788,6 +815,7 @@ static int jset_validate(struct bch_fs *c,
        if (journal_entry_err_on(!JSET_NO_FLUSH(jset) &&
                                 le64_to_cpu(jset->last_seq) > le64_to_cpu(jset->seq),
                                 c, version, jset, NULL,
+                                jset_last_seq_newer_than_seq,
                                 "invalid journal entry: last_seq > seq (%llu > %llu)",
                                 le64_to_cpu(jset->last_seq),
                                 le64_to_cpu(jset->seq))) {
@@ -816,7 +844,8 @@ static int jset_validate_early(struct bch_fs *c,
 
        version = le32_to_cpu(jset->version);
        if (journal_entry_err_on(!bch2_version_compatible(version),
-                        c, version, jset, NULL,
+                       c, version, jset, NULL,
+                       jset_unsupported_version,
                        "%s sector %llu seq %llu: unknown journal entry version %u.%u",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq),
@@ -831,7 +860,8 @@ static int jset_validate_early(struct bch_fs *c,
                return JOURNAL_ENTRY_REREAD;
 
        if (journal_entry_err_on(bytes > bucket_sectors_left << 9,
-                        c, version, jset, NULL,
+                       c, version, jset, NULL,
+                       jset_past_bucket_end,
                        "%s sector %llu seq %llu: journal entry too big (%zu bytes)",
                        ca ? ca->name : c->name,
                        sector, le64_to_cpu(jset->seq), bytes))
@@ -900,7 +930,7 @@ reread:
                        ret = submit_bio_wait(bio);
                        kfree(bio);
 
-                       if (bch2_dev_io_err_on(ret, ca,
+                       if (bch2_dev_io_err_on(ret, ca, BCH_MEMBER_ERROR_read,
                                               "journal read error: sector %llu",
                                               offset) ||
                            bch2_meta_read_fault("journal")) {
@@ -956,7 +986,8 @@ reread:
                ja->bucket_seq[bucket] = le64_to_cpu(j->seq);
 
                csum_good = jset_csum_good(c, j);
-               if (!csum_good)
+               if (bch2_dev_io_err_on(!csum_good, ca, BCH_MEMBER_ERROR_checksum,
+                                      "journal checksum error"))
                        saw_bad = true;
 
                ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j),
@@ -1172,6 +1203,7 @@ int bch2_journal_read(struct bch_fs *c,
 
                if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq),
                                         c, le32_to_cpu(i->j.version), &i->j, NULL,
+                                        jset_last_seq_newer_than_seq,
                                         "invalid journal entry: last_seq > seq (%llu > %llu)",
                                         le64_to_cpu(i->j.last_seq),
                                         le64_to_cpu(i->j.seq)))
@@ -1188,7 +1220,8 @@ int bch2_journal_read(struct bch_fs *c,
        }
 
        if (!*last_seq) {
-               fsck_err(c, "journal read done, but no entries found after dropping non-flushes");
+               fsck_err(c, dirty_but_no_journal_entries_post_drop_nonflushes,
+                        "journal read done, but no entries found after dropping non-flushes");
                return 0;
        }
 
@@ -1214,6 +1247,7 @@ int bch2_journal_read(struct bch_fs *c,
 
                if (bch2_journal_seq_is_blacklisted(c, seq, true)) {
                        fsck_err_on(!JSET_NO_FLUSH(&i->j), c,
+                                   jset_seq_blacklisted,
                                    "found blacklisted journal entry %llu", seq);
                        i->ignore = true;
                }
@@ -1254,7 +1288,8 @@ int bch2_journal_read(struct bch_fs *c,
                        bch2_journal_ptrs_to_text(&buf2, c, i);
 
                        missing_end = seq - 1;
-                       fsck_err(c, "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
+                       fsck_err(c, journal_entries_missing,
+                                "journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
                                 "  prev at %s\n"
                                 "  next at %s",
                                 missing_start, missing_end,
@@ -1309,7 +1344,8 @@ int bch2_journal_read(struct bch_fs *c,
                if (!degraded &&
                    !bch2_replicas_marked(c, &replicas.e) &&
                    (le64_to_cpu(i->j.seq) == *last_seq ||
-                    fsck_err(c, "superblock not marked as containing replicas for journal entry %llu\n  %s",
+                    fsck_err(c, journal_entry_replicas_not_marked,
+                             "superblock not marked as containing replicas for journal entry %llu\n  %s",
                              le64_to_cpu(i->j.seq), buf.buf))) {
                        ret = bch2_mark_replicas(c, &replicas.e);
                        if (ret)
@@ -1581,7 +1617,8 @@ static void journal_write_endio(struct bio *bio)
        struct journal_buf *w = journal_last_unwritten_buf(j);
        unsigned long flags;
 
-       if (bch2_dev_io_err_on(bio->bi_status, ca, "error writing journal entry %llu: %s",
+       if (bch2_dev_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
+                              "error writing journal entry %llu: %s",
                               le64_to_cpu(w->data->seq),
                               bch2_blk_status_to_str(bio->bi_status)) ||
            bch2_meta_write_fault("journal")) {
@@ -1641,9 +1678,15 @@ static void do_journal_write(struct closure *cl)
        continue_at(cl, journal_write_done, c->io_complete_wq);
 }
 
-static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset)
+static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
 {
-       struct jset_entry *i, *next, *prev = NULL;
+       struct bch_fs *c = container_of(j, struct bch_fs, journal);
+       struct jset_entry *start, *end, *i, *next, *prev = NULL;
+       struct jset *jset = w->data;
+       unsigned sectors, bytes, u64s;
+       bool validate_before_checksum = false;
+       unsigned long btree_roots_have = 0;
+       int ret;
 
        /*
         * Simple compaction, dropping empty jset_entries (from journal
@@ -1660,8 +1703,20 @@ static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset
                if (!u64s)
                        continue;
 
-               if (i->type == BCH_JSET_ENTRY_btree_root)
+               /*
+                * New btree roots are set by journalling them; when the journal
+                * entry gets written we have to propagate them to
+                * c->btree_roots
+                *
+                * But, every journal entry we write has to contain all the
+                * btree roots (at least for now); so after we copy btree roots
+                * to c->btree_roots we have to get any missing btree roots and
+                * add them to this journal entry:
+                */
+               if (i->type == BCH_JSET_ENTRY_btree_root) {
                        bch2_journal_entry_to_btree_root(c, i);
+                       __set_bit(i->btree_id, &btree_roots_have);
+               }
 
                /* Can we merge with previous entry? */
                if (prev &&
@@ -1685,85 +1740,10 @@ static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset
 
        prev = prev ? vstruct_next(prev) : jset->start;
        jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
-}
-
-void bch2_journal_write(struct closure *cl)
-{
-       struct journal *j = container_of(cl, struct journal, io);
-       struct bch_fs *c = container_of(j, struct bch_fs, journal);
-       struct bch_dev *ca;
-       struct journal_buf *w = journal_last_unwritten_buf(j);
-       struct bch_replicas_padded replicas;
-       struct jset_entry *start, *end;
-       struct jset *jset;
-       struct bio *bio;
-       struct printbuf journal_debug_buf = PRINTBUF;
-       bool validate_before_checksum = false;
-       unsigned i, sectors, bytes, u64s, nr_rw_members = 0;
-       int ret;
-
-       BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
-
-       journal_buf_realloc(j, w);
-       jset = w->data;
-
-       j->write_start_time = local_clock();
-
-       spin_lock(&j->lock);
-
-       /*
-        * If the journal is in an error state - we did an emergency shutdown -
-        * we prefer to continue doing journal writes. We just mark them as
-        * noflush so they'll never be used, but they'll still be visible by the
-        * list_journal tool - this helps in debugging.
-        *
-        * There's a caveat: the first journal write after marking the
-        * superblock dirty must always be a flush write, because on startup
-        * from a clean shutdown we didn't necessarily read the journal and the
-        * new journal write might overwrite whatever was in the journal
-        * previously - we can't leave the journal without any flush writes in
-        * it.
-        *
-        * So if we're in an error state, and we're still starting up, we don't
-        * write anything at all.
-        */
-       if (!test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags) &&
-           (bch2_journal_error(j) ||
-            w->noflush ||
-            (!w->must_flush &&
-             (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
-             test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)))) {
-               w->noflush = true;
-               SET_JSET_NO_FLUSH(jset, true);
-               jset->last_seq  = 0;
-               w->last_seq     = 0;
-
-               j->nr_noflush_writes++;
-       } else if (!bch2_journal_error(j)) {
-               j->last_flush_write = jiffies;
-               j->nr_flush_writes++;
-               clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags);
-       } else {
-               spin_unlock(&j->lock);
-               goto err;
-       }
-       spin_unlock(&j->lock);
-
-       /*
-        * New btree roots are set by journalling them; when the journal entry
-        * gets written we have to propagate them to c->btree_roots
-        *
-        * But, every journal entry we write has to contain all the btree roots
-        * (at least for now); so after we copy btree roots to c->btree_roots we
-        * have to get any missing btree roots and add them to this journal
-        * entry:
-        */
-
-       bch2_journal_entries_postprocess(c, jset);
 
        start = end = vstruct_last(jset);
 
-       end     = bch2_btree_roots_to_journal_entries(c, jset->start, end);
+       end     = bch2_btree_roots_to_journal_entries(c, end, btree_roots_have);
 
        bch2_journal_super_entries_add_common(c, &end,
                                le64_to_cpu(jset->seq));
@@ -1779,7 +1759,7 @@ void bch2_journal_write(struct closure *cl)
                bch2_fs_fatal_error(c, "aieeee! journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
                                    vstruct_bytes(jset), w->sectors << 9,
                                    u64s, w->u64s_reserved, j->entry_u64s_reserved);
-               goto err;
+               return -EINVAL;
        }
 
        jset->magic             = cpu_to_le64(jset_magic(c));
@@ -1798,37 +1778,117 @@ void bch2_journal_write(struct closure *cl)
                validate_before_checksum = true;
 
        if (validate_before_checksum &&
-           jset_validate(c, NULL, jset, 0, WRITE))
-               goto err;
+           (ret = jset_validate(c, NULL, jset, 0, WRITE)))
+               return ret;
 
        ret = bch2_encrypt(c, JSET_CSUM_TYPE(jset), journal_nonce(jset),
                    jset->encrypted_start,
                    vstruct_end(jset) - (void *) jset->encrypted_start);
        if (bch2_fs_fatal_err_on(ret, c,
                        "error decrypting journal entry: %i", ret))
-               goto err;
+               return ret;
 
        jset->csum = csum_vstruct(c, JSET_CSUM_TYPE(jset),
                                  journal_nonce(jset), jset);
 
        if (!validate_before_checksum &&
-           jset_validate(c, NULL, jset, 0, WRITE))
-               goto err;
+           (ret = jset_validate(c, NULL, jset, 0, WRITE)))
+               return ret;
 
        memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
+       return 0;
+}
+
+static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w)
+{
+       struct bch_fs *c = container_of(j, struct bch_fs, journal);
+       int error = bch2_journal_error(j);
+
+       /*
+        * If the journal is in an error state - we did an emergency shutdown -
+        * we prefer to continue doing journal writes. We just mark them as
+        * noflush so they'll never be used, but they'll still be visible by the
+        * list_journal tool - this helps in debugging.
+        *
+        * There's a caveat: the first journal write after marking the
+        * superblock dirty must always be a flush write, because on startup
+        * from a clean shutdown we didn't necessarily read the journal and the
+        * new journal write might overwrite whatever was in the journal
+        * previously - we can't leave the journal without any flush writes in
+        * it.
+        *
+        * So if we're in an error state, and we're still starting up, we don't
+        * write anything at all.
+        */
+       if (error && test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags))
+               return -EIO;
+
+       if (error ||
+           w->noflush ||
+           (!w->must_flush &&
+            (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
+            test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) {
+                    w->noflush = true;
+               SET_JSET_NO_FLUSH(w->data, true);
+               w->data->last_seq       = 0;
+               w->last_seq             = 0;
+
+               j->nr_noflush_writes++;
+       } else {
+               j->last_flush_write = jiffies;
+               j->nr_flush_writes++;
+               clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags);
+       }
+
+       return 0;
+}
+
+void bch2_journal_write(struct closure *cl)
+{
+       struct journal *j = container_of(cl, struct journal, io);
+       struct bch_fs *c = container_of(j, struct bch_fs, journal);
+       struct bch_dev *ca;
+       struct journal_buf *w = journal_last_unwritten_buf(j);
+       struct bch_replicas_padded replicas;
+       struct bio *bio;
+       struct printbuf journal_debug_buf = PRINTBUF;
+       unsigned i, nr_rw_members = 0;
+       int ret;
+
+       BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
+
+       j->write_start_time = local_clock();
 
-retry_alloc:
        spin_lock(&j->lock);
-       ret = journal_write_alloc(j, w);
+       ret = bch2_journal_write_pick_flush(j, w);
+       spin_unlock(&j->lock);
+       if (ret)
+               goto err;
+
+       journal_buf_realloc(j, w);
+
+       ret = bch2_journal_write_prep(j, w);
+       if (ret)
+               goto err;
+
+       while (1) {
+               spin_lock(&j->lock);
+               ret = journal_write_alloc(j, w);
+               if (!ret || !j->can_discard)
+                       break;
 
-       if (ret && j->can_discard) {
                spin_unlock(&j->lock);
                bch2_journal_do_discards(j);
-               goto retry_alloc;
        }
 
-       if (ret)
+       if (ret) {
                __bch2_journal_debug_to_text(&journal_debug_buf, j);
+               spin_unlock(&j->lock);
+               bch_err(c, "Unable to allocate journal write:\n%s",
+                       journal_debug_buf.buf);
+               printbuf_exit(&journal_debug_buf);
+               goto err;
+       }
 
        /*
         * write is allocated, no longer need to account for it in
@@ -1843,13 +1903,6 @@ retry_alloc:
        bch2_journal_space_available(j);
        spin_unlock(&j->lock);
 
-       if (ret) {
-               bch_err(c, "Unable to allocate journal write:\n%s",
-                       journal_debug_buf.buf);
-               printbuf_exit(&journal_debug_buf);
-               goto err;
-       }
-
        w->devs_written = bch2_bkey_devs(bkey_i_to_s_c(&w->key));
 
        if (c->opts.nochanges)
@@ -1871,7 +1924,7 @@ retry_alloc:
        if (ret)
                goto err;
 
-       if (!JSET_NO_FLUSH(jset) && w->separate_flush) {
+       if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
                for_each_rw_member(ca, c, i) {
                        percpu_ref_get(&ca->io_ref);
 
index 215a653322f3b49c58fa0a4a8c775c1785ff1aeb..a5cc0ed195d6324d1f49718d5860b24045579f1b 100644 (file)
 #include "recovery.h"
 
 /* KEY_TYPE_lru is obsolete: */
-int bch2_lru_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_lru_invalid(struct bch_fs *c, struct bkey_s_c k,
                     enum bkey_invalid_flags flags,
                     struct printbuf *err)
 {
-       if (!lru_pos_time(k.k->p)) {
-               prt_printf(err, "lru entry at time=0");
-               return -BCH_ERR_invalid_bkey;
-
-       }
+       int ret = 0;
 
-       return 0;
+       bkey_fsck_err_on(!lru_pos_time(k.k->p), c, err,
+                        lru_entry_at_time_0,
+                        "lru entry at time=0");
+fsck_err:
+       return ret;
 }
 
 void bch2_lru_to_text(struct printbuf *out, struct bch_fs *c,
@@ -95,6 +95,7 @@ static int bch2_check_lru_key(struct btree_trans *trans,
        int ret;
 
        if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_pos), c,
+                       lru_entry_to_invalid_bucket,
                        "lru key points to nonexistent device:bucket %llu:%llu",
                        alloc_pos.inode, alloc_pos.offset))
                return bch2_btree_delete_at(trans, lru_iter, 0);
@@ -125,7 +126,8 @@ static int bch2_check_lru_key(struct btree_trans *trans,
                }
 
                if (c->opts.reconstruct_alloc ||
-                   fsck_err(c, "incorrect lru entry: lru %s time %llu\n"
+                   fsck_err(c, lru_entry_bad,
+                            "incorrect lru entry: lru %s time %llu\n"
                             "  %s\n"
                             "  for %s",
                             bch2_lru_types[type],
index be66bf9ad80911006cff4b6d5ea139141511e3c0..429dca816df5c5049c85e31ea20eb1e92ea694cf 100644 (file)
@@ -48,7 +48,7 @@ static inline enum bch_lru_type lru_type(struct bkey_s_c l)
        return BCH_LRU_read;
 }
 
-int bch2_lru_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_lru_invalid(struct bch_fs *, struct bkey_s_c,
                     enum bkey_invalid_flags, struct printbuf *);
 void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
index 39a14e3216807d222fc11856fa285448f4a24ca3..ab749bf2fcbc551e68753857efdf008848d140b7 100644 (file)
@@ -20,6 +20,7 @@
 #include "keylist.h"
 #include "move.h"
 #include "replicas.h"
+#include "snapshot.h"
 #include "super-io.h"
 #include "trace.h"
 
@@ -59,20 +60,6 @@ static void trace_move_extent_alloc_mem_fail2(struct bch_fs *c, struct bkey_s_c
        }
 }
 
-static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats)
-{
-       mutex_lock(&c->data_progress_lock);
-       list_add(&stats->list, &c->data_progress_list);
-       mutex_unlock(&c->data_progress_lock);
-}
-
-static void progress_list_del(struct bch_fs *c, struct bch_move_stats *stats)
-{
-       mutex_lock(&c->data_progress_lock);
-       list_del(&stats->list);
-       mutex_unlock(&c->data_progress_lock);
-}
-
 struct moving_io {
        struct list_head                read_list;
        struct list_head                io_list;
@@ -156,35 +143,31 @@ static void move_read_endio(struct bio *bio)
        closure_put(&ctxt->cl);
 }
 
-void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt,
-                                       struct btree_trans *trans)
+void bch2_moving_ctxt_do_pending_writes(struct moving_context *ctxt)
 {
        struct moving_io *io;
 
-       if (trans)
-               bch2_trans_unlock(trans);
-
        while ((io = bch2_moving_ctxt_next_pending_write(ctxt))) {
+               bch2_trans_unlock_long(ctxt->trans);
                list_del(&io->read_list);
                move_write(io);
        }
 }
 
-static void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt,
-                                      struct btree_trans *trans)
+void bch2_move_ctxt_wait_for_io(struct moving_context *ctxt)
 {
        unsigned sectors_pending = atomic_read(&ctxt->write_sectors);
 
-       move_ctxt_wait_event(ctxt, trans,
+       move_ctxt_wait_event(ctxt,
                !atomic_read(&ctxt->write_sectors) ||
                atomic_read(&ctxt->write_sectors) != sectors_pending);
 }
 
 void bch2_moving_ctxt_exit(struct moving_context *ctxt)
 {
-       struct bch_fs *c = ctxt->c;
+       struct bch_fs *c = ctxt->trans->c;
 
-       move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
+       move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
        closure_sync(&ctxt->cl);
 
        EBUG_ON(atomic_read(&ctxt->write_sectors));
@@ -192,16 +175,12 @@ void bch2_moving_ctxt_exit(struct moving_context *ctxt)
        EBUG_ON(atomic_read(&ctxt->read_sectors));
        EBUG_ON(atomic_read(&ctxt->read_ios));
 
-       if (ctxt->stats) {
-               progress_list_del(c, ctxt->stats);
-               trace_move_data(c,
-                               atomic64_read(&ctxt->stats->sectors_moved),
-                               atomic64_read(&ctxt->stats->keys_moved));
-       }
-
        mutex_lock(&c->moving_context_lock);
        list_del(&ctxt->list);
        mutex_unlock(&c->moving_context_lock);
+
+       bch2_trans_put(ctxt->trans);
+       memset(ctxt, 0, sizeof(*ctxt));
 }
 
 void bch2_moving_ctxt_init(struct moving_context *ctxt,
@@ -213,7 +192,7 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt,
 {
        memset(ctxt, 0, sizeof(*ctxt));
 
-       ctxt->c         = c;
+       ctxt->trans     = bch2_trans_get(c);
        ctxt->fn        = (void *) _RET_IP_;
        ctxt->rate      = rate;
        ctxt->stats     = stats;
@@ -230,16 +209,17 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt,
        mutex_lock(&c->moving_context_lock);
        list_add(&ctxt->list, &c->moving_context_list);
        mutex_unlock(&c->moving_context_lock);
+}
 
-       if (stats) {
-               progress_list_add(c, stats);
-               stats->data_type = BCH_DATA_user;
-       }
+void bch2_move_stats_exit(struct bch_move_stats *stats, struct bch_fs *c)
+{
+       trace_move_data(c, stats);
 }
 
 void bch2_move_stats_init(struct bch_move_stats *stats, char *name)
 {
        memset(stats, 0, sizeof(*stats));
+       stats->data_type = BCH_DATA_user;
        scnprintf(stats->name, sizeof(stats->name), "%s", name);
 }
 
@@ -286,15 +266,14 @@ static int bch2_extent_drop_ptrs(struct btree_trans *trans,
                bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
 }
 
-static int bch2_move_extent(struct btree_trans *trans,
-                           struct btree_iter *iter,
-                           struct moving_context *ctxt,
-                           struct move_bucket_in_flight *bucket_in_flight,
-                           struct bch_io_opts io_opts,
-                           enum btree_id btree_id,
-                           struct bkey_s_c k,
-                           struct data_update_opts data_opts)
+int bch2_move_extent(struct moving_context *ctxt,
+                    struct move_bucket_in_flight *bucket_in_flight,
+                    struct btree_iter *iter,
+                    struct bkey_s_c k,
+                    struct bch_io_opts io_opts,
+                    struct data_update_opts data_opts)
 {
+       struct btree_trans *trans = ctxt->trans;
        struct bch_fs *c = trans->c;
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        struct moving_io *io;
@@ -303,6 +282,8 @@ static int bch2_move_extent(struct btree_trans *trans,
        unsigned sectors = k.k->size, pages;
        int ret = -ENOMEM;
 
+       if (ctxt->stats)
+               ctxt->stats->pos = BBPOS(iter->btree_id, iter->pos);
        trace_move_extent2(c, k);
 
        bch2_data_update_opts_normalize(k, &data_opts);
@@ -355,7 +336,7 @@ static int bch2_move_extent(struct btree_trans *trans,
        io->rbio.bio.bi_end_io          = move_read_endio;
 
        ret = bch2_data_update_init(trans, ctxt, &io->write, ctxt->wp,
-                                   io_opts, data_opts, btree_id, k);
+                                   io_opts, data_opts, iter->btree_id, k);
        if (ret && ret != -BCH_ERR_unwritten_extent_update)
                goto err_free_pages;
 
@@ -367,9 +348,11 @@ static int bch2_move_extent(struct btree_trans *trans,
 
        BUG_ON(ret);
 
-       io->write.ctxt = ctxt;
        io->write.op.end_io = move_write_done;
 
+       if (ctxt->rate)
+               bch2_ratelimit_increment(ctxt->rate, k.k->size);
+
        if (ctxt->stats) {
                atomic64_inc(&ctxt->stats->keys_moved);
                atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
@@ -399,7 +382,7 @@ static int bch2_move_extent(struct btree_trans *trans,
        closure_get(&ctxt->cl);
        bch2_read_extent(trans, &io->rbio,
                         bkey_start_pos(k.k),
-                        btree_id, k, 0,
+                        iter->btree_id, k, 0,
                         BCH_READ_NODECODE|
                         BCH_READ_LAST_FRAGMENT);
        return 0;
@@ -413,45 +396,96 @@ err:
        return ret;
 }
 
-static int lookup_inode(struct btree_trans *trans, struct bpos pos,
-                       struct bch_inode_unpacked *inode)
+struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
+                         struct per_snapshot_io_opts *io_opts,
+                         struct bkey_s_c extent_k)
+{
+       struct bch_fs *c = trans->c;
+       u32 restart_count = trans->restart_count;
+       int ret = 0;
+
+       if (io_opts->cur_inum != extent_k.k->p.inode) {
+               struct btree_iter iter;
+               struct bkey_s_c k;
+
+               io_opts->d.nr = 0;
+
+               for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_k.k->p.inode),
+                                  BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
+                       if (k.k->p.offset != extent_k.k->p.inode)
+                               break;
+
+                       if (!bkey_is_inode(k.k))
+                               continue;
+
+                       struct bch_inode_unpacked inode;
+                       BUG_ON(bch2_inode_unpack(k, &inode));
+
+                       struct snapshot_io_opts_entry e = { .snapshot = k.k->p.snapshot };
+                       bch2_inode_opts_get(&e.io_opts, trans->c, &inode);
+
+                       ret = darray_push(&io_opts->d, e);
+                       if (ret)
+                               break;
+               }
+               bch2_trans_iter_exit(trans, &iter);
+               io_opts->cur_inum = extent_k.k->p.inode;
+       }
+
+       ret = ret ?: trans_was_restarted(trans, restart_count);
+       if (ret)
+               return ERR_PTR(ret);
+
+       if (extent_k.k->p.snapshot) {
+               struct snapshot_io_opts_entry *i;
+               darray_for_each(io_opts->d, i)
+                       if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot))
+                               return &i->io_opts;
+       }
+
+       return &io_opts->fs_io_opts;
+}
+
+int bch2_move_get_io_opts_one(struct btree_trans *trans,
+                             struct bch_io_opts *io_opts,
+                             struct bkey_s_c extent_k)
 {
        struct btree_iter iter;
        struct bkey_s_c k;
        int ret;
 
-       bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, pos,
-                            BTREE_ITER_ALL_SNAPSHOTS);
-       k = bch2_btree_iter_peek(&iter);
+       /* reflink btree? */
+       if (!extent_k.k->p.inode) {
+               *io_opts = bch2_opts_to_inode_opts(trans->c->opts);
+               return 0;
+       }
+
+       k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
+                              SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot),
+                              BTREE_ITER_CACHED);
        ret = bkey_err(k);
-       if (ret)
-               goto err;
+       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+               return ret;
 
-       if (!k.k || !bkey_eq(k.k->p, pos)) {
-               ret = -BCH_ERR_ENOENT_inode;
-               goto err;
+       if (!ret && bkey_is_inode(k.k)) {
+               struct bch_inode_unpacked inode;
+               bch2_inode_unpack(k, &inode);
+               bch2_inode_opts_get(io_opts, trans->c, &inode);
+       } else {
+               *io_opts = bch2_opts_to_inode_opts(trans->c->opts);
        }
 
-       ret = bkey_is_inode(k.k) ? 0 : -EIO;
-       if (ret)
-               goto err;
-
-       ret = bch2_inode_unpack(k, inode);
-       if (ret)
-               goto err;
-err:
        bch2_trans_iter_exit(trans, &iter);
-       return ret;
+       return 0;
 }
 
-static int move_ratelimit(struct btree_trans *trans,
-                         struct moving_context *ctxt)
+int bch2_move_ratelimit(struct moving_context *ctxt)
 {
-       struct bch_fs *c = trans->c;
+       struct bch_fs *c = ctxt->trans->c;
        u64 delay;
 
-       if (ctxt->wait_on_copygc) {
-               bch2_trans_unlock(trans);
+       if (ctxt->wait_on_copygc && !c->copygc_running) {
+               bch2_trans_unlock_long(ctxt->trans);
                wait_event_killable(c->copygc_running_wq,
                                    !c->copygc_running ||
                                    kthread_should_stop());
@@ -460,8 +494,12 @@ static int move_ratelimit(struct btree_trans *trans,
        do {
                delay = ctxt->rate ? bch2_ratelimit_delay(ctxt->rate) : 0;
 
+
                if (delay) {
-                       bch2_trans_unlock(trans);
+                       if (delay > HZ / 10)
+                               bch2_trans_unlock_long(ctxt->trans);
+                       else
+                               bch2_trans_unlock(ctxt->trans);
                        set_current_state(TASK_INTERRUPTIBLE);
                }
 
@@ -474,7 +512,7 @@ static int move_ratelimit(struct btree_trans *trans,
                        schedule_timeout(delay);
 
                if (unlikely(freezing(current))) {
-                       move_ctxt_wait_event(ctxt, trans, list_empty(&ctxt->reads));
+                       move_ctxt_wait_event(ctxt, list_empty(&ctxt->reads));
                        try_to_freeze();
                }
        } while (delay);
@@ -483,7 +521,7 @@ static int move_ratelimit(struct btree_trans *trans,
         * XXX: these limits really ought to be per device, SSDs and hard drives
         * will want different limits
         */
-       move_ctxt_wait_event(ctxt, trans,
+       move_ctxt_wait_event(ctxt,
                atomic_read(&ctxt->write_sectors) < c->opts.move_bytes_in_flight >> 9 &&
                atomic_read(&ctxt->read_sectors) < c->opts.move_bytes_in_flight >> 9 &&
                atomic_read(&ctxt->write_ios) < c->opts.move_ios_in_flight &&
@@ -492,52 +530,28 @@ static int move_ratelimit(struct btree_trans *trans,
        return 0;
 }
 
-static int move_get_io_opts(struct btree_trans *trans,
-                           struct bch_io_opts *io_opts,
-                           struct bkey_s_c k, u64 *cur_inum)
-{
-       struct bch_inode_unpacked inode;
-       int ret;
-
-       if (*cur_inum == k.k->p.inode)
-               return 0;
-
-       ret = lookup_inode(trans,
-                          SPOS(0, k.k->p.inode, k.k->p.snapshot),
-                          &inode);
-       if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-               return ret;
-
-       if (!ret)
-               bch2_inode_opts_get(io_opts, trans->c, &inode);
-       else
-               *io_opts = bch2_opts_to_inode_opts(trans->c->opts);
-       *cur_inum = k.k->p.inode;
-       return 0;
-}
-
-static int __bch2_move_data(struct moving_context *ctxt,
-                           struct bpos start,
-                           struct bpos end,
-                           move_pred_fn pred, void *arg,
-                           enum btree_id btree_id)
+static int bch2_move_data_btree(struct moving_context *ctxt,
+                               struct bpos start,
+                               struct bpos end,
+                               move_pred_fn pred, void *arg,
+                               enum btree_id btree_id)
 {
-       struct bch_fs *c = ctxt->c;
-       struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
+       struct btree_trans *trans = ctxt->trans;
+       struct bch_fs *c = trans->c;
+       struct per_snapshot_io_opts snapshot_io_opts;
+       struct bch_io_opts *io_opts;
        struct bkey_buf sk;
-       struct btree_trans *trans = bch2_trans_get(c);
        struct btree_iter iter;
        struct bkey_s_c k;
        struct data_update_opts data_opts;
-       u64 cur_inum = U64_MAX;
        int ret = 0, ret2;
 
+       per_snapshot_io_opts_init(&snapshot_io_opts, c);
        bch2_bkey_buf_init(&sk);
 
        if (ctxt->stats) {
                ctxt->stats->data_type  = BCH_DATA_user;
-               ctxt->stats->btree_id   = btree_id;
-               ctxt->stats->pos        = start;
+               ctxt->stats->pos        = BBPOS(btree_id, start);
        }
 
        bch2_trans_iter_init(trans, &iter, btree_id, start,
@@ -547,7 +561,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
        if (ctxt->rate)
                bch2_ratelimit_reset(ctxt->rate);
 
-       while (!move_ratelimit(trans, ctxt)) {
+       while (!bch2_move_ratelimit(ctxt)) {
                bch2_trans_begin(trans);
 
                k = bch2_btree_iter_peek(&iter);
@@ -564,17 +578,18 @@ static int __bch2_move_data(struct moving_context *ctxt,
                        break;
 
                if (ctxt->stats)
-                       ctxt->stats->pos = iter.pos;
+                       ctxt->stats->pos = BBPOS(iter.btree_id, iter.pos);
 
                if (!bkey_extent_is_direct_data(k.k))
                        goto next_nondata;
 
-               ret = move_get_io_opts(trans, &io_opts, k, &cur_inum);
+               io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts, k);
+               ret = PTR_ERR_OR_ZERO(io_opts);
                if (ret)
                        continue;
 
                memset(&data_opts, 0, sizeof(data_opts));
-               if (!pred(c, arg, k, &io_opts, &data_opts))
+               if (!pred(c, arg, k, io_opts, &data_opts))
                        goto next;
 
                /*
@@ -584,24 +599,20 @@ static int __bch2_move_data(struct moving_context *ctxt,
                bch2_bkey_buf_reassemble(&sk, c, k);
                k = bkey_i_to_s_c(sk.k);
 
-               ret2 = bch2_move_extent(trans, &iter, ctxt, NULL,
-                                       io_opts, btree_id, k, data_opts);
+               ret2 = bch2_move_extent(ctxt, NULL, &iter, k, *io_opts, data_opts);
                if (ret2) {
                        if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
                                continue;
 
                        if (ret2 == -ENOMEM) {
                                /* memory allocation failure, wait for some IO to finish */
-                               bch2_move_ctxt_wait_for_io(ctxt, trans);
+                               bch2_move_ctxt_wait_for_io(ctxt);
                                continue;
                        }
 
                        /* XXX signal failure */
                        goto next;
                }
-
-               if (ctxt->rate)
-                       bch2_ratelimit_increment(ctxt->rate, k.k->size);
 next:
                if (ctxt->stats)
                        atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
@@ -610,59 +621,68 @@ next_nondata:
        }
 
        bch2_trans_iter_exit(trans, &iter);
-       bch2_trans_put(trans);
        bch2_bkey_buf_exit(&sk, c);
+       per_snapshot_io_opts_exit(&snapshot_io_opts);
 
        return ret;
 }
 
-int bch2_move_data(struct bch_fs *c,
-                  enum btree_id start_btree_id, struct bpos start_pos,
-                  enum btree_id end_btree_id,   struct bpos end_pos,
-                  struct bch_ratelimit *rate,
-                  struct bch_move_stats *stats,
-                  struct write_point_specifier wp,
-                  bool wait_on_copygc,
-                  move_pred_fn pred, void *arg)
+int __bch2_move_data(struct moving_context *ctxt,
+                    struct bbpos start,
+                    struct bbpos end,
+                    move_pred_fn pred, void *arg)
 {
-       struct moving_context ctxt;
+       struct bch_fs *c = ctxt->trans->c;
        enum btree_id id;
        int ret = 0;
 
-       bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
-
-       for (id = start_btree_id;
-            id <= min_t(unsigned, end_btree_id, btree_id_nr_alive(c) - 1);
+       for (id = start.btree;
+            id <= min_t(unsigned, end.btree, btree_id_nr_alive(c) - 1);
             id++) {
-               stats->btree_id = id;
+               ctxt->stats->pos = BBPOS(id, POS_MIN);
 
-               if (id != BTREE_ID_extents &&
-                   id != BTREE_ID_reflink)
+               if (!btree_type_has_ptrs(id) ||
+                   !bch2_btree_id_root(c, id)->b)
                        continue;
 
-               if (!bch2_btree_id_root(c, id)->b)
-                       continue;
-
-               ret = __bch2_move_data(&ctxt,
-                                      id == start_btree_id ? start_pos : POS_MIN,
-                                      id == end_btree_id   ? end_pos   : POS_MAX,
+               ret = bch2_move_data_btree(ctxt,
+                                      id == start.btree ? start.pos : POS_MIN,
+                                      id == end.btree   ? end.pos   : POS_MAX,
                                       pred, arg, id);
                if (ret)
                        break;
        }
 
+       return ret;
+}
+
+int bch2_move_data(struct bch_fs *c,
+                  struct bbpos start,
+                  struct bbpos end,
+                  struct bch_ratelimit *rate,
+                  struct bch_move_stats *stats,
+                  struct write_point_specifier wp,
+                  bool wait_on_copygc,
+                  move_pred_fn pred, void *arg)
+{
+
+       struct moving_context ctxt;
+       int ret;
+
+       bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
+       ret = __bch2_move_data(&ctxt, start, end, pred, arg);
        bch2_moving_ctxt_exit(&ctxt);
 
        return ret;
 }
 
-int __bch2_evacuate_bucket(struct btree_trans *trans,
-                          struct moving_context *ctxt,
+int __bch2_evacuate_bucket(struct moving_context *ctxt,
                           struct move_bucket_in_flight *bucket_in_flight,
                           struct bpos bucket, int gen,
                           struct data_update_opts _data_opts)
 {
-       struct bch_fs *c = ctxt->c;
+       struct btree_trans *trans = ctxt->trans;
+       struct bch_fs *c = trans->c;
        struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
        struct btree_iter iter;
        struct bkey_buf sk;
@@ -673,7 +693,6 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
        struct data_update_opts data_opts;
        unsigned dirty_sectors, bucket_size;
        u64 fragmentation;
-       u64 cur_inum = U64_MAX;
        struct bpos bp_pos = POS_MIN;
        int ret = 0;
 
@@ -708,7 +727,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
                goto err;
        }
 
-       while (!(ret = move_ratelimit(trans, ctxt))) {
+       while (!(ret = bch2_move_ratelimit(ctxt))) {
                bch2_trans_begin(trans);
 
                ret = bch2_get_next_backpointer(trans, bucket, gen,
@@ -737,7 +756,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
                        bch2_bkey_buf_reassemble(&sk, c, k);
                        k = bkey_i_to_s_c(sk.k);
 
-                       ret = move_get_io_opts(trans, &io_opts, k, &cur_inum);
+                       ret = bch2_move_get_io_opts_one(trans, &io_opts, k);
                        if (ret) {
                                bch2_trans_iter_exit(trans, &iter);
                                continue;
@@ -758,23 +777,20 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
                                i++;
                        }
 
-                       ret = bch2_move_extent(trans, &iter, ctxt,
-                                       bucket_in_flight,
-                                       io_opts, bp.btree_id, k, data_opts);
+                       ret = bch2_move_extent(ctxt, bucket_in_flight,
+                                              &iter, k, io_opts, data_opts);
                        bch2_trans_iter_exit(trans, &iter);
 
                        if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                                continue;
                        if (ret == -ENOMEM) {
                                /* memory allocation failure, wait for some IO to finish */
-                               bch2_move_ctxt_wait_for_io(ctxt, trans);
+                               bch2_move_ctxt_wait_for_io(ctxt);
                                continue;
                        }
                        if (ret)
                                goto err;
 
-                       if (ctxt->rate)
-                               bch2_ratelimit_increment(ctxt->rate, k.k->size);
                        if (ctxt->stats)
                                atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
                } else {
@@ -825,14 +841,12 @@ int bch2_evacuate_bucket(struct bch_fs *c,
                         struct write_point_specifier wp,
                         bool wait_on_copygc)
 {
-       struct btree_trans *trans = bch2_trans_get(c);
        struct moving_context ctxt;
        int ret;
 
        bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
-       ret = __bch2_evacuate_bucket(trans, &ctxt, NULL, bucket, gen, data_opts);
+       ret = __bch2_evacuate_bucket(&ctxt, NULL, bucket, gen, data_opts);
        bch2_moving_ctxt_exit(&ctxt);
-       bch2_trans_put(trans);
 
        return ret;
 }
@@ -849,21 +863,25 @@ static int bch2_move_btree(struct bch_fs *c,
 {
        bool kthread = (current->flags & PF_KTHREAD) != 0;
        struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
-       struct btree_trans *trans = bch2_trans_get(c);
+       struct moving_context ctxt;
+       struct btree_trans *trans;
        struct btree_iter iter;
        struct btree *b;
        enum btree_id id;
        struct data_update_opts data_opts;
        int ret = 0;
 
-       progress_list_add(c, stats);
+       bch2_moving_ctxt_init(&ctxt, c, NULL, stats,
+                             writepoint_ptr(&c->btree_write_point),
+                             true);
+       trans = ctxt.trans;
 
        stats->data_type = BCH_DATA_btree;
 
        for (id = start_btree_id;
             id <= min_t(unsigned, end_btree_id, btree_id_nr_alive(c) - 1);
             id++) {
-               stats->btree_id = id;
+               stats->pos = BBPOS(id, POS_MIN);
 
                if (!bch2_btree_id_root(c, id)->b)
                        continue;
@@ -882,7 +900,7 @@ retry:
                             bpos_cmp(b->key.k.p, end_pos)) > 0)
                                break;
 
-                       stats->pos = iter.pos;
+                       stats->pos = BBPOS(iter.btree_id, iter.pos);
 
                        if (!pred(c, arg, b, &io_opts, &data_opts))
                                goto next;
@@ -904,14 +922,10 @@ next:
                        break;
        }
 
-       bch2_trans_put(trans);
-
-       if (ret)
-               bch_err_fn(c, ret);
-
+       bch_err_fn(c, ret);
+       bch2_moving_ctxt_exit(&ctxt);
        bch2_btree_interior_updates_flush(c);
 
-       progress_list_del(c, stats);
        return ret;
 }
 
@@ -1032,8 +1046,7 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
                mutex_unlock(&c->sb_lock);
        }
 
-       if (ret)
-               bch_err_fn(c, ret);
+       bch_err_fn(c, ret);
        return ret;
 }
 
@@ -1056,14 +1069,16 @@ int bch2_data_job(struct bch_fs *c,
                ret = bch2_replicas_gc2(c) ?: ret;
 
                ret = bch2_move_data(c,
-                                    op.start_btree,    op.start_pos,
-                                    op.end_btree,      op.end_pos,
+                                    (struct bbpos) { op.start_btree,   op.start_pos },
+                                    (struct bbpos) { op.end_btree,     op.end_pos },
                                     NULL,
                                     stats,
                                     writepoint_hashed((unsigned long) current),
                                     true,
                                     rereplicate_pred, c) ?: ret;
                ret = bch2_replicas_gc2(c) ?: ret;
+
+               bch2_move_stats_exit(stats, c);
                break;
        case BCH_DATA_OP_MIGRATE:
                if (op.migrate.dev >= c->sb.nr_devices)
@@ -1080,18 +1095,21 @@ int bch2_data_job(struct bch_fs *c,
                ret = bch2_replicas_gc2(c) ?: ret;
 
                ret = bch2_move_data(c,
-                                    op.start_btree,    op.start_pos,
-                                    op.end_btree,      op.end_pos,
+                                    (struct bbpos) { op.start_btree,   op.start_pos },
+                                    (struct bbpos) { op.end_btree,     op.end_pos },
                                     NULL,
                                     stats,
                                     writepoint_hashed((unsigned long) current),
                                     true,
                                     migrate_pred, &op) ?: ret;
                ret = bch2_replicas_gc2(c) ?: ret;
+
+               bch2_move_stats_exit(stats, c);
                break;
        case BCH_DATA_OP_REWRITE_OLD_NODES:
                bch2_move_stats_init(stats, "rewrite_old_nodes");
                ret = bch2_scan_old_btree_nodes(c, stats);
+               bch2_move_stats_exit(stats, c);
                break;
        default:
                ret = -EINVAL;
@@ -1100,19 +1118,43 @@ int bch2_data_job(struct bch_fs *c,
        return ret;
 }
 
-static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
+void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
 {
-       struct bch_move_stats *stats = ctxt->stats;
-       struct moving_io *io;
+       prt_printf(out, "%s: data type=%s pos=",
+                  stats->name,
+                  bch2_data_types[stats->data_type]);
+       bch2_bbpos_to_text(out, stats->pos);
+       prt_newline(out);
+       printbuf_indent_add(out, 2);
+
+       prt_str(out, "keys moved:  ");
+       prt_u64(out, atomic64_read(&stats->keys_moved));
+       prt_newline(out);
+
+       prt_str(out, "keys raced:  ");
+       prt_u64(out, atomic64_read(&stats->keys_raced));
+       prt_newline(out);
+
+       prt_str(out, "bytes seen:  ");
+       prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9);
+       prt_newline(out);
 
-       prt_printf(out, "%s (%ps):", stats->name, ctxt->fn);
+       prt_str(out, "bytes moved: ");
+       prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9);
        prt_newline(out);
 
-       prt_printf(out, " data type %s btree_id %s position: ",
-                  bch2_data_types[stats->data_type],
-                  bch2_btree_ids[stats->btree_id]);
-       bch2_bpos_to_text(out, stats->pos);
+       prt_str(out, "bytes raced: ");
+       prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9);
        prt_newline(out);
+
+       printbuf_indent_sub(out, 2);
+}
+
+static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, struct moving_context *ctxt)
+{
+       struct moving_io *io;
+
+       bch2_move_stats_to_text(out, ctxt->stats);
        printbuf_indent_add(out, 2);
 
        prt_printf(out, "reads: ios %u/%u sectors %u/%u",
@@ -1153,7 +1195,4 @@ void bch2_fs_move_init(struct bch_fs *c)
 {
        INIT_LIST_HEAD(&c->moving_context_list);
        mutex_init(&c->moving_context_lock);
-
-       INIT_LIST_HEAD(&c->data_progress_list);
-       mutex_init(&c->data_progress_lock);
 }
index cbdd58db8782b24e06f03fbecd23efeaf8aaace7..07cf9d42643b4fe537b6db513285efc1f65bd366 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _BCACHEFS_MOVE_H
 #define _BCACHEFS_MOVE_H
 
+#include "bbpos.h"
 #include "bcachefs_ioctl.h"
 #include "btree_iter.h"
 #include "buckets.h"
@@ -11,7 +12,7 @@
 struct bch_read_bio;
 
 struct moving_context {
-       struct bch_fs           *c;
+       struct btree_trans      *trans;
        struct list_head        list;
        void                    *fn;
 
@@ -37,13 +38,14 @@ struct moving_context {
        wait_queue_head_t       wait;
 };
 
-#define move_ctxt_wait_event(_ctxt, _trans, _cond)                     \
+#define move_ctxt_wait_event(_ctxt, _cond)                             \
 do {                                                                   \
        bool cond_finished = false;                                     \
-       bch2_moving_ctxt_do_pending_writes(_ctxt, _trans);              \
+       bch2_moving_ctxt_do_pending_writes(_ctxt);                      \
                                                                        \
        if (_cond)                                                      \
                break;                                                  \
+       bch2_trans_unlock_long((_ctxt)->trans);                         \
        __wait_event((_ctxt)->wait,                                     \
                     bch2_moving_ctxt_next_pending_write(_ctxt) ||      \
                     (cond_finished = (_cond)));                        \
@@ -59,22 +61,60 @@ void bch2_moving_ctxt_init(struct moving_context *, struct bch_fs *,
                           struct bch_ratelimit *, struct bch_move_stats *,
                           struct write_point_specifier, bool);
 struct moving_io *bch2_moving_ctxt_next_pending_write(struct moving_context *);
-void bch2_moving_ctxt_do_pending_writes(struct moving_context *,
-                                       struct btree_trans *);
+void bch2_moving_ctxt_do_pending_writes(struct moving_context *);
+void bch2_move_ctxt_wait_for_io(struct moving_context *);
+int bch2_move_ratelimit(struct moving_context *);
+
+/* Inodes in different snapshots may have different IO options: */
+struct snapshot_io_opts_entry {
+       u32                     snapshot;
+       struct bch_io_opts      io_opts;
+};
+
+struct per_snapshot_io_opts {
+       u64                     cur_inum;
+       struct bch_io_opts      fs_io_opts;
+       DARRAY(struct snapshot_io_opts_entry) d;
+};
+
+static inline void per_snapshot_io_opts_init(struct per_snapshot_io_opts *io_opts, struct bch_fs *c)
+{
+       memset(io_opts, 0, sizeof(*io_opts));
+       io_opts->fs_io_opts = bch2_opts_to_inode_opts(c->opts);
+}
+
+static inline void per_snapshot_io_opts_exit(struct per_snapshot_io_opts *io_opts)
+{
+       darray_exit(&io_opts->d);
+}
+
+struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *,
+                               struct per_snapshot_io_opts *, struct bkey_s_c);
+int bch2_move_get_io_opts_one(struct btree_trans *, struct bch_io_opts *, struct bkey_s_c);
 
 int bch2_scan_old_btree_nodes(struct bch_fs *, struct bch_move_stats *);
 
+int bch2_move_extent(struct moving_context *,
+                    struct move_bucket_in_flight *,
+                    struct btree_iter *,
+                    struct bkey_s_c,
+                    struct bch_io_opts,
+                    struct data_update_opts);
+
+int __bch2_move_data(struct moving_context *,
+                    struct bbpos,
+                    struct bbpos,
+                    move_pred_fn, void *);
 int bch2_move_data(struct bch_fs *,
-                  enum btree_id, struct bpos,
-                  enum btree_id, struct bpos,
+                  struct bbpos start,
+                  struct bbpos end,
                   struct bch_ratelimit *,
                   struct bch_move_stats *,
                   struct write_point_specifier,
                   bool,
                   move_pred_fn, void *);
 
-int __bch2_evacuate_bucket(struct btree_trans *,
-                          struct moving_context *,
+int __bch2_evacuate_bucket(struct moving_context *,
                           struct move_bucket_in_flight *,
                           struct bpos, int,
                           struct data_update_opts);
@@ -88,7 +128,10 @@ int bch2_data_job(struct bch_fs *,
                  struct bch_move_stats *,
                  struct bch_ioctl_data);
 
-void bch2_move_stats_init(struct bch_move_stats *stats, char *name);
+void bch2_move_stats_to_text(struct printbuf *, struct bch_move_stats *);
+void bch2_move_stats_exit(struct bch_move_stats *, struct bch_fs *);
+void bch2_move_stats_init(struct bch_move_stats *, char *);
+
 void bch2_fs_moving_ctxts_to_text(struct printbuf *, struct bch_fs *);
 
 void bch2_fs_move_init(struct bch_fs *);
index baf1f8570b3fe05f1733ae9be1cfa28c9e3db933..e22841ef31e475fdfa11d8dcc7d48adb8d333897 100644 (file)
@@ -2,17 +2,17 @@
 #ifndef _BCACHEFS_MOVE_TYPES_H
 #define _BCACHEFS_MOVE_TYPES_H
 
+#include "bbpos_types.h"
+
 struct bch_move_stats {
        enum bch_data_type      data_type;
-       enum btree_id           btree_id;
-       struct bpos             pos;
-       struct list_head        list;
+       struct bbpos            pos;
        char                    name[32];
 
        atomic64_t              keys_moved;
        atomic64_t              keys_raced;
-       atomic64_t              sectors_moved;
        atomic64_t              sectors_seen;
+       atomic64_t              sectors_moved;
        atomic64_t              sectors_raced;
 };
 
index 4017120baeeebddecee6180522fe99f84f291db1..0a0576326c5b2d433fcd4aace513379972f57152 100644 (file)
@@ -101,8 +101,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
        return ret;
 }
 
-static void move_buckets_wait(struct btree_trans *trans,
-                             struct moving_context *ctxt,
+static void move_buckets_wait(struct moving_context *ctxt,
                              struct buckets_in_flight *list,
                              bool flush)
 {
@@ -111,7 +110,7 @@ static void move_buckets_wait(struct btree_trans *trans,
 
        while ((i = list->first)) {
                if (flush)
-                       move_ctxt_wait_event(ctxt, trans, !atomic_read(&i->count));
+                       move_ctxt_wait_event(ctxt, !atomic_read(&i->count));
 
                if (atomic_read(&i->count))
                        break;
@@ -129,7 +128,7 @@ static void move_buckets_wait(struct btree_trans *trans,
                kfree(i);
        }
 
-       bch2_trans_unlock(trans);
+       bch2_trans_unlock_long(ctxt->trans);
 }
 
 static bool bucket_in_flight(struct buckets_in_flight *list,
@@ -140,11 +139,11 @@ static bool bucket_in_flight(struct buckets_in_flight *list,
 
 typedef DARRAY(struct move_bucket) move_buckets;
 
-static int bch2_copygc_get_buckets(struct btree_trans *trans,
-                       struct moving_context *ctxt,
+static int bch2_copygc_get_buckets(struct moving_context *ctxt,
                        struct buckets_in_flight *buckets_in_flight,
                        move_buckets *buckets)
 {
+       struct btree_trans *trans = ctxt->trans;
        struct bch_fs *c = trans->c;
        struct btree_iter iter;
        struct bkey_s_c k;
@@ -152,7 +151,7 @@ static int bch2_copygc_get_buckets(struct btree_trans *trans,
        size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
        int ret;
 
-       move_buckets_wait(trans, ctxt, buckets_in_flight, false);
+       move_buckets_wait(ctxt, buckets_in_flight, false);
 
        ret = bch2_btree_write_buffer_flush(trans);
        if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
@@ -188,10 +187,11 @@ static int bch2_copygc_get_buckets(struct btree_trans *trans,
 }
 
 noinline
-static int bch2_copygc(struct btree_trans *trans,
-                      struct moving_context *ctxt,
-                      struct buckets_in_flight *buckets_in_flight)
+static int bch2_copygc(struct moving_context *ctxt,
+                      struct buckets_in_flight *buckets_in_flight,
+                      bool *did_work)
 {
+       struct btree_trans *trans = ctxt->trans;
        struct bch_fs *c = trans->c;
        struct data_update_opts data_opts = {
                .btree_insert_flags = BCH_WATERMARK_copygc,
@@ -202,7 +202,7 @@ static int bch2_copygc(struct btree_trans *trans,
        u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
        int ret = 0;
 
-       ret = bch2_copygc_get_buckets(trans, ctxt, buckets_in_flight, &buckets);
+       ret = bch2_copygc_get_buckets(ctxt, buckets_in_flight, &buckets);
        if (ret)
                goto err;
 
@@ -221,10 +221,12 @@ static int bch2_copygc(struct btree_trans *trans,
                        break;
                }
 
-               ret = __bch2_evacuate_bucket(trans, ctxt, f, f->bucket.k.bucket,
+               ret = __bch2_evacuate_bucket(ctxt, f, f->bucket.k.bucket,
                                             f->bucket.k.gen, data_opts);
                if (ret)
                        goto err;
+
+               *did_work = true;
        }
 err:
        darray_exit(&buckets);
@@ -300,24 +302,24 @@ void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
 static int bch2_copygc_thread(void *arg)
 {
        struct bch_fs *c = arg;
-       struct btree_trans *trans;
        struct moving_context ctxt;
        struct bch_move_stats move_stats;
        struct io_clock *clock = &c->io_clock[WRITE];
-       struct buckets_in_flight buckets;
+       struct buckets_in_flight *buckets;
        u64 last, wait;
        int ret = 0;
 
-       memset(&buckets, 0, sizeof(buckets));
-
-       ret = rhashtable_init(&buckets.table, &bch_move_bucket_params);
+       buckets = kzalloc(sizeof(struct buckets_in_flight), GFP_KERNEL);
+       if (!buckets)
+               return -ENOMEM;
+       ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
        if (ret) {
+               kfree(buckets);
                bch_err_msg(c, ret, "allocating copygc buckets in flight");
                return ret;
        }
 
        set_freezable();
-       trans = bch2_trans_get(c);
 
        bch2_move_stats_init(&move_stats, "copygc");
        bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
@@ -325,16 +327,18 @@ static int bch2_copygc_thread(void *arg)
                              false);
 
        while (!ret && !kthread_should_stop()) {
-               bch2_trans_unlock(trans);
+               bool did_work = false;
+
+               bch2_trans_unlock_long(ctxt.trans);
                cond_resched();
 
                if (!c->copy_gc_enabled) {
-                       move_buckets_wait(trans, &ctxt, &buckets, true);
+                       move_buckets_wait(&ctxt, buckets, true);
                        kthread_wait_freezable(c->copy_gc_enabled);
                }
 
                if (unlikely(freezing(current))) {
-                       move_buckets_wait(trans, &ctxt, &buckets, true);
+                       move_buckets_wait(&ctxt, buckets, true);
                        __refrigerator(false);
                        continue;
                }
@@ -345,7 +349,7 @@ static int bch2_copygc_thread(void *arg)
                if (wait > clock->max_slop) {
                        c->copygc_wait_at = last;
                        c->copygc_wait = last + wait;
-                       move_buckets_wait(trans, &ctxt, &buckets, true);
+                       move_buckets_wait(&ctxt, buckets, true);
                        trace_and_count(c, copygc_wait, c, wait, last + wait);
                        bch2_kthread_io_clock_wait(clock, last + wait,
                                        MAX_SCHEDULE_TIMEOUT);
@@ -355,16 +359,29 @@ static int bch2_copygc_thread(void *arg)
                c->copygc_wait = 0;
 
                c->copygc_running = true;
-               ret = bch2_copygc(trans, &ctxt, &buckets);
+               ret = bch2_copygc(&ctxt, buckets, &did_work);
                c->copygc_running = false;
 
                wake_up(&c->copygc_running_wq);
+
+               if (!wait && !did_work) {
+                       u64 min_member_capacity = bch2_min_rw_member_capacity(c);
+
+                       if (min_member_capacity == U64_MAX)
+                               min_member_capacity = 128 * 2048;
+
+                       bch2_trans_unlock_long(ctxt.trans);
+                       bch2_kthread_io_clock_wait(clock, last + (min_member_capacity >> 6),
+                                       MAX_SCHEDULE_TIMEOUT);
+               }
        }
 
-       move_buckets_wait(trans, &ctxt, &buckets, true);
-       rhashtable_destroy(&buckets.table);
-       bch2_trans_put(trans);
+       move_buckets_wait(&ctxt, buckets, true);
+
+       rhashtable_destroy(&buckets->table);
+       kfree(buckets);
        bch2_moving_ctxt_exit(&ctxt);
+       bch2_move_stats_exit(&move_stats, c);
 
        return 0;
 }
index 232f50c73a9452efa01cfa97436588bb42b1fc87..8dd4046cca41ef23b061f4aeac1892f82a504d65 100644 (file)
 
 #define x(t, n, ...) [n] = #t,
 
-const char * const bch2_iops_measurements[] = {
-       BCH_IOPS_MEASUREMENTS()
-       NULL
-};
-
 const char * const bch2_error_actions[] = {
        BCH_ERROR_ACTIONS()
        NULL
@@ -42,9 +37,8 @@ const char * const bch2_sb_compat[] = {
        NULL
 };
 
-const char * const bch2_btree_ids[] = {
+const char * const __bch2_btree_ids[] = {
        BCH_BTREE_IDS()
-       "interior btree node",
        NULL
 };
 
@@ -271,14 +265,14 @@ int bch2_opt_validate(const struct bch_option *opt, u64 v, struct printbuf *err)
                if (err)
                        prt_printf(err, "%s: too small (min %llu)",
                               opt->attr.name, opt->min);
-               return -ERANGE;
+               return -BCH_ERR_ERANGE_option_too_small;
        }
 
        if (opt->max && v >= opt->max) {
                if (err)
                        prt_printf(err, "%s: too big (max %llu)",
                               opt->attr.name, opt->max);
-               return -ERANGE;
+               return -BCH_ERR_ERANGE_option_too_big;
        }
 
        if ((opt->flags & OPT_SB_FIELD_SECTORS) && (v & 511)) {
@@ -295,6 +289,9 @@ int bch2_opt_validate(const struct bch_option *opt, u64 v, struct printbuf *err)
                return -EINVAL;
        }
 
+       if (opt->fn.validate)
+               return opt->fn.validate(v, err);
+
        return 0;
 }
 
index 55014336c5f7534d3deadf16fc77671108d25d7b..8526f177450a56900c907a2e4cba3950fe5f9e00 100644 (file)
 
 struct bch_fs;
 
-extern const char * const bch2_iops_measurements[];
 extern const char * const bch2_error_actions[];
 extern const char * const bch2_fsck_fix_opts[];
 extern const char * const bch2_version_upgrade_opts[];
 extern const char * const bch2_sb_features[];
 extern const char * const bch2_sb_compat[];
-extern const char * const bch2_btree_ids[];
+extern const char * const __bch2_btree_ids[];
 extern const char * const bch2_csum_types[];
 extern const char * const bch2_csum_opts[];
 extern const char * const bch2_compression_types[];
@@ -74,6 +73,7 @@ enum opt_type {
 struct bch_opt_fn {
        int (*parse)(struct bch_fs *, const char *, u64 *, struct printbuf *);
        void (*to_text)(struct printbuf *, struct bch_fs *, struct bch_sb *, u64);
+       int (*validate)(u64, struct printbuf *);
 };
 
 /**
index de41f9a144920b83fb816182a4c97fecc1df87bf..5e653eb81d54f8fdfcca37038eeaf5a1febdb8e7 100644 (file)
@@ -415,11 +415,11 @@ void bch2_prt_bitflags(struct printbuf *out,
        while (list[nr])
                nr++;
 
-       while (flags && (bit = __ffs(flags)) < nr) {
+       while (flags && (bit = __ffs64(flags)) < nr) {
                if (!first)
                        bch2_prt_printf(out, ",");
                first = false;
                bch2_prt_printf(out, "%s", list[bit]);
-               flags ^= 1 << bit;
+               flags ^= BIT_ULL(bit);
        }
 }
index cb68ae44d597a6a97babfc0540a55035a692a12a..a54647c36b8501b7099c81fd5c4e9a6cba410787 100644 (file)
@@ -59,17 +59,18 @@ const struct bch_sb_field_ops bch_sb_field_ops_quota = {
        .to_text        = bch2_sb_quota_to_text,
 };
 
-int bch2_quota_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_quota_invalid(struct bch_fs *c, struct bkey_s_c k,
                       enum bkey_invalid_flags flags,
                       struct printbuf *err)
 {
-       if (k.k->p.inode >= QTYP_NR) {
-               prt_printf(err, "invalid quota type (%llu >= %u)",
-                      k.k->p.inode, QTYP_NR);
-               return -BCH_ERR_invalid_bkey;
-       }
+       int ret = 0;
 
-       return 0;
+       bkey_fsck_err_on(k.k->p.inode >= QTYP_NR, c, err,
+                        quota_type_invalid,
+                        "invalid quota type (%llu >= %u)",
+                        k.k->p.inode, QTYP_NR);
+fsck_err:
+       return ret;
 }
 
 void bch2_quota_to_text(struct printbuf *out, struct bch_fs *c,
index 2f463874a3628238fb62c17f341d698f4fc3f3e6..884f601f41c425b711f14ea05be3c8fc26f12158 100644 (file)
@@ -8,7 +8,7 @@
 enum bkey_invalid_flags;
 extern const struct bch_sb_field_ops bch_sb_field_ops_quota;
 
-int bch2_quota_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_quota_invalid(struct bch_fs *, struct bkey_s_c,
                       enum bkey_invalid_flags, struct printbuf *);
 void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
index 568f1e8e7507e73913ff70c2e1769b9750f3289e..3319190b8d9c330fde44ad959bc299aa00d2ba87 100644 (file)
@@ -1,15 +1,21 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
+#include "alloc_background.h"
 #include "alloc_foreground.h"
 #include "btree_iter.h"
+#include "btree_update.h"
+#include "btree_write_buffer.h"
 #include "buckets.h"
 #include "clock.h"
 #include "compress.h"
 #include "disk_groups.h"
 #include "errcode.h"
+#include "error.h"
+#include "inode.h"
 #include "move.h"
 #include "rebalance.h"
+#include "subvolume.h"
 #include "super-io.h"
 #include "trace.h"
 
 #include <linux/kthread.h>
 #include <linux/sched/cputime.h>
 
-/*
- * Check if an extent should be moved:
- * returns -1 if it should not be moved, or
- * device of pointer that should be moved, if known, or INT_MAX if unknown
- */
-static bool rebalance_pred(struct bch_fs *c, void *arg,
-                          struct bkey_s_c k,
-                          struct bch_io_opts *io_opts,
-                          struct data_update_opts *data_opts)
-{
-       struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-       unsigned i;
-
-       data_opts->rewrite_ptrs         = 0;
-       data_opts->target               = io_opts->background_target;
-       data_opts->extra_replicas       = 0;
-       data_opts->btree_insert_flags   = 0;
-
-       if (io_opts->background_compression &&
-           !bch2_bkey_is_incompressible(k)) {
-               const union bch_extent_entry *entry;
-               struct extent_ptr_decoded p;
-
-               i = 0;
-               bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-                       if (!p.ptr.cached &&
-                           p.crc.compression_type !=
-                           bch2_compression_opt_to_type(io_opts->background_compression))
-                               data_opts->rewrite_ptrs |= 1U << i;
-                       i++;
-               }
-       }
+#define REBALANCE_WORK_SCAN_OFFSET     (U64_MAX - 1)
 
-       if (io_opts->background_target) {
-               const struct bch_extent_ptr *ptr;
+static const char * const bch2_rebalance_state_strs[] = {
+#define x(t) #t,
+       BCH_REBALANCE_STATES()
+       NULL
+#undef x
+};
 
-               i = 0;
-               bkey_for_each_ptr(ptrs, ptr) {
-                       if (!ptr->cached &&
-                           !bch2_dev_in_target(c, ptr->dev, io_opts->background_target) &&
-                           bch2_target_accepts_data(c, BCH_DATA_user, io_opts->background_target))
-                               data_opts->rewrite_ptrs |= 1U << i;
-                       i++;
-               }
-       }
+static int __bch2_set_rebalance_needs_scan(struct btree_trans *trans, u64 inum)
+{
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       struct bkey_i_cookie *cookie;
+       u64 v;
+       int ret;
 
-       return data_opts->rewrite_ptrs != 0;
+       bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
+                            SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
+                            BTREE_ITER_INTENT);
+       k = bch2_btree_iter_peek_slot(&iter);
+       ret = bkey_err(k);
+       if (ret)
+               goto err;
+
+       v = k.k->type == KEY_TYPE_cookie
+               ? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
+               : 0;
+
+       cookie = bch2_trans_kmalloc(trans, sizeof(*cookie));
+       ret = PTR_ERR_OR_ZERO(cookie);
+       if (ret)
+               goto err;
+
+       bkey_cookie_init(&cookie->k_i);
+       cookie->k.p = iter.pos;
+       cookie->v.cookie = cpu_to_le64(v + 1);
+
+       ret = bch2_trans_update(trans, &iter, &cookie->k_i, 0);
+err:
+       bch2_trans_iter_exit(trans, &iter);
+       return ret;
 }
 
-void bch2_rebalance_add_key(struct bch_fs *c,
-                           struct bkey_s_c k,
-                           struct bch_io_opts *io_opts)
+int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum)
 {
-       struct data_update_opts update_opts = { 0 };
-       struct bkey_ptrs_c ptrs;
-       const struct bch_extent_ptr *ptr;
-       unsigned i;
-
-       if (!rebalance_pred(c, NULL, k, io_opts, &update_opts))
-               return;
-
-       i = 0;
-       ptrs = bch2_bkey_ptrs_c(k);
-       bkey_for_each_ptr(ptrs, ptr) {
-               if ((1U << i) && update_opts.rewrite_ptrs)
-                       if (atomic64_add_return(k.k->size,
-                                       &bch_dev_bkey_exists(c, ptr->dev)->rebalance_work) ==
-                           k.k->size)
-                               rebalance_wakeup(c);
-               i++;
-       }
+       int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+                           __bch2_set_rebalance_needs_scan(trans, inum));
+       rebalance_wakeup(c);
+       return ret;
 }
 
-void bch2_rebalance_add_work(struct bch_fs *c, u64 sectors)
+int bch2_set_fs_needs_rebalance(struct bch_fs *c)
 {
-       if (atomic64_add_return(sectors, &c->rebalance.work_unknown_dev) ==
-           sectors)
-               rebalance_wakeup(c);
+       return bch2_set_rebalance_needs_scan(c, 0);
 }
 
-struct rebalance_work {
-       int             dev_most_full_idx;
-       unsigned        dev_most_full_percent;
-       u64             dev_most_full_work;
-       u64             dev_most_full_capacity;
-       u64             total_work;
-};
+static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum, u64 cookie)
+{
+       struct btree_iter iter;
+       struct bkey_s_c k;
+       u64 v;
+       int ret;
+
+       bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
+                            SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
+                            BTREE_ITER_INTENT);
+       k = bch2_btree_iter_peek_slot(&iter);
+       ret = bkey_err(k);
+       if (ret)
+               goto err;
+
+       v = k.k->type == KEY_TYPE_cookie
+               ? le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie)
+               : 0;
+
+       if (v == cookie)
+               ret = bch2_btree_delete_at(trans, &iter, 0);
+err:
+       bch2_trans_iter_exit(trans, &iter);
+       return ret;
+}
 
-static void rebalance_work_accumulate(struct rebalance_work *w,
-               u64 dev_work, u64 unknown_dev, u64 capacity, int idx)
+static struct bkey_s_c next_rebalance_entry(struct btree_trans *trans,
+                                           struct btree_iter *work_iter)
 {
-       unsigned percent_full;
-       u64 work = dev_work + unknown_dev;
+       return !kthread_should_stop()
+               ? bch2_btree_iter_peek(work_iter)
+               : bkey_s_c_null;
+}
 
-       /* avoid divide by 0 */
-       if (!capacity)
-               return;
+static int bch2_bkey_clear_needs_rebalance(struct btree_trans *trans,
+                                          struct btree_iter *iter,
+                                          struct bkey_s_c k)
+{
+       struct bkey_i *n = bch2_bkey_make_mut(trans, iter, &k, 0);
+       int ret = PTR_ERR_OR_ZERO(n);
+       if (ret)
+               return ret;
+
+       extent_entry_drop(bkey_i_to_s(n),
+                         (void *) bch2_bkey_rebalance_opts(bkey_i_to_s_c(n)));
+       return bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
+}
+
+static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
+                       struct bpos work_pos,
+                       struct btree_iter *extent_iter,
+                       struct data_update_opts *data_opts)
+{
+       struct bch_fs *c = trans->c;
+       struct bkey_s_c k;
+
+       bch2_trans_iter_exit(trans, extent_iter);
+       bch2_trans_iter_init(trans, extent_iter,
+                            work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
+                            work_pos,
+                            BTREE_ITER_ALL_SNAPSHOTS);
+       k = bch2_btree_iter_peek_slot(extent_iter);
+       if (bkey_err(k))
+               return k;
+
+       const struct bch_extent_rebalance *r = k.k ? bch2_bkey_rebalance_opts(k) : NULL;
+       if (!r) {
+               /* raced due to btree write buffer, nothing to do */
+               return bkey_s_c_null;
+       }
 
-       if (work < dev_work || work < unknown_dev)
-               work = U64_MAX;
-       work = min(work, capacity);
+       memset(data_opts, 0, sizeof(*data_opts));
 
-       percent_full = div64_u64(work * 100, capacity);
+       data_opts->rewrite_ptrs         =
+               bch2_bkey_ptrs_need_rebalance(c, k, r->target, r->compression);
+       data_opts->target               = r->target;
 
-       if (percent_full >= w->dev_most_full_percent) {
-               w->dev_most_full_idx            = idx;
-               w->dev_most_full_percent        = percent_full;
-               w->dev_most_full_work           = work;
-               w->dev_most_full_capacity       = capacity;
+       if (!data_opts->rewrite_ptrs) {
+               /*
+                * device we would want to write to offline? devices in target
+                * changed?
+                *
+                * We'll now need a full scan before this extent is picked up
+                * again:
+                */
+               int ret = bch2_bkey_clear_needs_rebalance(trans, extent_iter, k);
+               if (ret)
+                       return bkey_s_c_err(ret);
+               return bkey_s_c_null;
        }
 
-       if (w->total_work + dev_work >= w->total_work &&
-           w->total_work + dev_work >= dev_work)
-               w->total_work += dev_work;
+       return k;
 }
 
-static struct rebalance_work rebalance_work(struct bch_fs *c)
+noinline_for_stack
+static int do_rebalance_extent(struct moving_context *ctxt,
+                              struct bpos work_pos,
+                              struct btree_iter *extent_iter)
 {
-       struct bch_dev *ca;
-       struct rebalance_work ret = { .dev_most_full_idx = -1 };
-       u64 unknown_dev = atomic64_read(&c->rebalance.work_unknown_dev);
-       unsigned i;
-
-       for_each_online_member(ca, c, i)
-               rebalance_work_accumulate(&ret,
-                       atomic64_read(&ca->rebalance_work),
-                       unknown_dev,
-                       bucket_to_sector(ca, ca->mi.nbuckets -
-                                        ca->mi.first_bucket),
-                       i);
-
-       rebalance_work_accumulate(&ret,
-               unknown_dev, 0, c->capacity, -1);
+       struct btree_trans *trans = ctxt->trans;
+       struct bch_fs *c = trans->c;
+       struct bch_fs_rebalance *r = &trans->c->rebalance;
+       struct data_update_opts data_opts;
+       struct bch_io_opts io_opts;
+       struct bkey_s_c k;
+       struct bkey_buf sk;
+       int ret;
+
+       ctxt->stats = &r->work_stats;
+       r->state = BCH_REBALANCE_working;
+
+       bch2_bkey_buf_init(&sk);
+
+       ret = bkey_err(k = next_rebalance_extent(trans, work_pos,
+                                                extent_iter, &data_opts));
+       if (ret || !k.k)
+               goto out;
 
+       ret = bch2_move_get_io_opts_one(trans, &io_opts, k);
+       if (ret)
+               goto out;
+
+       atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+
+       /*
+        * The iterator gets unlocked by __bch2_read_extent - need to
+        * save a copy of @k elsewhere:
+        */
+       bch2_bkey_buf_reassemble(&sk, c, k);
+       k = bkey_i_to_s_c(sk.k);
+
+       ret = bch2_move_extent(ctxt, NULL, extent_iter, k, io_opts, data_opts);
+       if (ret) {
+               if (bch2_err_matches(ret, ENOMEM)) {
+                       /* memory allocation failure, wait for some IO to finish */
+                       bch2_move_ctxt_wait_for_io(ctxt);
+                       ret = -BCH_ERR_transaction_restart_nested;
+               }
+
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                       goto out;
+
+               /* skip it and continue, XXX signal failure */
+               ret = 0;
+       }
+out:
+       bch2_bkey_buf_exit(&sk, c);
        return ret;
 }
 
-static void rebalance_work_reset(struct bch_fs *c)
+static bool rebalance_pred(struct bch_fs *c, void *arg,
+                          struct bkey_s_c k,
+                          struct bch_io_opts *io_opts,
+                          struct data_update_opts *data_opts)
 {
-       struct bch_dev *ca;
-       unsigned i;
+       unsigned target, compression;
+
+       if (k.k->p.inode) {
+               target          = io_opts->background_target;
+               compression     = io_opts->background_compression ?: io_opts->compression;
+       } else {
+               const struct bch_extent_rebalance *r = bch2_bkey_rebalance_opts(k);
 
-       for_each_online_member(ca, c, i)
-               atomic64_set(&ca->rebalance_work, 0);
+               target          = r ? r->target : io_opts->background_target;
+               compression     = r ? r->compression :
+                       (io_opts->background_compression ?: io_opts->compression);
+       }
 
-       atomic64_set(&c->rebalance.work_unknown_dev, 0);
+       data_opts->rewrite_ptrs         = bch2_bkey_ptrs_need_rebalance(c, k, target, compression);
+       data_opts->target               = target;
+       return data_opts->rewrite_ptrs != 0;
 }
 
-static unsigned long curr_cputime(void)
+static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
 {
-       u64 utime, stime;
+       struct btree_trans *trans = ctxt->trans;
+       struct bch_fs_rebalance *r = &trans->c->rebalance;
+       int ret;
+
+       bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
+       ctxt->stats = &r->scan_stats;
 
-       task_cputime_adjusted(current, &utime, &stime);
-       return nsecs_to_jiffies(utime + stime);
+       if (!inum) {
+               r->scan_start   = BBPOS_MIN;
+               r->scan_end     = BBPOS_MAX;
+       } else {
+               r->scan_start   = BBPOS(BTREE_ID_extents, POS(inum, 0));
+               r->scan_end     = BBPOS(BTREE_ID_extents, POS(inum, U64_MAX));
+       }
+
+       r->state = BCH_REBALANCE_scanning;
+
+       ret = __bch2_move_data(ctxt, r->scan_start, r->scan_end, rebalance_pred, NULL) ?:
+               commit_do(trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+                         bch2_clear_rebalance_needs_scan(trans, inum, cookie));
+
+       bch2_move_stats_exit(&r->scan_stats, trans->c);
+       return ret;
 }
 
-static int bch2_rebalance_thread(void *arg)
+static void rebalance_wait(struct bch_fs *c)
 {
-       struct bch_fs *c = arg;
        struct bch_fs_rebalance *r = &c->rebalance;
        struct io_clock *clock = &c->io_clock[WRITE];
-       struct rebalance_work w, p;
-       struct bch_move_stats move_stats;
-       unsigned long start, prev_start;
-       unsigned long prev_run_time, prev_run_cputime;
-       unsigned long cputime, prev_cputime;
-       u64 io_start;
-       long throttle;
+       u64 now = atomic64_read(&clock->now);
+       u64 min_member_capacity = bch2_min_rw_member_capacity(c);
 
-       set_freezable();
+       if (min_member_capacity == U64_MAX)
+               min_member_capacity = 128 * 2048;
+
+       r->wait_iotime_end              = now + (min_member_capacity >> 6);
 
-       io_start        = atomic64_read(&clock->now);
-       p               = rebalance_work(c);
-       prev_start      = jiffies;
-       prev_cputime    = curr_cputime();
+       if (r->state != BCH_REBALANCE_waiting) {
+               r->wait_iotime_start    = now;
+               r->wait_wallclock_start = ktime_get_real_ns();
+               r->state                = BCH_REBALANCE_waiting;
+       }
+
+       bch2_kthread_io_clock_wait(clock, r->wait_iotime_end, MAX_SCHEDULE_TIMEOUT);
+}
 
-       bch2_move_stats_init(&move_stats, "rebalance");
-       while (!kthread_wait_freezable(r->enabled)) {
-               cond_resched();
+static int do_rebalance(struct moving_context *ctxt)
+{
+       struct btree_trans *trans = ctxt->trans;
+       struct bch_fs *c = trans->c;
+       struct bch_fs_rebalance *r = &c->rebalance;
+       struct btree_iter rebalance_work_iter, extent_iter = { NULL };
+       struct bkey_s_c k;
+       int ret = 0;
 
-               start                   = jiffies;
-               cputime                 = curr_cputime();
+       bch2_move_stats_init(&r->work_stats, "rebalance_work");
+       bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
 
-               prev_run_time           = start - prev_start;
-               prev_run_cputime        = cputime - prev_cputime;
+       bch2_trans_iter_init(trans, &rebalance_work_iter,
+                            BTREE_ID_rebalance_work, POS_MIN,
+                            BTREE_ITER_ALL_SNAPSHOTS);
 
-               w                       = rebalance_work(c);
-               BUG_ON(!w.dev_most_full_capacity);
+       while (!bch2_move_ratelimit(ctxt) &&
+              !kthread_wait_freezable(r->enabled)) {
+               bch2_trans_begin(trans);
 
-               if (!w.total_work) {
-                       r->state = REBALANCE_WAITING;
-                       kthread_wait_freezable(rebalance_work(c).total_work);
+               ret = bkey_err(k = next_rebalance_entry(trans, &rebalance_work_iter));
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
                        continue;
-               }
+               if (ret || !k.k)
+                       break;
 
-               /*
-                * If there isn't much work to do, throttle cpu usage:
-                */
-               throttle = prev_run_cputime * 100 /
-                       max(1U, w.dev_most_full_percent) -
-                       prev_run_time;
-
-               if (w.dev_most_full_percent < 20 && throttle > 0) {
-                       r->throttled_until_iotime = io_start +
-                               div_u64(w.dev_most_full_capacity *
-                                       (20 - w.dev_most_full_percent),
-                                       50);
-
-                       if (atomic64_read(&clock->now) + clock->max_slop <
-                           r->throttled_until_iotime) {
-                               r->throttled_until_cputime = start + throttle;
-                               r->state = REBALANCE_THROTTLED;
-
-                               bch2_kthread_io_clock_wait(clock,
-                                       r->throttled_until_iotime,
-                                       throttle);
-                               continue;
-                       }
-               }
+               ret = k.k->type == KEY_TYPE_cookie
+                       ? do_rebalance_scan(ctxt, k.k->p.inode,
+                                           le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie))
+                       : do_rebalance_extent(ctxt, k.k->p, &extent_iter);
 
-               /* minimum 1 mb/sec: */
-               r->pd.rate.rate =
-                       max_t(u64, 1 << 11,
-                             r->pd.rate.rate *
-                             max(p.dev_most_full_percent, 1U) /
-                             max(w.dev_most_full_percent, 1U));
-
-               io_start        = atomic64_read(&clock->now);
-               p               = w;
-               prev_start      = start;
-               prev_cputime    = cputime;
-
-               r->state = REBALANCE_RUNNING;
-               memset(&move_stats, 0, sizeof(move_stats));
-               rebalance_work_reset(c);
-
-               bch2_move_data(c,
-                              0,               POS_MIN,
-                              BTREE_ID_NR,     POS_MAX,
-                              /* ratelimiting disabled for now */
-                              NULL, /*  &r->pd.rate, */
-                              &move_stats,
-                              writepoint_ptr(&c->rebalance_write_point),
-                              true,
-                              rebalance_pred, NULL);
+               if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+                       continue;
+               if (ret)
+                       break;
+
+               bch2_btree_iter_advance(&rebalance_work_iter);
        }
 
-       return 0;
+       bch2_trans_iter_exit(trans, &extent_iter);
+       bch2_trans_iter_exit(trans, &rebalance_work_iter);
+       bch2_move_stats_exit(&r->scan_stats, c);
+
+       if (!ret &&
+           !kthread_should_stop() &&
+           !atomic64_read(&r->work_stats.sectors_seen) &&
+           !atomic64_read(&r->scan_stats.sectors_seen)) {
+               bch2_trans_unlock_long(trans);
+               rebalance_wait(c);
+       }
+
+       if (!bch2_err_matches(ret, EROFS))
+               bch_err_fn(c, ret);
+       return ret;
 }
 
-void bch2_rebalance_work_to_text(struct printbuf *out, struct bch_fs *c)
+static int bch2_rebalance_thread(void *arg)
 {
+       struct bch_fs *c = arg;
        struct bch_fs_rebalance *r = &c->rebalance;
-       struct rebalance_work w = rebalance_work(c);
+       struct moving_context ctxt;
+       int ret;
 
-       if (!out->nr_tabstops)
-               printbuf_tabstop_push(out, 20);
+       set_freezable();
 
-       prt_printf(out, "fullest_dev (%i):", w.dev_most_full_idx);
-       prt_tab(out);
+       bch2_moving_ctxt_init(&ctxt, c, NULL, &r->work_stats,
+                             writepoint_ptr(&c->rebalance_write_point),
+                             true);
 
-       prt_human_readable_u64(out, w.dev_most_full_work << 9);
-       prt_printf(out, "/");
-       prt_human_readable_u64(out, w.dev_most_full_capacity << 9);
-       prt_newline(out);
+       while (!kthread_should_stop() &&
+              !(ret = do_rebalance(&ctxt)))
+               ;
 
-       prt_printf(out, "total work:");
-       prt_tab(out);
+       bch2_moving_ctxt_exit(&ctxt);
 
-       prt_human_readable_u64(out, w.total_work << 9);
-       prt_printf(out, "/");
-       prt_human_readable_u64(out, c->capacity << 9);
-       prt_newline(out);
+       return 0;
+}
+
+void bch2_rebalance_status_to_text(struct printbuf *out, struct bch_fs *c)
+{
+       struct bch_fs_rebalance *r = &c->rebalance;
 
-       prt_printf(out, "rate:");
-       prt_tab(out);
-       prt_printf(out, "%u", r->pd.rate.rate);
+       prt_str(out, bch2_rebalance_state_strs[r->state]);
        prt_newline(out);
+       printbuf_indent_add(out, 2);
 
        switch (r->state) {
-       case REBALANCE_WAITING:
-               prt_printf(out, "waiting");
+       case BCH_REBALANCE_waiting: {
+               u64 now = atomic64_read(&c->io_clock[WRITE].now);
+
+               prt_str(out, "io wait duration:  ");
+               bch2_prt_human_readable_s64(out, r->wait_iotime_end - r->wait_iotime_start);
+               prt_newline(out);
+
+               prt_str(out, "io wait remaining: ");
+               bch2_prt_human_readable_s64(out, r->wait_iotime_end - now);
+               prt_newline(out);
+
+               prt_str(out, "duration waited:   ");
+               bch2_pr_time_units(out, ktime_get_real_ns() - r->wait_wallclock_start);
+               prt_newline(out);
                break;
-       case REBALANCE_THROTTLED:
-               prt_printf(out, "throttled for %lu sec or ",
-                      (r->throttled_until_cputime - jiffies) / HZ);
-               prt_human_readable_u64(out,
-                           (r->throttled_until_iotime -
-                            atomic64_read(&c->io_clock[WRITE].now)) << 9);
-               prt_printf(out, " io");
+       }
+       case BCH_REBALANCE_working:
+               bch2_move_stats_to_text(out, &r->work_stats);
                break;
-       case REBALANCE_RUNNING:
-               prt_printf(out, "running");
+       case BCH_REBALANCE_scanning:
+               bch2_move_stats_to_text(out, &r->scan_stats);
                break;
        }
        prt_newline(out);
+       printbuf_indent_sub(out, 2);
 }
 
 void bch2_rebalance_stop(struct bch_fs *c)
@@ -361,6 +461,4 @@ int bch2_rebalance_start(struct bch_fs *c)
 void bch2_fs_rebalance_init(struct bch_fs *c)
 {
        bch2_pd_controller_init(&c->rebalance.pd);
-
-       atomic64_set(&c->rebalance.work_unknown_dev, S64_MAX);
 }
index 7ade0bb81cce8d1ac0a12819a44f3eb2d2e8f79d..28a52638f16cc113848cf3925758e12d510dc247 100644 (file)
@@ -4,6 +4,9 @@
 
 #include "rebalance_types.h"
 
+int bch2_set_rebalance_needs_scan(struct bch_fs *, u64 inum);
+int bch2_set_fs_needs_rebalance(struct bch_fs *);
+
 static inline void rebalance_wakeup(struct bch_fs *c)
 {
        struct task_struct *p;
@@ -15,11 +18,7 @@ static inline void rebalance_wakeup(struct bch_fs *c)
        rcu_read_unlock();
 }
 
-void bch2_rebalance_add_key(struct bch_fs *, struct bkey_s_c,
-                           struct bch_io_opts *);
-void bch2_rebalance_add_work(struct bch_fs *, u64);
-
-void bch2_rebalance_work_to_text(struct printbuf *, struct bch_fs *);
+void bch2_rebalance_status_to_text(struct printbuf *, struct bch_fs *);
 
 void bch2_rebalance_stop(struct bch_fs *);
 int bch2_rebalance_start(struct bch_fs *);
index 7462a92e95985d91cdc454485d045659240dd0fc..0fffb536c1d0c1b65d1a2a68730cab49f7535db2 100644 (file)
@@ -2,25 +2,36 @@
 #ifndef _BCACHEFS_REBALANCE_TYPES_H
 #define _BCACHEFS_REBALANCE_TYPES_H
 
+#include "bbpos_types.h"
 #include "move_types.h"
 
-enum rebalance_state {
-       REBALANCE_WAITING,
-       REBALANCE_THROTTLED,
-       REBALANCE_RUNNING,
+#define BCH_REBALANCE_STATES()         \
+       x(waiting)                      \
+       x(working)                      \
+       x(scanning)
+
+enum bch_rebalance_states {
+#define x(t)   BCH_REBALANCE_##t,
+       BCH_REBALANCE_STATES()
+#undef x
 };
 
 struct bch_fs_rebalance {
-       struct task_struct __rcu *thread;
+       struct task_struct __rcu        *thread;
        struct bch_pd_controller pd;
 
-       atomic64_t              work_unknown_dev;
+       enum bch_rebalance_states       state;
+       u64                             wait_iotime_start;
+       u64                             wait_iotime_end;
+       u64                             wait_wallclock_start;
+
+       struct bch_move_stats           work_stats;
 
-       enum rebalance_state    state;
-       u64                     throttled_until_iotime;
-       unsigned long           throttled_until_cputime;
+       struct bbpos                    scan_start;
+       struct bbpos                    scan_end;
+       struct bch_move_stats           scan_stats;
 
-       unsigned                enabled:1;
+       unsigned                        enabled:1;
 };
 
 #endif /* _BCACHEFS_REBALANCE_TYPES_H */
index 4cd660650e5bda264283f10624b52a3f02910b37..9c30500ce9200af8be8f71a50f5fa02c356e4400 100644 (file)
@@ -23,6 +23,7 @@
 #include "logged_ops.h"
 #include "move.h"
 #include "quota.h"
+#include "rebalance.h"
 #include "recovery.h"
 #include "replicas.h"
 #include "sb-clean.h"
@@ -182,7 +183,7 @@ static int bch2_journal_replay(struct bch_fs *c)
                             bch2_journal_replay_key(trans, k));
                if (ret) {
                        bch_err(c, "journal replay: error while replaying key at btree %s level %u: %s",
-                               bch2_btree_ids[k->btree_id], k->level, bch2_err_str(ret));
+                               bch2_btree_id_str(k->btree_id), k->level, bch2_err_str(ret));
                        goto err;
                }
        }
@@ -225,7 +226,7 @@ static int journal_replay_entry_early(struct bch_fs *c,
 
                if (entry->u64s) {
                        r->level = entry->level;
-                       bkey_copy(&r->key, &entry->start[0]);
+                       bkey_copy(&r->key, (struct bkey_i *) entry->start);
                        r->error = 0;
                } else {
                        r->error = -EIO;
@@ -364,10 +365,12 @@ static int read_btree_roots(struct bch_fs *c)
                }
 
                if (r->error) {
-                       __fsck_err(c, btree_id_is_alloc(i)
+                       __fsck_err(c,
+                                  btree_id_is_alloc(i)
                                   ? FSCK_CAN_IGNORE : 0,
+                                  btree_root_bkey_invalid,
                                   "invalid btree root %s",
-                                  bch2_btree_ids[i]);
+                                  bch2_btree_id_str(i));
                        if (i == BTREE_ID_alloc)
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                }
@@ -375,8 +378,9 @@ static int read_btree_roots(struct bch_fs *c)
                ret = bch2_btree_root_read(c, i, &r->key, r->level);
                if (ret) {
                        fsck_err(c,
+                                btree_root_read_error,
                                 "error reading btree root %s",
-                                bch2_btree_ids[i]);
+                                bch2_btree_id_str(i));
                        if (btree_id_is_alloc(i))
                                c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                        ret = 0;
@@ -713,6 +717,7 @@ int bch2_fs_recovery(struct bch_fs *c)
                if (mustfix_fsck_err_on(c->sb.clean &&
                                        last_journal_entry &&
                                        !journal_entry_empty(last_journal_entry), c,
+                               clean_but_journal_not_empty,
                                "filesystem marked clean but journal not empty")) {
                        c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
                        SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
@@ -720,7 +725,9 @@ int bch2_fs_recovery(struct bch_fs *c)
                }
 
                if (!last_journal_entry) {
-                       fsck_err_on(!c->sb.clean, c, "no journal entries found");
+                       fsck_err_on(!c->sb.clean, c,
+                                   dirty_but_no_journal_entries,
+                                   "no journal entries found");
                        if (clean)
                                goto use_clean;
 
@@ -728,6 +735,13 @@ int bch2_fs_recovery(struct bch_fs *c)
                                if (*i) {
                                        last_journal_entry = &(*i)->j;
                                        (*i)->ignore = false;
+                                       /*
+                                        * This was probably a NO_FLUSH entry,
+                                        * so last_seq was garbage - but we know
+                                        * we're only using a single journal
+                                        * entry, set it here:
+                                        */
+                                       (*i)->j.last_seq = (*i)->j.seq;
                                        break;
                                }
                }
@@ -901,7 +915,7 @@ out:
        }
        kfree(clean);
 
-       if (!ret && test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags)) {
+       if (!ret && test_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags)) {
                bch2_fs_read_write_early(c);
                bch2_delete_dead_snapshots_async(c);
        }
@@ -946,16 +960,12 @@ int bch2_fs_initialize(struct bch_fs *c)
        for (i = 0; i < BTREE_ID_NR; i++)
                bch2_btree_root_alloc(c, i);
 
-       for_each_online_member(ca, c, i)
+       for_each_member_device(ca, c, i)
                bch2_dev_usage_init(ca);
 
-       for_each_online_member(ca, c, i) {
-               ret = bch2_dev_journal_alloc(ca);
-               if (ret) {
-                       percpu_ref_put(&ca->io_ref);
-                       goto err;
-               }
-       }
+       ret = bch2_fs_journal_alloc(c);
+       if (ret)
+               goto err;
 
        /*
         * journal_res_get() will crash if called before this has
@@ -973,15 +983,13 @@ int bch2_fs_initialize(struct bch_fs *c)
         * btree updates
         */
        bch_verbose(c, "marking superblocks");
-       for_each_member_device(ca, c, i) {
-               ret = bch2_trans_mark_dev_sb(c, ca);
-               if (ret) {
-                       percpu_ref_put(&ca->ref);
-                       goto err;
-               }
+       ret = bch2_trans_mark_dev_sbs(c);
+       bch_err_msg(c, ret, "marking superblocks");
+       if (ret)
+               goto err;
 
+       for_each_online_member(ca, c, i)
                ca->new_fs_bucket_idx = 0;
-       }
 
        ret = bch2_fs_freespace_init(c);
        if (ret)
index fbfa9d831d6f2382647c77dbcd0c6c15e82976f7..515e3d62c2ac9ec481694985ddaa7b1722760d6f 100644 (file)
@@ -14,6 +14,8 @@
        x(snapshots_read,               PASS_ALWAYS)                                            \
        x(check_topology,               0)                                                      \
        x(check_allocations,            PASS_FSCK)                                              \
+       x(trans_mark_dev_sbs,           PASS_ALWAYS|PASS_SILENT)                                \
+       x(fs_journal_alloc,             PASS_ALWAYS|PASS_SILENT)                                \
        x(set_may_go_rw,                PASS_ALWAYS|PASS_SILENT)                                \
        x(journal_replay,               PASS_ALWAYS)                                            \
        x(check_alloc_info,             PASS_FSCK)                                              \
        x(check_snapshot_trees,         PASS_FSCK)                                              \
        x(check_snapshots,              PASS_FSCK)                                              \
        x(check_subvols,                PASS_FSCK)                                              \
-       x(delete_dead_snapshots,        PASS_FSCK|PASS_UNCLEAN)                                 \
+       x(delete_dead_snapshots,        PASS_FSCK)                                              \
        x(fs_upgrade_for_subvolumes,    0)                                                      \
        x(resume_logged_ops,            PASS_ALWAYS)                                            \
        x(check_inodes,                 PASS_FSCK)                                              \
        x(check_extents,                PASS_FSCK)                                              \
+       x(check_indirect_extents,       PASS_FSCK)                                              \
        x(check_dirents,                PASS_FSCK)                                              \
        x(check_xattrs,                 PASS_FSCK)                                              \
        x(check_root,                   PASS_FSCK)                                              \
@@ -39,6 +42,7 @@
        x(check_nlinks,                 PASS_FSCK)                                              \
        x(delete_dead_inodes,           PASS_FSCK|PASS_UNCLEAN)                                 \
        x(fix_reflink_p,                0)                                                      \
+       x(set_fs_needs_rebalance,       0)                                                      \
 
 enum bch_recovery_pass {
 #define x(n, when)     BCH_RECOVERY_PASS_##n,
index d77d0ea9affffe14b71a7a5a377a38c1a4143672..6e1bfe9feb59e4abe96e1dc74b30196fa5766f48 100644 (file)
@@ -7,6 +7,7 @@
 #include "inode.h"
 #include "io_misc.h"
 #include "io_write.h"
+#include "rebalance.h"
 #include "reflink.h"
 #include "subvolume.h"
 #include "super-io.h"
@@ -27,7 +28,7 @@ static inline unsigned bkey_type_to_indirect(const struct bkey *k)
 
 /* reflink pointers */
 
-int bch2_reflink_p_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_reflink_p_invalid(struct bch_fs *c, struct bkey_s_c k,
                           enum bkey_invalid_flags flags,
                           struct printbuf *err)
 {
@@ -74,7 +75,7 @@ bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r
 
 /* indirect extents */
 
-int bch2_reflink_v_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_reflink_v_invalid(struct bch_fs *c, struct bkey_s_c k,
                           enum bkey_invalid_flags flags,
                           struct printbuf *err)
 {
@@ -103,28 +104,29 @@ bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r
 }
 #endif
 
+static inline void check_indirect_extent_deleting(struct bkey_i *new, unsigned *flags)
+{
+       if ((*flags & BTREE_TRIGGER_INSERT) && !*bkey_refcount(new)) {
+               new->k.type = KEY_TYPE_deleted;
+               new->k.size = 0;
+               set_bkey_val_u64s(&new->k, 0);;
+               *flags &= ~BTREE_TRIGGER_INSERT;
+       }
+}
+
 int bch2_trans_mark_reflink_v(struct btree_trans *trans,
                              enum btree_id btree_id, unsigned level,
                              struct bkey_s_c old, struct bkey_i *new,
                              unsigned flags)
 {
-       if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
-               struct bkey_i_reflink_v *r = bkey_i_to_reflink_v(new);
-
-               if (!r->v.refcount) {
-                       r->k.type = KEY_TYPE_deleted;
-                       r->k.size = 0;
-                       set_bkey_val_u64s(&r->k, 0);
-                       return 0;
-               }
-       }
+       check_indirect_extent_deleting(new, &flags);
 
        return bch2_trans_mark_extent(trans, btree_id, level, old, new, flags);
 }
 
 /* indirect inline data */
 
-int bch2_indirect_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_indirect_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
                                      enum bkey_invalid_flags flags,
                                      struct printbuf *err)
 {
@@ -132,7 +134,7 @@ int bch2_indirect_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
 }
 
 void bch2_indirect_inline_data_to_text(struct printbuf *out,
-                                       struct bch_fs *c, struct bkey_s_c k)
+                                      struct bch_fs *c, struct bkey_s_c k)
 {
        struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k);
        unsigned datalen = bkey_inline_data_bytes(k.k);
@@ -147,16 +149,7 @@ int bch2_trans_mark_indirect_inline_data(struct btree_trans *trans,
                              struct bkey_s_c old, struct bkey_i *new,
                              unsigned flags)
 {
-       if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
-               struct bkey_i_indirect_inline_data *r =
-                       bkey_i_to_indirect_inline_data(new);
-
-               if (!r->v.refcount) {
-                       r->k.type = KEY_TYPE_deleted;
-                       r->k.size = 0;
-                       set_bkey_val_u64s(&r->k, 0);
-               }
-       }
+       check_indirect_extent_deleting(new, &flags);
 
        return 0;
 }
@@ -260,8 +253,9 @@ s64 bch2_remap_range(struct bch_fs *c,
        struct bpos dst_start = POS(dst_inum.inum, dst_offset);
        struct bpos src_start = POS(src_inum.inum, src_offset);
        struct bpos dst_end = dst_start, src_end = src_start;
+       struct bch_io_opts opts;
        struct bpos src_want;
-       u64 dst_done;
+       u64 dst_done = 0;
        u32 dst_snapshot, src_snapshot;
        int ret = 0, ret2 = 0;
 
@@ -277,6 +271,10 @@ s64 bch2_remap_range(struct bch_fs *c,
        bch2_bkey_buf_init(&new_src);
        trans = bch2_trans_get(c);
 
+       ret = bch2_inum_opts_get(trans, src_inum, &opts);
+       if (ret)
+               goto err;
+
        bch2_trans_iter_init(trans, &src_iter, BTREE_ID_extents, src_start,
                             BTREE_ITER_INTENT);
        bch2_trans_iter_init(trans, &dst_iter, BTREE_ID_extents, dst_start,
@@ -360,10 +358,13 @@ s64 bch2_remap_range(struct bch_fs *c,
                                min(src_k.k->p.offset - src_want.offset,
                                    dst_end.offset - dst_iter.pos.offset));
 
-               ret = bch2_extent_update(trans, dst_inum, &dst_iter,
-                                        new_dst.k, &disk_res,
-                                        new_i_size, i_sectors_delta,
-                                        true);
+               ret =   bch2_bkey_set_needs_rebalance(c, new_dst.k,
+                                       opts.background_target,
+                                       opts.background_compression) ?:
+                       bch2_extent_update(trans, dst_inum, &dst_iter,
+                                       new_dst.k, &disk_res,
+                                       new_i_size, i_sectors_delta,
+                                       true);
                bch2_disk_reservation_put(c, &disk_res);
        }
        bch2_trans_iter_exit(trans, &dst_iter);
@@ -394,7 +395,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 
                bch2_trans_iter_exit(trans, &inode_iter);
        } while (bch2_err_matches(ret2, BCH_ERR_transaction_restart));
-
+err:
        bch2_trans_put(trans);
        bch2_bkey_buf_exit(&new_src, c);
        bch2_bkey_buf_exit(&new_dst, c);
index fe52538efb522940cd6cf7b853bc6d5d4afe3c94..8ccf3f9c4939eed45d9d9dc231bf5632506de836 100644 (file)
@@ -4,7 +4,7 @@
 
 enum bkey_invalid_flags;
 
-int bch2_reflink_p_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_reflink_p_invalid(struct bch_fs *, struct bkey_s_c,
                           enum bkey_invalid_flags, struct printbuf *);
 void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *,
                            struct bkey_s_c);
@@ -19,7 +19,7 @@ bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
        .min_val_size   = 16,                                   \
 })
 
-int bch2_reflink_v_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_reflink_v_invalid(struct bch_fs *, struct bkey_s_c,
                           enum bkey_invalid_flags, struct printbuf *);
 void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
                            struct bkey_s_c);
@@ -35,7 +35,7 @@ int bch2_trans_mark_reflink_v(struct btree_trans *, enum btree_id, unsigned,
        .min_val_size   = 8,                                    \
 })
 
-int bch2_indirect_inline_data_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_indirect_inline_data_invalid(struct bch_fs *, struct bkey_s_c,
                                      enum bkey_invalid_flags, struct printbuf *);
 void bch2_indirect_inline_data_to_text(struct printbuf *,
                                struct bch_fs *, struct bkey_s_c);
index cef2a0447b8601a191ac39852ae4a7e0d9fc317d..1c3ae13bfced1d8ce9eeee118cb6e9fe1552e7a5 100644 (file)
@@ -462,18 +462,13 @@ int bch2_replicas_gc_end(struct bch_fs *c, int ret)
 {
        lockdep_assert_held(&c->replicas_gc_lock);
 
-       if (ret)
-               goto err;
-
        mutex_lock(&c->sb_lock);
        percpu_down_write(&c->mark_lock);
 
-       ret = bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc);
-       if (ret)
-               goto err;
+       ret =   ret ?:
+               bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc) ?:
+               replicas_table_update(c, &c->replicas_gc);
 
-       ret = replicas_table_update(c, &c->replicas_gc);
-err:
        kfree(c->replicas_gc.entries);
        c->replicas_gc.entries = NULL;
 
@@ -579,12 +574,9 @@ retry:
 
        bch2_cpu_replicas_sort(&new);
 
-       ret = bch2_cpu_replicas_to_sb_replicas(c, &new);
-       if (ret)
-               goto err;
+       ret =   bch2_cpu_replicas_to_sb_replicas(c, &new) ?:
+               replicas_table_update(c, &new);
 
-       ret = replicas_table_update(c, &new);
-err:
        kfree(new.entries);
 
        percpu_up_write(&c->mark_lock);
index 61203d7c8d36054400ff0d1ee2e1838966168a73..e151ada1c8bd2db23e31bc1f6f027815585e8ab2 100644 (file)
@@ -82,6 +82,7 @@ int bch2_verify_superblock_clean(struct bch_fs *c,
        int ret = 0;
 
        if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
+                       sb_clean_journal_seq_mismatch,
                        "superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
                        le64_to_cpu(clean->journal_seq),
                        le64_to_cpu(j->seq))) {
@@ -119,6 +120,7 @@ int bch2_verify_superblock_clean(struct bch_fs *c,
                                    k1->k.u64s != k2->k.u64s ||
                                    memcmp(k1, k2, bkey_bytes(&k1->k)) ||
                                    l1 != l2, c,
+                       sb_clean_btree_root_mismatch,
                        "superblock btree root %u doesn't match journal after clean shutdown\n"
                        "sb:      l=%u %s\n"
                        "journal: l=%u %s\n", i,
@@ -140,6 +142,7 @@ struct bch_sb_field_clean *bch2_read_superblock_clean(struct bch_fs *c)
        sb_clean = bch2_sb_field_get(c->disk_sb.sb, clean);
 
        if (fsck_err_on(!sb_clean, c,
+                       sb_clean_missing,
                        "superblock marked clean but clean section not present")) {
                SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
                c->sb.clean = false;
@@ -373,7 +376,7 @@ void bch2_fs_mark_clean(struct bch_fs *c)
 
        entry = sb_clean->start;
        bch2_journal_super_entries_add_common(c, &entry, 0);
-       entry = bch2_btree_roots_to_journal_entries(c, entry, entry);
+       entry = bch2_btree_roots_to_journal_entries(c, entry, 0);
        BUG_ON((void *) entry > vstruct_end(&sb_clean->field));
 
        memset(entry, 0,
diff --git a/fs/bcachefs/sb-errors.c b/fs/bcachefs/sb-errors.c
new file mode 100644 (file)
index 0000000..f0930ab
--- /dev/null
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "bcachefs.h"
+#include "sb-errors.h"
+#include "super-io.h"
+
+static const char * const bch2_sb_error_strs[] = {
+#define x(t, n, ...) [n] = #t,
+       BCH_SB_ERRS()
+       NULL
+};
+
+static void bch2_sb_error_id_to_text(struct printbuf *out, enum bch_sb_error_id id)
+{
+       if (id < BCH_SB_ERR_MAX)
+               prt_str(out, bch2_sb_error_strs[id]);
+       else
+               prt_printf(out, "(unknown error %u)", id);
+}
+
+static inline unsigned bch2_sb_field_errors_nr_entries(struct bch_sb_field_errors *e)
+{
+       return e
+               ? (bch2_sb_field_bytes(&e->field) - sizeof(*e)) / sizeof(e->entries[0])
+               : 0;
+}
+
+static inline unsigned bch2_sb_field_errors_u64s(unsigned nr)
+{
+       return (sizeof(struct bch_sb_field_errors) +
+               sizeof(struct bch_sb_field_error_entry) * nr) / sizeof(u64);
+}
+
+static int bch2_sb_errors_validate(struct bch_sb *sb, struct bch_sb_field *f,
+                                  struct printbuf *err)
+{
+       struct bch_sb_field_errors *e = field_to_type(f, errors);
+       unsigned i, nr = bch2_sb_field_errors_nr_entries(e);
+
+       for (i = 0; i < nr; i++) {
+               if (!BCH_SB_ERROR_ENTRY_NR(&e->entries[i])) {
+                       prt_printf(err, "entry with count 0 (id ");
+                       bch2_sb_error_id_to_text(err, BCH_SB_ERROR_ENTRY_ID(&e->entries[i]));
+                       prt_printf(err, ")");
+                       return -BCH_ERR_invalid_sb_errors;
+               }
+
+               if (i + 1 < nr &&
+                   BCH_SB_ERROR_ENTRY_ID(&e->entries[i]) >=
+                   BCH_SB_ERROR_ENTRY_ID(&e->entries[i + 1])) {
+                       prt_printf(err, "entries out of order");
+                       return -BCH_ERR_invalid_sb_errors;
+               }
+       }
+
+       return 0;
+}
+
+static void bch2_sb_errors_to_text(struct printbuf *out, struct bch_sb *sb,
+                                  struct bch_sb_field *f)
+{
+       struct bch_sb_field_errors *e = field_to_type(f, errors);
+       unsigned i, nr = bch2_sb_field_errors_nr_entries(e);
+
+       if (out->nr_tabstops <= 1)
+               printbuf_tabstop_push(out, 16);
+
+       for (i = 0; i < nr; i++) {
+               bch2_sb_error_id_to_text(out, BCH_SB_ERROR_ENTRY_ID(&e->entries[i]));
+               prt_tab(out);
+               prt_u64(out, BCH_SB_ERROR_ENTRY_NR(&e->entries[i]));
+               prt_tab(out);
+               bch2_prt_datetime(out, le64_to_cpu(e->entries[i].last_error_time));
+               prt_newline(out);
+       }
+}
+
+const struct bch_sb_field_ops bch_sb_field_ops_errors = {
+       .validate       = bch2_sb_errors_validate,
+       .to_text        = bch2_sb_errors_to_text,
+};
+
+void bch2_sb_error_count(struct bch_fs *c, enum bch_sb_error_id err)
+{
+       bch_sb_errors_cpu *e = &c->fsck_error_counts;
+       struct bch_sb_error_entry_cpu n = {
+               .id = err,
+               .nr = 1,
+               .last_error_time = ktime_get_real_seconds()
+       };
+       unsigned i;
+
+       mutex_lock(&c->fsck_error_counts_lock);
+       for (i = 0; i < e->nr; i++) {
+               if (err == e->data[i].id) {
+                       e->data[i].nr++;
+                       e->data[i].last_error_time = n.last_error_time;
+                       goto out;
+               }
+               if (err < e->data[i].id)
+                       break;
+       }
+
+       if (darray_make_room(e, 1))
+               goto out;
+
+       darray_insert_item(e, i, n);
+out:
+       mutex_unlock(&c->fsck_error_counts_lock);
+}
+
+void bch2_sb_errors_from_cpu(struct bch_fs *c)
+{
+       bch_sb_errors_cpu *src = &c->fsck_error_counts;
+       struct bch_sb_field_errors *dst =
+               bch2_sb_field_resize(&c->disk_sb, errors,
+                                    bch2_sb_field_errors_u64s(src->nr));
+       unsigned i;
+
+       if (!dst)
+               return;
+
+       for (i = 0; i < src->nr; i++) {
+               SET_BCH_SB_ERROR_ENTRY_ID(&dst->entries[i], src->data[i].id);
+               SET_BCH_SB_ERROR_ENTRY_NR(&dst->entries[i], src->data[i].nr);
+               dst->entries[i].last_error_time = cpu_to_le64(src->data[i].last_error_time);
+       }
+}
+
+static int bch2_sb_errors_to_cpu(struct bch_fs *c)
+{
+       struct bch_sb_field_errors *src = bch2_sb_field_get(c->disk_sb.sb, errors);
+       bch_sb_errors_cpu *dst = &c->fsck_error_counts;
+       unsigned i, nr = bch2_sb_field_errors_nr_entries(src);
+       int ret;
+
+       if (!nr)
+               return 0;
+
+       mutex_lock(&c->fsck_error_counts_lock);
+       ret = darray_make_room(dst, nr);
+       if (ret)
+               goto err;
+
+       dst->nr = nr;
+
+       for (i = 0; i < nr; i++) {
+               dst->data[i].id = BCH_SB_ERROR_ENTRY_ID(&src->entries[i]);
+               dst->data[i].nr = BCH_SB_ERROR_ENTRY_NR(&src->entries[i]);
+               dst->data[i].last_error_time = le64_to_cpu(src->entries[i].last_error_time);
+       }
+err:
+       mutex_unlock(&c->fsck_error_counts_lock);
+
+       return ret;
+}
+
+void bch2_fs_sb_errors_exit(struct bch_fs *c)
+{
+       darray_exit(&c->fsck_error_counts);
+}
+
+void bch2_fs_sb_errors_init_early(struct bch_fs *c)
+{
+       mutex_init(&c->fsck_error_counts_lock);
+       darray_init(&c->fsck_error_counts);
+}
+
+int bch2_fs_sb_errors_init(struct bch_fs *c)
+{
+       return bch2_sb_errors_to_cpu(c);
+}
diff --git a/fs/bcachefs/sb-errors.h b/fs/bcachefs/sb-errors.h
new file mode 100644 (file)
index 0000000..5a09a53
--- /dev/null
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_ERRORS_H
+#define _BCACHEFS_SB_ERRORS_H
+
+#include "sb-errors_types.h"
+
+#define BCH_SB_ERRS()                                                  \
+       x(clean_but_journal_not_empty,                          0)      \
+       x(dirty_but_no_journal_entries,                         1)      \
+       x(dirty_but_no_journal_entries_post_drop_nonflushes,    2)      \
+       x(sb_clean_journal_seq_mismatch,                        3)      \
+       x(sb_clean_btree_root_mismatch,                         4)      \
+       x(sb_clean_missing,                                     5)      \
+       x(jset_unsupported_version,                             6)      \
+       x(jset_unknown_csum,                                    7)      \
+       x(jset_last_seq_newer_than_seq,                         8)      \
+       x(jset_past_bucket_end,                                 9)      \
+       x(jset_seq_blacklisted,                                 10)     \
+       x(journal_entries_missing,                              11)     \
+       x(journal_entry_replicas_not_marked,                    12)     \
+       x(journal_entry_past_jset_end,                          13)     \
+       x(journal_entry_replicas_data_mismatch,                 14)     \
+       x(journal_entry_bkey_u64s_0,                            15)     \
+       x(journal_entry_bkey_past_end,                          16)     \
+       x(journal_entry_bkey_bad_format,                        17)     \
+       x(journal_entry_bkey_invalid,                           18)     \
+       x(journal_entry_btree_root_bad_size,                    19)     \
+       x(journal_entry_blacklist_bad_size,                     20)     \
+       x(journal_entry_blacklist_v2_bad_size,                  21)     \
+       x(journal_entry_blacklist_v2_start_past_end,            22)     \
+       x(journal_entry_usage_bad_size,                         23)     \
+       x(journal_entry_data_usage_bad_size,                    24)     \
+       x(journal_entry_clock_bad_size,                         25)     \
+       x(journal_entry_clock_bad_rw,                           26)     \
+       x(journal_entry_dev_usage_bad_size,                     27)     \
+       x(journal_entry_dev_usage_bad_dev,                      28)     \
+       x(journal_entry_dev_usage_bad_pad,                      29)     \
+       x(btree_node_unreadable,                                30)     \
+       x(btree_node_fault_injected,                            31)     \
+       x(btree_node_bad_magic,                                 32)     \
+       x(btree_node_bad_seq,                                   33)     \
+       x(btree_node_unsupported_version,                       34)     \
+       x(btree_node_bset_older_than_sb_min,                    35)     \
+       x(btree_node_bset_newer_than_sb,                        36)     \
+       x(btree_node_data_missing,                              37)     \
+       x(btree_node_bset_after_end,                            38)     \
+       x(btree_node_replicas_sectors_written_mismatch,         39)     \
+       x(btree_node_replicas_data_mismatch,                    40)     \
+       x(bset_unknown_csum,                                    41)     \
+       x(bset_bad_csum,                                        42)     \
+       x(bset_past_end_of_btree_node,                          43)     \
+       x(bset_wrong_sector_offset,                             44)     \
+       x(bset_empty,                                           45)     \
+       x(bset_bad_seq,                                         46)     \
+       x(bset_blacklisted_journal_seq,                         47)     \
+       x(first_bset_blacklisted_journal_seq,                   48)     \
+       x(btree_node_bad_btree,                                 49)     \
+       x(btree_node_bad_level,                                 50)     \
+       x(btree_node_bad_min_key,                               51)     \
+       x(btree_node_bad_max_key,                               52)     \
+       x(btree_node_bad_format,                                53)     \
+       x(btree_node_bkey_past_bset_end,                        54)     \
+       x(btree_node_bkey_bad_format,                           55)     \
+       x(btree_node_bad_bkey,                                  56)     \
+       x(btree_node_bkey_out_of_order,                         57)     \
+       x(btree_root_bkey_invalid,                              58)     \
+       x(btree_root_read_error,                                59)     \
+       x(btree_root_bad_min_key,                               50)     \
+       x(btree_root_bad_max_key,                               61)     \
+       x(btree_node_read_error,                                62)     \
+       x(btree_node_topology_bad_min_key,                      63)     \
+       x(btree_node_topology_bad_max_key,                      64)     \
+       x(btree_node_topology_overwritten_by_prev_node,         65)     \
+       x(btree_node_topology_overwritten_by_next_node,         66)     \
+       x(btree_node_topology_interior_node_empty,              67)     \
+       x(fs_usage_hidden_wrong,                                68)     \
+       x(fs_usage_btree_wrong,                                 69)     \
+       x(fs_usage_data_wrong,                                  70)     \
+       x(fs_usage_cached_wrong,                                71)     \
+       x(fs_usage_reserved_wrong,                              72)     \
+       x(fs_usage_persistent_reserved_wrong,                   73)     \
+       x(fs_usage_nr_inodes_wrong,                             74)     \
+       x(fs_usage_replicas_wrong,                              75)     \
+       x(dev_usage_buckets_wrong,                              76)     \
+       x(dev_usage_sectors_wrong,                              77)     \
+       x(dev_usage_fragmented_wrong,                           78)     \
+       x(dev_usage_buckets_ec_wrong,                           79)     \
+       x(bkey_version_in_future,                               80)     \
+       x(bkey_u64s_too_small,                                  81)     \
+       x(bkey_invalid_type_for_btree,                          82)     \
+       x(bkey_extent_size_zero,                                83)     \
+       x(bkey_extent_size_greater_than_offset,                 84)     \
+       x(bkey_size_nonzero,                                    85)     \
+       x(bkey_snapshot_nonzero,                                86)     \
+       x(bkey_snapshot_zero,                                   87)     \
+       x(bkey_at_pos_max,                                      88)     \
+       x(bkey_before_start_of_btree_node,                      89)     \
+       x(bkey_after_end_of_btree_node,                         90)     \
+       x(bkey_val_size_nonzero,                                91)     \
+       x(bkey_val_size_too_small,                              92)     \
+       x(alloc_v1_val_size_bad,                                93)     \
+       x(alloc_v2_unpack_error,                                94)     \
+       x(alloc_v3_unpack_error,                                95)     \
+       x(alloc_v4_val_size_bad,                                96)     \
+       x(alloc_v4_backpointers_start_bad,                      97)     \
+       x(alloc_key_data_type_bad,                              98)     \
+       x(alloc_key_empty_but_have_data,                        99)     \
+       x(alloc_key_dirty_sectors_0,                            100)    \
+       x(alloc_key_data_type_inconsistency,                    101)    \
+       x(alloc_key_to_missing_dev_bucket,                      102)    \
+       x(alloc_key_cached_inconsistency,                       103)    \
+       x(alloc_key_cached_but_read_time_zero,                  104)    \
+       x(alloc_key_to_missing_lru_entry,                       105)    \
+       x(alloc_key_data_type_wrong,                            106)    \
+       x(alloc_key_gen_wrong,                                  107)    \
+       x(alloc_key_dirty_sectors_wrong,                        108)    \
+       x(alloc_key_cached_sectors_wrong,                       109)    \
+       x(alloc_key_stripe_wrong,                               110)    \
+       x(alloc_key_stripe_redundancy_wrong,                    111)    \
+       x(bucket_sector_count_overflow,                         112)    \
+       x(bucket_metadata_type_mismatch,                        113)    \
+       x(need_discard_key_wrong,                               114)    \
+       x(freespace_key_wrong,                                  115)    \
+       x(freespace_hole_missing,                               116)    \
+       x(bucket_gens_val_size_bad,                             117)    \
+       x(bucket_gens_key_wrong,                                118)    \
+       x(bucket_gens_hole_wrong,                               119)    \
+       x(bucket_gens_to_invalid_dev,                           120)    \
+       x(bucket_gens_to_invalid_buckets,                       121)    \
+       x(bucket_gens_nonzero_for_invalid_buckets,              122)    \
+       x(need_discard_freespace_key_to_invalid_dev_bucket,     123)    \
+       x(need_discard_freespace_key_bad,                       124)    \
+       x(backpointer_pos_wrong,                                125)    \
+       x(backpointer_to_missing_device,                        126)    \
+       x(backpointer_to_missing_alloc,                         127)    \
+       x(backpointer_to_missing_ptr,                           128)    \
+       x(lru_entry_at_time_0,                                  129)    \
+       x(lru_entry_to_invalid_bucket,                          130)    \
+       x(lru_entry_bad,                                        131)    \
+       x(btree_ptr_val_too_big,                                132)    \
+       x(btree_ptr_v2_val_too_big,                             133)    \
+       x(btree_ptr_has_non_ptr,                                134)    \
+       x(extent_ptrs_invalid_entry,                            135)    \
+       x(extent_ptrs_no_ptrs,                                  136)    \
+       x(extent_ptrs_too_many_ptrs,                            137)    \
+       x(extent_ptrs_redundant_crc,                            138)    \
+       x(extent_ptrs_redundant_stripe,                         139)    \
+       x(extent_ptrs_unwritten,                                140)    \
+       x(extent_ptrs_written_and_unwritten,                    141)    \
+       x(ptr_to_invalid_device,                                142)    \
+       x(ptr_to_duplicate_device,                              143)    \
+       x(ptr_after_last_bucket,                                144)    \
+       x(ptr_before_first_bucket,                              145)    \
+       x(ptr_spans_multiple_buckets,                           146)    \
+       x(ptr_to_missing_backpointer,                           147)    \
+       x(ptr_to_missing_alloc_key,                             148)    \
+       x(ptr_to_missing_replicas_entry,                        149)    \
+       x(ptr_to_missing_stripe,                                150)    \
+       x(ptr_to_incorrect_stripe,                              151)    \
+       x(ptr_gen_newer_than_bucket_gen,                        152)    \
+       x(ptr_too_stale,                                        153)    \
+       x(stale_dirty_ptr,                                      154)    \
+       x(ptr_bucket_data_type_mismatch,                        155)    \
+       x(ptr_cached_and_erasure_coded,                         156)    \
+       x(ptr_crc_uncompressed_size_too_small,                  157)    \
+       x(ptr_crc_csum_type_unknown,                            158)    \
+       x(ptr_crc_compression_type_unknown,                     159)    \
+       x(ptr_crc_redundant,                                    160)    \
+       x(ptr_crc_uncompressed_size_too_big,                    161)    \
+       x(ptr_crc_nonce_mismatch,                               162)    \
+       x(ptr_stripe_redundant,                                 163)    \
+       x(reservation_key_nr_replicas_invalid,                  164)    \
+       x(reflink_v_refcount_wrong,                             165)    \
+       x(reflink_p_to_missing_reflink_v,                       166)    \
+       x(stripe_pos_bad,                                       167)    \
+       x(stripe_val_size_bad,                                  168)    \
+       x(stripe_sector_count_wrong,                            169)    \
+       x(snapshot_tree_pos_bad,                                170)    \
+       x(snapshot_tree_to_missing_snapshot,                    171)    \
+       x(snapshot_tree_to_missing_subvol,                      172)    \
+       x(snapshot_tree_to_wrong_subvol,                        173)    \
+       x(snapshot_tree_to_snapshot_subvol,                     174)    \
+       x(snapshot_pos_bad,                                     175)    \
+       x(snapshot_parent_bad,                                  176)    \
+       x(snapshot_children_not_normalized,                     177)    \
+       x(snapshot_child_duplicate,                             178)    \
+       x(snapshot_child_bad,                                   179)    \
+       x(snapshot_skiplist_not_normalized,                     180)    \
+       x(snapshot_skiplist_bad,                                181)    \
+       x(snapshot_should_not_have_subvol,                      182)    \
+       x(snapshot_to_bad_snapshot_tree,                        183)    \
+       x(snapshot_bad_depth,                                   184)    \
+       x(snapshot_bad_skiplist,                                185)    \
+       x(subvol_pos_bad,                                       186)    \
+       x(subvol_not_master_and_not_snapshot,                   187)    \
+       x(subvol_to_missing_root,                               188)    \
+       x(subvol_root_wrong_bi_subvol,                          189)    \
+       x(bkey_in_missing_snapshot,                             190)    \
+       x(inode_pos_inode_nonzero,                              191)    \
+       x(inode_pos_blockdev_range,                             192)    \
+       x(inode_unpack_error,                                   193)    \
+       x(inode_str_hash_invalid,                               194)    \
+       x(inode_v3_fields_start_bad,                            195)    \
+       x(inode_snapshot_mismatch,                              196)    \
+       x(inode_unlinked_but_clean,                             197)    \
+       x(inode_unlinked_but_nlink_nonzero,                     198)    \
+       x(inode_checksum_type_invalid,                          199)    \
+       x(inode_compression_type_invalid,                       200)    \
+       x(inode_subvol_root_but_not_dir,                        201)    \
+       x(inode_i_size_dirty_but_clean,                         202)    \
+       x(inode_i_sectors_dirty_but_clean,                      203)    \
+       x(inode_i_sectors_wrong,                                204)    \
+       x(inode_dir_wrong_nlink,                                205)    \
+       x(inode_dir_multiple_links,                             206)    \
+       x(inode_multiple_links_but_nlink_0,                     207)    \
+       x(inode_wrong_backpointer,                              208)    \
+       x(inode_wrong_nlink,                                    209)    \
+       x(inode_unreachable,                                    210)    \
+       x(deleted_inode_but_clean,                              211)    \
+       x(deleted_inode_missing,                                212)    \
+       x(deleted_inode_is_dir,                                 213)    \
+       x(deleted_inode_not_unlinked,                           214)    \
+       x(extent_overlapping,                                   215)    \
+       x(extent_in_missing_inode,                              216)    \
+       x(extent_in_non_reg_inode,                              217)    \
+       x(extent_past_end_of_inode,                             218)    \
+       x(dirent_empty_name,                                    219)    \
+       x(dirent_val_too_big,                                   220)    \
+       x(dirent_name_too_long,                                 221)    \
+       x(dirent_name_embedded_nul,                             222)    \
+       x(dirent_name_dot_or_dotdot,                            223)    \
+       x(dirent_name_has_slash,                                224)    \
+       x(dirent_d_type_wrong,                                  225)    \
+       x(dirent_d_parent_subvol_wrong,                         226)    \
+       x(dirent_in_missing_dir_inode,                          227)    \
+       x(dirent_in_non_dir_inode,                              228)    \
+       x(dirent_to_missing_inode,                              229)    \
+       x(dirent_to_missing_subvol,                             230)    \
+       x(dirent_to_itself,                                     231)    \
+       x(quota_type_invalid,                                   232)    \
+       x(xattr_val_size_too_small,                             233)    \
+       x(xattr_val_size_too_big,                               234)    \
+       x(xattr_invalid_type,                                   235)    \
+       x(xattr_name_invalid_chars,                             236)    \
+       x(xattr_in_missing_inode,                               237)    \
+       x(root_subvol_missing,                                  238)    \
+       x(root_dir_missing,                                     239)    \
+       x(root_inode_not_dir,                                   240)    \
+       x(dir_loop,                                             241)    \
+       x(hash_table_key_duplicate,                             242)    \
+       x(hash_table_key_wrong_offset,                          243)
+
+enum bch_sb_error_id {
+#define x(t, n) BCH_FSCK_ERR_##t = n,
+       BCH_SB_ERRS()
+#undef x
+       BCH_SB_ERR_MAX
+};
+
+extern const struct bch_sb_field_ops bch_sb_field_ops_errors;
+
+void bch2_sb_error_count(struct bch_fs *, enum bch_sb_error_id);
+
+void bch2_sb_errors_from_cpu(struct bch_fs *);
+
+void bch2_fs_sb_errors_exit(struct bch_fs *);
+void bch2_fs_sb_errors_init_early(struct bch_fs *);
+int bch2_fs_sb_errors_init(struct bch_fs *);
+
+#endif /* _BCACHEFS_SB_ERRORS_H */
diff --git a/fs/bcachefs/sb-errors_types.h b/fs/bcachefs/sb-errors_types.h
new file mode 100644 (file)
index 0000000..b1c0998
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BCACHEFS_SB_ERRORS_TYPES_H
+#define _BCACHEFS_SB_ERRORS_TYPES_H
+
+#include "darray.h"
+
+struct bch_sb_error_entry_cpu {
+       u64                     id:16,
+                               nr:48;
+       u64                     last_error_time;
+};
+
+typedef DARRAY(struct bch_sb_error_entry_cpu) bch_sb_errors_cpu;
+
+#endif /* _BCACHEFS_SB_ERRORS_TYPES_H */
index 6dd85bb996feed0c318595215d4d72f2c63b0371..bed0f857fe5b7627639ee24202dba1002910eee7 100644 (file)
@@ -7,21 +7,28 @@
 #include "sb-members.h"
 #include "super-io.h"
 
-/* Code for bch_sb_field_members_v1: */
+#define x(t, n, ...) [n] = #t,
+static const char * const bch2_iops_measurements[] = {
+       BCH_IOPS_MEASUREMENTS()
+       NULL
+};
 
-static struct bch_member *members_v2_get_mut(struct bch_sb_field_members_v2 *mi, int i)
-{
-       return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
-}
+char * const bch2_member_error_strs[] = {
+       BCH_MEMBER_ERROR_TYPES()
+       NULL
+};
+#undef x
+
+/* Code for bch_sb_field_members_v1: */
 
 struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i)
 {
-       return members_v2_get_mut(bch2_sb_field_get(sb, members_v2), i);
+       return __bch2_members_v2_get_mut(bch2_sb_field_get(sb, members_v2), i);
 }
 
 static struct bch_member members_v2_get(struct bch_sb_field_members_v2 *mi, int i)
 {
-       struct bch_member ret, *p = members_v2_get_mut(mi, i);
+       struct bch_member ret, *p = __bch2_members_v2_get_mut(mi, i);
        memset(&ret, 0, sizeof(ret));
        memcpy(&ret, p, min_t(size_t, le16_to_cpu(mi->member_bytes), sizeof(ret)));
        return ret;
@@ -36,7 +43,8 @@ static struct bch_member members_v1_get(struct bch_sb_field_members_v1 *mi, int
 {
        struct bch_member ret, *p = members_v1_get_mut(mi, i);
        memset(&ret, 0, sizeof(ret));
-       memcpy(&ret, p, min_t(size_t, sizeof(struct bch_member), sizeof(ret))); return ret;
+       memcpy(&ret, p, min_t(size_t, BCH_MEMBER_V1_BYTES, sizeof(ret)));
+       return ret;
 }
 
 struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i)
@@ -62,7 +70,7 @@ static int sb_members_v2_resize_entries(struct bch_fs *c)
 
                for (int i = c->disk_sb.sb->nr_devices - 1; i >= 0; --i) {
                        void *dst = (void *) mi->_members + (i * sizeof(struct bch_member));
-                       memmove(dst, members_v2_get_mut(mi, i), le16_to_cpu(mi->member_bytes));
+                       memmove(dst, __bch2_members_v2_get_mut(mi, i), le16_to_cpu(mi->member_bytes));
                        memset(dst + le16_to_cpu(mi->member_bytes),
                               0, (sizeof(struct bch_member) - le16_to_cpu(mi->member_bytes)));
                }
@@ -71,7 +79,7 @@ static int sb_members_v2_resize_entries(struct bch_fs *c)
        return 0;
 }
 
-int bch2_members_v2_init(struct bch_fs *c)
+int bch2_sb_members_v2_init(struct bch_fs *c)
 {
        struct bch_sb_field_members_v1 *mi1;
        struct bch_sb_field_members_v2 *mi2;
@@ -91,7 +99,7 @@ int bch2_members_v2_init(struct bch_fs *c)
        return sb_members_v2_resize_entries(c);
 }
 
-int bch_members_cpy_v2_v1(struct bch_sb_handle *disk_sb)
+int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb)
 {
        struct bch_sb_field_members_v1 *mi1;
        struct bch_sb_field_members_v2 *mi2;
@@ -105,7 +113,7 @@ int bch_members_cpy_v2_v1(struct bch_sb_handle *disk_sb)
        mi2 = bch2_sb_field_get(disk_sb->sb, members_v2);
 
        for (unsigned i = 0; i < disk_sb->sb->nr_devices; i++)
-               memcpy(members_v1_get_mut(mi1, i), members_v2_get_mut(mi2, i), BCH_MEMBER_V1_BYTES);
+               memcpy(members_v1_get_mut(mi1, i), __bch2_members_v2_get_mut(mi2, i), BCH_MEMBER_V1_BYTES);
 
        return 0;
 }
@@ -155,6 +163,8 @@ static void member_to_text(struct printbuf *out,
        u64 bucket_size = le16_to_cpu(m.bucket_size);
        u64 device_size = le64_to_cpu(m.nbuckets) * bucket_size;
 
+       if (!bch2_member_exists(&m))
+               return;
 
        prt_printf(out, "Device:");
        prt_tab(out);
@@ -163,6 +173,21 @@ static void member_to_text(struct printbuf *out,
 
        printbuf_indent_add(out, 2);
 
+       prt_printf(out, "Label:");
+       prt_tab(out);
+       if (BCH_MEMBER_GROUP(&m)) {
+               unsigned idx = BCH_MEMBER_GROUP(&m) - 1;
+
+               if (idx < disk_groups_nr(gi))
+                       prt_printf(out, "%s (%u)",
+                                  gi->entries[idx].label, idx);
+               else
+                       prt_printf(out, "(bad disk labels section)");
+       } else {
+               prt_printf(out, "(none)");
+       }
+       prt_newline(out);
+
        prt_printf(out, "UUID:");
        prt_tab(out);
        pr_uuid(out, m.uuid.b);
@@ -173,6 +198,13 @@ static void member_to_text(struct printbuf *out,
        prt_units_u64(out, device_size << 9);
        prt_newline(out);
 
+       for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
+               prt_printf(out, "%s errors:", bch2_member_error_strs[i]);
+               prt_tab(out);
+               prt_u64(out, le64_to_cpu(m.errors[i]));
+               prt_newline(out);
+       }
+
        for (unsigned i = 0; i < BCH_IOPS_NR; i++) {
                prt_printf(out, "%s iops:", bch2_iops_measurements[i]);
                prt_tab(out);
@@ -198,7 +230,7 @@ static void member_to_text(struct printbuf *out,
        prt_printf(out, "Last mount:");
        prt_tab(out);
        if (m.last_mount)
-               pr_time(out, le64_to_cpu(m.last_mount));
+               bch2_prt_datetime(out, le64_to_cpu(m.last_mount));
        else
                prt_printf(out, "(never)");
        prt_newline(out);
@@ -211,21 +243,6 @@ static void member_to_text(struct printbuf *out,
                   : "unknown");
        prt_newline(out);
 
-       prt_printf(out, "Label:");
-       prt_tab(out);
-       if (BCH_MEMBER_GROUP(&m)) {
-               unsigned idx = BCH_MEMBER_GROUP(&m) - 1;
-
-               if (idx < disk_groups_nr(gi))
-                       prt_printf(out, "%s (%u)",
-                                  gi->entries[idx].label, idx);
-               else
-                       prt_printf(out, "(bad disk labels section)");
-       } else {
-               prt_printf(out, "(none)");
-       }
-       prt_newline(out);
-
        prt_printf(out, "Data allowed:");
        prt_tab(out);
        if (BCH_MEMBER_DATA_ALLOWED(&m))
@@ -262,8 +279,7 @@ static int bch2_sb_members_v1_validate(struct bch_sb *sb,
        struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1);
        unsigned i;
 
-       if ((void *) members_v1_get_mut(mi, sb->nr_devices)  >
-           vstruct_end(&mi->field)) {
+       if ((void *) members_v1_get_mut(mi, sb->nr_devices) > vstruct_end(&mi->field)) {
                prt_printf(err, "too many devices for section size");
                return -BCH_ERR_invalid_sb_members;
        }
@@ -286,10 +302,8 @@ static void bch2_sb_members_v1_to_text(struct printbuf *out, struct bch_sb *sb,
        struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
        unsigned i;
 
-       for (i = 0; i < sb->nr_devices; i++) {
-               struct bch_member m = members_v1_get(mi, i);
-               member_to_text(out, m, gi, sb, i);
-       }
+       for (i = 0; i < sb->nr_devices; i++)
+               member_to_text(out, members_v1_get(mi, i), gi, sb, i);
 }
 
 const struct bch_sb_field_ops bch_sb_field_ops_members_v1 = {
@@ -304,10 +318,8 @@ static void bch2_sb_members_v2_to_text(struct printbuf *out, struct bch_sb *sb,
        struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups);
        unsigned i;
 
-       for (i = 0; i < sb->nr_devices; i++) {
-               struct bch_member m = members_v2_get(mi, i);
-               member_to_text(out, m, gi, sb, i);
-       }
+       for (i = 0; i < sb->nr_devices; i++)
+               member_to_text(out, members_v2_get(mi, i), gi, sb, i);
 }
 
 static int bch2_sb_members_v2_validate(struct bch_sb *sb,
@@ -315,7 +327,7 @@ static int bch2_sb_members_v2_validate(struct bch_sb *sb,
                                       struct printbuf *err)
 {
        struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2);
-       size_t mi_bytes = (void *) members_v2_get_mut(mi, sb->nr_devices) -
+       size_t mi_bytes = (void *) __bch2_members_v2_get_mut(mi, sb->nr_devices) -
                (void *) mi;
 
        if (mi_bytes > vstruct_bytes(&mi->field)) {
@@ -337,3 +349,72 @@ const struct bch_sb_field_ops bch_sb_field_ops_members_v2 = {
        .validate       = bch2_sb_members_v2_validate,
        .to_text        = bch2_sb_members_v2_to_text,
 };
+
+void bch2_sb_members_from_cpu(struct bch_fs *c)
+{
+       struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
+       struct bch_dev *ca;
+       unsigned i, e;
+
+       rcu_read_lock();
+       for_each_member_device_rcu(ca, c, i, NULL) {
+               struct bch_member *m = __bch2_members_v2_get_mut(mi, i);
+
+               for (e = 0; e < BCH_MEMBER_ERROR_NR; e++)
+                       m->errors[e] = cpu_to_le64(atomic64_read(&ca->errors[e]));
+       }
+       rcu_read_unlock();
+}
+
+void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca)
+{
+       struct bch_fs *c = ca->fs;
+       struct bch_member m;
+
+       mutex_lock(&ca->fs->sb_lock);
+       m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx);
+       mutex_unlock(&ca->fs->sb_lock);
+
+       printbuf_tabstop_push(out, 12);
+
+       prt_str(out, "IO errors since filesystem creation");
+       prt_newline(out);
+
+       printbuf_indent_add(out, 2);
+       for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
+               prt_printf(out, "%s:", bch2_member_error_strs[i]);
+               prt_tab(out);
+               prt_u64(out, atomic64_read(&ca->errors[i]));
+               prt_newline(out);
+       }
+       printbuf_indent_sub(out, 2);
+
+       prt_str(out, "IO errors since ");
+       bch2_pr_time_units(out, (ktime_get_real_seconds() - le64_to_cpu(m.errors_reset_time)) * NSEC_PER_SEC);
+       prt_str(out, " ago");
+       prt_newline(out);
+
+       printbuf_indent_add(out, 2);
+       for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
+               prt_printf(out, "%s:", bch2_member_error_strs[i]);
+               prt_tab(out);
+               prt_u64(out, atomic64_read(&ca->errors[i]) - le64_to_cpu(m.errors_at_reset[i]));
+               prt_newline(out);
+       }
+       printbuf_indent_sub(out, 2);
+}
+
+void bch2_dev_errors_reset(struct bch_dev *ca)
+{
+       struct bch_fs *c = ca->fs;
+       struct bch_member *m;
+
+       mutex_lock(&c->sb_lock);
+       m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+       for (unsigned i = 0; i < ARRAY_SIZE(m->errors_at_reset); i++)
+               m->errors_at_reset[i] = cpu_to_le64(atomic64_read(&ca->errors[i]));
+       m->errors_reset_time = cpu_to_le64(ktime_get_real_seconds());
+
+       bch2_write_super(c);
+       mutex_unlock(&c->sb_lock);
+}
index 430f3457bfd46b2999fdc724176e2059a51b0d2f..03613e3eb8e3df5bcda99218fb0168cf1f8bf9f8 100644 (file)
@@ -2,8 +2,16 @@
 #ifndef _BCACHEFS_SB_MEMBERS_H
 #define _BCACHEFS_SB_MEMBERS_H
 
-int bch2_members_v2_init(struct bch_fs *c);
-int bch_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
+extern char * const bch2_member_error_strs[];
+
+static inline struct bch_member *
+__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
+{
+       return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
+}
+
+int bch2_sb_members_v2_init(struct bch_fs *c);
+int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
 struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
 struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);
 
@@ -179,4 +187,41 @@ static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
 extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
 extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;
 
+static inline bool bch2_member_exists(struct bch_member *m)
+{
+       return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
+}
+
+static inline bool bch2_dev_exists(struct bch_sb *sb, unsigned dev)
+{
+       if (dev < sb->nr_devices) {
+               struct bch_member m = bch2_sb_member_get(sb, dev);
+               return bch2_member_exists(&m);
+       }
+       return false;
+}
+
+static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
+{
+       return (struct bch_member_cpu) {
+               .nbuckets       = le64_to_cpu(mi->nbuckets),
+               .first_bucket   = le16_to_cpu(mi->first_bucket),
+               .bucket_size    = le16_to_cpu(mi->bucket_size),
+               .group          = BCH_MEMBER_GROUP(mi),
+               .state          = BCH_MEMBER_STATE(mi),
+               .discard        = BCH_MEMBER_DISCARD(mi),
+               .data_allowed   = BCH_MEMBER_DATA_ALLOWED(mi),
+               .durability     = BCH_MEMBER_DURABILITY(mi)
+                       ? BCH_MEMBER_DURABILITY(mi) - 1
+                       : 1,
+               .freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
+               .valid          = bch2_member_exists(mi),
+       };
+}
+
+void bch2_sb_members_from_cpu(struct bch_fs *);
+
+void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
+void bch2_dev_errors_reset(struct bch_dev *);
+
 #endif /* _BCACHEFS_SB_MEMBERS_H */
index b684b9f00c1b2bb731ea9551b1548d9675f42705..b775cf0fb7cbf211a3f388cf78de0de6f33c581c 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/sched/task.h>
 #include <linux/slab.h>
 
+#include <trace/events/lock.h>
+
 #include "six.h"
 
 #ifdef DEBUG
@@ -462,11 +464,12 @@ static int six_lock_slowpath(struct six_lock *lock, enum six_lock_type type,
                smp_mb__after_atomic();
        }
 
+       trace_contention_begin(lock, 0);
+       lock_contended(&lock->dep_map, ip);
+
        if (six_optimistic_spin(lock, type))
                goto out;
 
-       lock_contended(&lock->dep_map, ip);
-
        wait->task              = current;
        wait->lock_want         = type;
        wait->lock_acquired     = false;
@@ -546,6 +549,7 @@ out:
                six_clear_bitmask(lock, SIX_LOCK_HELD_write);
                six_lock_wakeup(lock, atomic_read(&lock->state), SIX_LOCK_read);
        }
+       trace_contention_end(lock, 0);
 
        return ret;
 }
index 4982468bfe1182910fa6a03e3b691bf7ce8be758..e9af77b384c76c694194c53b348706e354df9a22 100644 (file)
@@ -30,17 +30,18 @@ void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
                   le32_to_cpu(t.v->root_snapshot));
 }
 
-int bch2_snapshot_tree_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_snapshot_tree_invalid(struct bch_fs *c, struct bkey_s_c k,
                               enum bkey_invalid_flags flags,
                               struct printbuf *err)
 {
-       if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
-           bkey_lt(k.k->p, POS(0, 1))) {
-               prt_printf(err, "bad pos");
-               return -BCH_ERR_invalid_bkey;
-       }
+       int ret = 0;
 
-       return 0;
+       bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
+                        bkey_lt(k.k->p, POS(0, 1)), c, err,
+                        snapshot_tree_pos_bad,
+                        "bad pos");
+fsck_err:
+       return ret;
 }
 
 int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
@@ -202,68 +203,60 @@ void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
                           le32_to_cpu(s.v->skip[2]));
 }
 
-int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_snapshot_invalid(struct bch_fs *c, struct bkey_s_c k,
                          enum bkey_invalid_flags flags,
                          struct printbuf *err)
 {
        struct bkey_s_c_snapshot s;
        u32 i, id;
+       int ret = 0;
 
-       if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
-           bkey_lt(k.k->p, POS(0, 1))) {
-               prt_printf(err, "bad pos");
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
+                        bkey_lt(k.k->p, POS(0, 1)), c, err,
+                        snapshot_pos_bad,
+                        "bad pos");
 
        s = bkey_s_c_to_snapshot(k);
 
        id = le32_to_cpu(s.v->parent);
-       if (id && id <= k.k->p.offset) {
-               prt_printf(err, "bad parent node (%u <= %llu)",
-                      id, k.k->p.offset);
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(id && id <= k.k->p.offset, c, err,
+                        snapshot_parent_bad,
+                        "bad parent node (%u <= %llu)",
+                        id, k.k->p.offset);
 
-       if (le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1])) {
-               prt_printf(err, "children not normalized");
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]), c, err,
+                        snapshot_children_not_normalized,
+                        "children not normalized");
 
-       if (s.v->children[0] &&
-           s.v->children[0] == s.v->children[1]) {
-               prt_printf(err, "duplicate child nodes");
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1], c, err,
+                        snapshot_child_duplicate,
+                        "duplicate child nodes");
 
        for (i = 0; i < 2; i++) {
                id = le32_to_cpu(s.v->children[i]);
 
-               if (id >= k.k->p.offset) {
-                       prt_printf(err, "bad child node (%u >= %llu)",
-                              id, k.k->p.offset);
-                       return -BCH_ERR_invalid_bkey;
-               }
+               bkey_fsck_err_on(id >= k.k->p.offset, c, err,
+                                snapshot_child_bad,
+                                "bad child node (%u >= %llu)",
+                                id, k.k->p.offset);
        }
 
        if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
-               if (le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
-                   le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2])) {
-                       prt_printf(err, "skiplist not normalized");
-                       return -BCH_ERR_invalid_bkey;
-               }
+               bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
+                                le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]), c, err,
+                                snapshot_skiplist_not_normalized,
+                                "skiplist not normalized");
 
                for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
                        id = le32_to_cpu(s.v->skip[i]);
 
-                       if ((id && !s.v->parent) ||
-                           (id && id <= k.k->p.offset)) {
-                               prt_printf(err, "bad skiplist node %u", id);
-                               return -BCH_ERR_invalid_bkey;
-                       }
+                       bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent), c, err,
+                                        snapshot_skiplist_bad,
+                                        "bad skiplist node %u", id);
                }
        }
-
-       return 0;
+fsck_err:
+       return ret;
 }
 
 static void __set_is_ancestor_bitmap(struct bch_fs *c, u32 id)
@@ -325,8 +318,9 @@ int bch2_mark_snapshot(struct btree_trans *trans,
                __set_is_ancestor_bitmap(c, id);
 
                if (BCH_SNAPSHOT_DELETED(s.v)) {
-                       set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
-                       c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_delete_dead_snapshots);
+                       set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
+                       if (c->curr_recovery_pass > BCH_RECOVERY_PASS_delete_dead_snapshots)
+                               bch2_delete_dead_snapshots_async(c);
                }
        } else {
                memset(t, 0, sizeof(*t));
@@ -529,7 +523,7 @@ static int check_snapshot_tree(struct btree_trans *trans,
        if (fsck_err_on(ret ||
                        root_id != bch2_snapshot_root(c, root_id) ||
                        st.k->p.offset != le32_to_cpu(s.tree),
-                       c,
+                       c, snapshot_tree_to_missing_snapshot,
                        "snapshot tree points to missing/incorrect snapshot:\n  %s",
                        (bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
                ret = bch2_btree_delete_at(trans, iter, 0);
@@ -541,17 +535,20 @@ static int check_snapshot_tree(struct btree_trans *trans,
        if (ret && !bch2_err_matches(ret, ENOENT))
                goto err;
 
-       if (fsck_err_on(ret, c,
+       if (fsck_err_on(ret,
+                       c, snapshot_tree_to_missing_subvol,
                        "snapshot tree points to missing subvolume:\n  %s",
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
            fsck_err_on(!bch2_snapshot_is_ancestor_early(c,
                                                le32_to_cpu(subvol.snapshot),
-                                               root_id), c,
+                                               root_id),
+                       c, snapshot_tree_to_wrong_subvol,
                        "snapshot tree points to subvolume that does not point to snapshot in this tree:\n  %s",
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf)) ||
-           fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol), c,
+           fsck_err_on(BCH_SUBVOLUME_SNAP(&subvol),
+                       c, snapshot_tree_to_snapshot_subvol,
                        "snapshot tree points to snapshot subvolume:\n  %s",
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, st.s_c), buf.buf))) {
@@ -787,7 +784,9 @@ static int check_snapshot(struct btree_trans *trans,
                        goto err;
                }
        } else {
-               if (fsck_err_on(s.subvol, c, "snapshot should not point to subvol:\n  %s",
+               if (fsck_err_on(s.subvol,
+                               c, snapshot_should_not_have_subvol,
+                               "snapshot should not point to subvol:\n  %s",
                                (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                        u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
                        ret = PTR_ERR_OR_ZERO(u);
@@ -803,7 +802,8 @@ static int check_snapshot(struct btree_trans *trans,
        if (ret < 0)
                goto err;
 
-       if (fsck_err_on(!ret, c, "snapshot points to missing/incorrect tree:\n  %s",
+       if (fsck_err_on(!ret, c, snapshot_to_bad_snapshot_tree,
+                       "snapshot points to missing/incorrect tree:\n  %s",
                        (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
                ret = snapshot_tree_ptr_repair(trans, iter, k, &s);
                if (ret)
@@ -815,7 +815,8 @@ static int check_snapshot(struct btree_trans *trans,
 
        if (le32_to_cpu(s.depth) != real_depth &&
            (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
-            fsck_err(c, "snapshot with incorrect depth field, should be %u:\n  %s",
+            fsck_err(c, snapshot_bad_depth,
+                     "snapshot with incorrect depth field, should be %u:\n  %s",
                      real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
                u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
                ret = PTR_ERR_OR_ZERO(u);
@@ -832,7 +833,8 @@ static int check_snapshot(struct btree_trans *trans,
 
        if (!ret &&
            (c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
-            fsck_err(c, "snapshot with bad skiplist field:\n  %s",
+            fsck_err(c, snapshot_bad_skiplist,
+                     "snapshot with bad skiplist field:\n  %s",
                      (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
                u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
                ret = PTR_ERR_OR_ZERO(u);
@@ -1251,13 +1253,7 @@ static int move_key_to_correct_snapshot(struct btree_trans *trans,
        return 0;
 }
 
-/*
- * For a given snapshot, if it doesn't have a subvolume that points to it, and
- * it doesn't have child snapshot nodes - it's now redundant and we can mark it
- * as deleted.
- */
-static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btree_iter *iter,
-                                         struct bkey_s_c k)
+static int bch2_snapshot_needs_delete(struct btree_trans *trans, struct bkey_s_c k)
 {
        struct bkey_s_c_snapshot snap;
        u32 children[2];
@@ -1278,10 +1274,21 @@ static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct btre
                bch2_snapshot_live(trans, children[1]);
        if (ret < 0)
                return ret;
+       return !ret;
+}
 
-       if (!ret)
-               return bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
-       return 0;
+/*
+ * For a given snapshot, if it doesn't have a subvolume that points to it, and
+ * it doesn't have child snapshot nodes - it's now redundant and we can mark it
+ * as deleted.
+ */
+static int bch2_delete_redundant_snapshot(struct btree_trans *trans, struct bkey_s_c k)
+{
+       int ret = bch2_snapshot_needs_delete(trans, k);
+
+       return ret <= 0
+               ? ret
+               : bch2_snapshot_node_set_deleted(trans, k.k->p.offset);
 }
 
 static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
@@ -1342,12 +1349,12 @@ static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
                        u32 id = le32_to_cpu(s->v.skip[j]);
 
                        if (snapshot_list_has_id(deleted, id)) {
-                               id = depth > 1
-                                       ? bch2_snapshot_nth_parent_skip(c,
+                               id = bch2_snapshot_nth_parent_skip(c,
                                                        parent,
-                                                       get_random_u32_below(depth - 1),
-                                                       deleted)
-                                       : parent;
+                                                       depth > 1
+                                                       ? get_random_u32_below(depth - 1)
+                                                       : 0,
+                                                       deleted);
                                s->v.skip[j] = cpu_to_le32(id);
                        }
                }
@@ -1369,6 +1376,9 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
        u32 *i, id;
        int ret = 0;
 
+       if (!test_and_clear_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags))
+               return 0;
+
        if (!test_bit(BCH_FS_STARTED, &c->flags)) {
                ret = bch2_fs_read_write_early(c);
                if (ret) {
@@ -1386,7 +1396,7 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
        ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots,
                        POS_MIN, 0, k,
                        NULL, NULL, 0,
-               bch2_delete_redundant_snapshot(trans, &iter, k));
+               bch2_delete_redundant_snapshot(trans, k));
        if (ret) {
                bch_err_msg(c, ret, "deleting redundant snapshots");
                goto err;
@@ -1427,6 +1437,15 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
                if (!btree_type_has_snapshots(id))
                        continue;
 
+               /*
+                * deleted inodes btree is maintained by a trigger on the inodes
+                * btree - no work for us to do here, and it's not safe to scan
+                * it because we'll see out of date keys due to the btree write
+                * buffer:
+                */
+               if (id == BTREE_ID_deleted_inodes)
+                       continue;
+
                ret = for_each_btree_key_commit(trans, iter,
                                id, POS_MIN,
                                BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
@@ -1447,6 +1466,7 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
                }
        }
 
+       bch2_trans_unlock(trans);
        down_write(&c->snapshot_create_lock);
 
        for_each_btree_key(trans, iter, BTREE_ID_snapshots,
@@ -1491,8 +1511,6 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
                        goto err_create_lock;
                }
        }
-
-       clear_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
 err_create_lock:
        up_write(&c->snapshot_create_lock);
 err:
@@ -1508,8 +1526,7 @@ void bch2_delete_dead_snapshots_work(struct work_struct *work)
 {
        struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);
 
-       if (test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
-               bch2_delete_dead_snapshots(c);
+       bch2_delete_dead_snapshots(c);
        bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
 }
 
@@ -1520,20 +1537,6 @@ void bch2_delete_dead_snapshots_async(struct bch_fs *c)
                bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
 }
 
-int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
-                                   struct btree_trans_commit_hook *h)
-{
-       struct bch_fs *c = trans->c;
-
-       set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
-
-       if (c->curr_recovery_pass <= BCH_RECOVERY_PASS_delete_dead_snapshots)
-               return 0;
-
-       bch2_delete_dead_snapshots_async(c);
-       return 0;
-}
-
 int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
                                       enum btree_id id,
                                       struct bpos pos)
@@ -1664,6 +1667,26 @@ again:
        return ret ?: trans_was_restarted(trans, restart_count);
 }
 
+static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct bkey_s_c k)
+{
+       struct bch_fs *c = trans->c;
+       struct bkey_s_c_snapshot snap;
+       int ret = 0;
+
+       if (k.k->type != KEY_TYPE_snapshot)
+               return 0;
+
+       snap = bkey_s_c_to_snapshot(k);
+       if (BCH_SNAPSHOT_DELETED(snap.v) ||
+           bch2_snapshot_equiv(c, k.k->p.offset) != k.k->p.offset ||
+           (ret = bch2_snapshot_needs_delete(trans, k)) > 0) {
+               set_bit(BCH_FS_NEED_DELETE_DEAD_SNAPSHOTS, &c->flags);
+               return 0;
+       }
+
+       return ret;
+}
+
 int bch2_snapshots_read(struct bch_fs *c)
 {
        struct btree_iter iter;
@@ -1674,7 +1697,8 @@ int bch2_snapshots_read(struct bch_fs *c)
                for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
                           POS_MIN, 0, k,
                        bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
-                       bch2_snapshot_set_equiv(trans, k)) ?:
+                       bch2_snapshot_set_equiv(trans, k) ?:
+                       bch2_check_snapshot_needs_deletion(trans, k)) ?:
                for_each_btree_key2(trans, iter, BTREE_ID_snapshots,
                           POS_MIN, 0, k,
                           (set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
index de215d9d1252549db99d7d8c6cea352f90264382..f09a22f4423969024ea29224340f6a1a528d2821 100644 (file)
@@ -5,7 +5,7 @@
 enum bkey_invalid_flags;
 
 void bch2_snapshot_tree_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-int bch2_snapshot_tree_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_snapshot_tree_invalid(struct bch_fs *, struct bkey_s_c,
                               enum bkey_invalid_flags, struct printbuf *);
 
 #define bch2_bkey_ops_snapshot_tree ((struct bkey_ops) {       \
@@ -19,7 +19,7 @@ struct bkey_i_snapshot_tree *__bch2_snapshot_tree_create(struct btree_trans *);
 int bch2_snapshot_tree_lookup(struct btree_trans *, u32, struct bch_snapshot_tree *);
 
 void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-int bch2_snapshot_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_snapshot_invalid(struct bch_fs *, struct bkey_s_c,
                          enum bkey_invalid_flags, struct printbuf *);
 int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
                       struct bkey_s_c, struct bkey_s_c, unsigned);
@@ -244,8 +244,6 @@ int bch2_check_snapshot_trees(struct bch_fs *);
 int bch2_check_snapshots(struct bch_fs *);
 
 int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
-int bch2_delete_dead_snapshots_hook(struct btree_trans *,
-                                   struct btree_trans_commit_hook *);
 void bch2_delete_dead_snapshots_work(struct work_struct *);
 
 int __bch2_key_has_snapshot_overwrites(struct btree_trans *, enum btree_id, struct bpos);
index caf2dd7dafff65e636b32d8893ead65bfa6dd150..fccd25aa32426a4233882a9d97cc214cba9dc6f5 100644 (file)
@@ -62,7 +62,8 @@ static int check_subvol(struct btree_trans *trans,
                if (ret)
                        return ret;
 
-               if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset, c,
+               if (fsck_err_on(le32_to_cpu(st.master_subvol) != subvol.k->p.offset,
+                               c, subvol_not_master_and_not_snapshot,
                                "subvolume %llu is not set as snapshot but is not master subvolume",
                                k.k->p.offset)) {
                        struct bkey_i_subvolume *s =
@@ -97,16 +98,17 @@ int bch2_check_subvols(struct bch_fs *c)
 
 /* Subvolumes: */
 
-int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_subvolume_invalid(struct bch_fs *c, struct bkey_s_c k,
                           enum bkey_invalid_flags flags, struct printbuf *err)
 {
-       if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
-           bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
-               prt_printf(err, "invalid pos");
-               return -BCH_ERR_invalid_bkey;
-       }
+       int ret = 0;
 
-       return 0;
+       bkey_fsck_err_on(bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
+                        bkey_gt(k.k->p, SUBVOL_POS_MAX), c, err,
+                        subvol_pos_bad,
+                        "invalid pos");
+fsck_err:
+       return ret;
 }
 
 void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
@@ -230,7 +232,6 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
 {
        struct btree_iter iter;
        struct bkey_s_c_subvolume subvol;
-       struct btree_trans_commit_hook *h;
        u32 snapid;
        int ret = 0;
 
@@ -246,22 +247,8 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
 
        snapid = le32_to_cpu(subvol.v->snapshot);
 
-       ret = bch2_btree_delete_at(trans, &iter, 0);
-       if (ret)
-               goto err;
-
-       ret = bch2_snapshot_node_set_deleted(trans, snapid);
-       if (ret)
-               goto err;
-
-       h = bch2_trans_kmalloc(trans, sizeof(*h));
-       ret = PTR_ERR_OR_ZERO(h);
-       if (ret)
-               goto err;
-
-       h->fn = bch2_delete_dead_snapshots_hook;
-       bch2_trans_commit_hook(trans, h);
-err:
+       ret =   bch2_btree_delete_at(trans, &iter, 0) ?:
+               bch2_snapshot_node_set_deleted(trans, snapid);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
 }
index bb14f92e8687185c4a702e643072695f28e2edf8..a1003d30ab0a0c613b644c54fba09964d9ec4b29 100644 (file)
@@ -9,7 +9,7 @@ enum bkey_invalid_flags;
 
 int bch2_check_subvols(struct bch_fs *);
 
-int bch2_subvolume_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_subvolume_invalid(struct bch_fs *, struct bkey_s_c,
                           enum bkey_invalid_flags, struct printbuf *);
 void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
index 332d41e1c0a32082988f70ded38bd004b35444c0..f4cad903f4d69da7776825f50bf561a1980a02a0 100644 (file)
@@ -13,6 +13,7 @@
 #include "replicas.h"
 #include "quota.h"
 #include "sb-clean.h"
+#include "sb-errors.h"
 #include "sb-members.h"
 #include "super-io.h"
 #include "super.h"
@@ -720,7 +721,7 @@ retry:
        if (opt_defined(*opts, sb))
                goto err;
 
-       printk(KERN_ERR "bcachefs (%s): error reading default superblock: %s",
+       printk(KERN_ERR "bcachefs (%s): error reading default superblock: %s\n",
               path, err.buf);
        printbuf_reset(&err);
 
@@ -782,7 +783,7 @@ got_super:
 
        ret = bch2_sb_validate(sb, &err, READ);
        if (ret) {
-               printk(KERN_ERR "bcachefs (%s): error validating superblock: %s",
+               printk(KERN_ERR "bcachefs (%s): error validating superblock: %s\n",
                       path, err.buf);
                goto err_no_print;
        }
@@ -790,7 +791,7 @@ out:
        printbuf_exit(&err);
        return ret;
 err:
-       printk(KERN_ERR "bcachefs (%s): error reading superblock: %s",
+       printk(KERN_ERR "bcachefs (%s): error reading superblock: %s\n",
               path, err.buf);
 err_no_print:
        bch2_free_super(sb);
@@ -805,7 +806,12 @@ static void write_super_endio(struct bio *bio)
 
        /* XXX: return errors directly */
 
-       if (bch2_dev_io_err_on(bio->bi_status, ca, "superblock write error: %s",
+       if (bch2_dev_io_err_on(bio->bi_status, ca,
+                              bio_data_dir(bio)
+                              ? BCH_MEMBER_ERROR_write
+                              : BCH_MEMBER_ERROR_read,
+                              "superblock %s error: %s",
+                              bio_data_dir(bio) ? "write" : "read",
                               bch2_blk_status_to_str(bio->bi_status)))
                ca->sb_write_error = 1;
 
@@ -892,7 +898,9 @@ int bch2_write_super(struct bch_fs *c)
        SET_BCH_SB_BIG_ENDIAN(c->disk_sb.sb, CPU_BIG_ENDIAN);
 
        bch2_sb_counters_from_cpu(c);
-       bch_members_cpy_v2_v1(&c->disk_sb);
+       bch2_sb_members_from_cpu(c);
+       bch2_sb_members_cpy_v2_v1(&c->disk_sb);
+       bch2_sb_errors_from_cpu(c);
 
        for_each_online_member(ca, c, i)
                bch2_sb_from_fs(c, ca);
@@ -1175,7 +1183,7 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
        prt_printf(out, "Created:");
        prt_tab(out);
        if (sb->time_base_lo)
-               pr_time(out, div_u64(le64_to_cpu(sb->time_base_lo), NSEC_PER_SEC));
+               bch2_prt_datetime(out, div_u64(le64_to_cpu(sb->time_base_lo), NSEC_PER_SEC));
        else
                prt_printf(out, "(not set)");
        prt_newline(out);
index b0d8584f475f1f2290b33e3120500cda726b4473..f5abd102bff7502bd2f142dfde8487c82f8aed29 100644 (file)
@@ -23,6 +23,11 @@ u64 bch2_upgrade_recovery_passes(struct bch_fs *c,
                                 unsigned,
                                 unsigned);
 
+static inline size_t bch2_sb_field_bytes(struct bch_sb_field *f)
+{
+       return le32_to_cpu(f->u64s) * sizeof(u64);
+}
+
 #define field_to_type(_f, _name)                                       \
        container_of_or_null(_f, struct bch_sb_field_##_name, field)
 
@@ -78,41 +83,6 @@ static inline void bch2_check_set_feature(struct bch_fs *c, unsigned feat)
                __bch2_check_set_feature(c, feat);
 }
 
-/* BCH_SB_FIELD_members_v1: */
-
-static inline bool bch2_member_exists(struct bch_member *m)
-{
-       return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
-}
-
-static inline bool bch2_dev_exists(struct bch_sb *sb,
-                                  unsigned dev)
-{
-       if (dev < sb->nr_devices) {
-       struct bch_member m = bch2_sb_member_get(sb, dev);
-               return bch2_member_exists(&m);
-       }
-       return false;
-}
-
-static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
-{
-       return (struct bch_member_cpu) {
-               .nbuckets       = le64_to_cpu(mi->nbuckets),
-               .first_bucket   = le16_to_cpu(mi->first_bucket),
-               .bucket_size    = le16_to_cpu(mi->bucket_size),
-               .group          = BCH_MEMBER_GROUP(mi),
-               .state          = BCH_MEMBER_STATE(mi),
-               .discard        = BCH_MEMBER_DISCARD(mi),
-               .data_allowed   = BCH_MEMBER_DATA_ALLOWED(mi),
-               .durability     = BCH_MEMBER_DURABILITY(mi)
-                       ? BCH_MEMBER_DURABILITY(mi) - 1
-                       : 1,
-               .freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
-               .valid          = bch2_member_exists(mi),
-       };
-}
-
 void bch2_sb_maybe_downgrade(struct bch_fs *);
 void bch2_sb_upgrade(struct bch_fs *, unsigned);
 
index 0e85c22672be85fb09a1ce51ed457d65ab1d2d5c..24672bb31cbe9c479964dffe1d1b979dd66013c7 100644 (file)
@@ -49,6 +49,7 @@
 #include "recovery.h"
 #include "replicas.h"
 #include "sb-clean.h"
+#include "sb-errors.h"
 #include "sb-members.h"
 #include "snapshot.h"
 #include "subvolume.h"
@@ -400,7 +401,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 
        bch_info(c, "going read-write");
 
-       ret = bch2_members_v2_init(c);
+       ret = bch2_sb_members_v2_init(c);
        if (ret)
                goto err;
 
@@ -481,6 +482,7 @@ static void __bch2_fs_free(struct bch_fs *c)
                bch2_time_stats_exit(&c->times[i]);
 
        bch2_free_pending_node_rewrites(c);
+       bch2_fs_sb_errors_exit(c);
        bch2_fs_counters_exit(c);
        bch2_fs_snapshots_exit(c);
        bch2_fs_quota_exit(c);
@@ -713,6 +715,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
        bch2_fs_quota_init(c);
        bch2_fs_ec_init_early(c);
        bch2_fs_move_init(c);
+       bch2_fs_sb_errors_init_early(c);
 
        INIT_LIST_HEAD(&c->list);
 
@@ -729,8 +732,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 
        INIT_LIST_HEAD(&c->journal_iters);
 
-       INIT_LIST_HEAD(&c->fsck_errors);
-       mutex_init(&c->fsck_error_lock);
+       INIT_LIST_HEAD(&c->fsck_error_msgs);
+       mutex_init(&c->fsck_error_msgs_lock);
 
        seqcount_init(&c->gc_pos_lock);
 
@@ -840,6 +843,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
        }
 
        ret = bch2_fs_counters_init(c) ?:
+           bch2_fs_sb_errors_init(c) ?:
            bch2_io_clock_init(&c->io_clock[READ]) ?:
            bch2_io_clock_init(&c->io_clock[WRITE]) ?:
            bch2_fs_journal_init(&c->journal) ?:
@@ -942,15 +946,12 @@ int bch2_fs_start(struct bch_fs *c)
 
        mutex_lock(&c->sb_lock);
 
-       ret = bch2_members_v2_init(c);
+       ret = bch2_sb_members_v2_init(c);
        if (ret) {
                mutex_unlock(&c->sb_lock);
                goto err;
        }
 
-       for_each_online_member(ca, c, i)
-               bch2_sb_from_fs(c, ca);
-
        for_each_online_member(ca, c, i)
                bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);
 
@@ -960,12 +961,6 @@ int bch2_fs_start(struct bch_fs *c)
                bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
 
-       for (i = 0; i < BCH_TRANSACTIONS_NR; i++) {
-               mutex_lock(&c->btree_transaction_stats[i].lock);
-               bch2_time_stats_init(&c->btree_transaction_stats[i].lock_hold_times);
-               mutex_unlock(&c->btree_transaction_stats[i].lock);
-       }
-
        ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
                ? bch2_fs_recovery(c)
                : bch2_fs_initialize(c);
@@ -1140,6 +1135,7 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
                                        struct bch_member *member)
 {
        struct bch_dev *ca;
+       unsigned i;
 
        ca = kzalloc(sizeof(*ca), GFP_KERNEL);
        if (!ca)
@@ -1157,6 +1153,10 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
        bch2_time_stats_init(&ca->io_latency[WRITE]);
 
        ca->mi = bch2_mi_to_cpu(member);
+
+       for (i = 0; i < ARRAY_SIZE(member->errors); i++)
+               atomic64_set(&ca->errors[i], le64_to_cpu(member->errors[i]));
+
        ca->uuid = member->uuid;
 
        ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
@@ -1591,7 +1591,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
        dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
 
        if (BCH_MEMBER_GROUP(&dev_mi)) {
-               bch2_disk_path_to_text(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
+               bch2_disk_path_to_text_sb(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
                if (label.allocation_failure) {
                        ret = -ENOMEM;
                        goto err;
@@ -1631,16 +1631,6 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
                goto err_unlock;
        }
 
-       mi = bch2_sb_field_get(ca->disk_sb.sb, members_v2);
-
-       if (!bch2_sb_field_resize(&ca->disk_sb, members_v2,
-                               le32_to_cpu(mi->field.u64s) +
-                               sizeof(dev_mi) / sizeof(u64))) {
-               ret = -BCH_ERR_ENOSPC_sb_members;
-               bch_err_msg(c, ret, "setting up new superblock");
-               goto err_unlock;
-       }
-
        if (dynamic_fault("bcachefs:add:no_slot"))
                goto no_slot;
 
@@ -1654,6 +1644,8 @@ no_slot:
 
 have_slot:
        nr_devices = max_t(unsigned, dev_idx + 1, c->sb.nr_devices);
+
+       mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
        u64s = DIV_ROUND_UP(sizeof(struct bch_sb_field_members_v2) +
                            le16_to_cpu(mi->member_bytes) * nr_devices, sizeof(u64));
 
@@ -1689,13 +1681,13 @@ have_slot:
 
        ret = bch2_trans_mark_dev_sb(c, ca);
        if (ret) {
-               bch_err_msg(c, ret, "marking new superblock");
+               bch_err_msg(ca, ret, "marking new superblock");
                goto err_late;
        }
 
        ret = bch2_fs_freespace_init(c);
        if (ret) {
-               bch_err_msg(c, ret, "initializing free space");
+               bch_err_msg(ca, ret, "initializing free space");
                goto err_late;
        }
 
@@ -1763,19 +1755,26 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
        if (ca->mi.state == BCH_MEMBER_STATE_rw)
                __bch2_dev_read_write(c, ca);
 
-       mutex_lock(&c->sb_lock);
-       struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
+       if (!ca->mi.freespace_initialized) {
+               ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
+               bch_err_msg(ca, ret, "initializing free space");
+               if (ret)
+                       goto err;
+       }
 
-       m->last_mount =
-               cpu_to_le64(ktime_get_real_seconds());
+       if (!ca->journal.nr) {
+               ret = bch2_dev_journal_alloc(ca);
+               bch_err_msg(ca, ret, "allocating journal");
+               if (ret)
+                       goto err;
+       }
 
+       mutex_lock(&c->sb_lock);
+       bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount =
+               cpu_to_le64(ktime_get_real_seconds());
        bch2_write_super(c);
        mutex_unlock(&c->sb_lock);
 
-       ret = bch2_fs_freespace_init(c);
-       if (ret)
-               bch_err_msg(c, ret, "initializing free space");
-
        up_write(&c->state_lock);
        return 0;
 err:
@@ -1886,9 +1885,9 @@ found:
 struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
                            struct bch_opts opts)
 {
-       struct bch_sb_handle *sb = NULL;
+       DARRAY(struct bch_sb_handle) sbs = { 0 };
        struct bch_fs *c = NULL;
-       unsigned i, best_sb = 0;
+       struct bch_sb_handle *sb, *best = NULL;
        struct printbuf errbuf = PRINTBUF;
        int ret = 0;
 
@@ -1900,49 +1899,46 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
                goto err;
        }
 
-       sb = kcalloc(nr_devices, sizeof(*sb), GFP_KERNEL);
-       if (!sb) {
-               ret = -ENOMEM;
+       ret = darray_make_room(&sbs, nr_devices);
+       if (ret)
                goto err;
-       }
 
-       for (i = 0; i < nr_devices; i++) {
-               ret = bch2_read_super(devices[i], &opts, &sb[i]);
+       for (unsigned i = 0; i < nr_devices; i++) {
+               struct bch_sb_handle sb = { NULL };
+
+               ret = bch2_read_super(devices[i], &opts, &sb);
                if (ret)
                        goto err;
 
+               BUG_ON(darray_push(&sbs, sb));
        }
 
-       for (i = 1; i < nr_devices; i++)
-               if (le64_to_cpu(sb[i].sb->seq) >
-                   le64_to_cpu(sb[best_sb].sb->seq))
-                       best_sb = i;
-
-       i = 0;
-       while (i < nr_devices) {
-               if (i != best_sb &&
-                   !bch2_dev_exists(sb[best_sb].sb, sb[i].sb->dev_idx)) {
-                       pr_info("%pg has been removed, skipping", sb[i].bdev);
-                       bch2_free_super(&sb[i]);
-                       array_remove_item(sb, nr_devices, i);
+       darray_for_each(sbs, sb)
+               if (!best || le64_to_cpu(sb->sb->seq) > le64_to_cpu(best->sb->seq))
+                       best = sb;
+
+       darray_for_each_reverse(sbs, sb) {
+               if (sb != best && !bch2_dev_exists(best->sb, sb->sb->dev_idx)) {
+                       pr_info("%pg has been removed, skipping", sb->bdev);
+                       bch2_free_super(sb);
+                       darray_remove_item(&sbs, sb);
+                       best -= best > sb;
                        continue;
                }
 
-               ret = bch2_dev_in_fs(sb[best_sb].sb, sb[i].sb);
+               ret = bch2_dev_in_fs(best->sb, sb->sb);
                if (ret)
                        goto err_print;
-               i++;
        }
 
-       c = bch2_fs_alloc(sb[best_sb].sb, opts);
-       if (IS_ERR(c)) {
-               ret = PTR_ERR(c);
+       c = bch2_fs_alloc(best->sb, opts);
+       ret = PTR_ERR_OR_ZERO(c);
+       if (ret)
                goto err;
-       }
 
        down_write(&c->state_lock);
-       for (i = 0; i < nr_devices; i++) {
-               ret = bch2_dev_attach_bdev(c, &sb[i]);
+       darray_for_each(sbs, sb) {
+               ret = bch2_dev_attach_bdev(c, sb);
                if (ret) {
                        up_write(&c->state_lock);
                        goto err;
@@ -1961,7 +1957,9 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
                        goto err;
        }
 out:
-       kfree(sb);
+       darray_for_each(sbs, sb)
+               bch2_free_super(sb);
+       darray_exit(&sbs);
        printbuf_exit(&errbuf);
        module_put(THIS_MODULE);
        return c;
@@ -1971,9 +1969,6 @@ err_print:
 err:
        if (!IS_ERR_OR_NULL(c))
                bch2_fs_stop(c);
-       if (sb)
-               for (i = 0; i < nr_devices; i++)
-                       bch2_free_super(&sb[i]);
        c = ERR_PTR(ret);
        goto out;
 }
index 78d6138db62d7e4a1f1fe07bb043ca7ada417986..7dda4985b99fe6cfdde52c6df869e3df446d48d0 100644 (file)
@@ -37,16 +37,4 @@ struct bch_member_cpu {
        u8                      valid;
 };
 
-struct bch_disk_group_cpu {
-       bool                            deleted;
-       u16                             parent;
-       struct bch_devs_mask            devs;
-};
-
-struct bch_disk_groups_cpu {
-       struct rcu_head                 rcu;
-       unsigned                        nr;
-       struct bch_disk_group_cpu       entries[] __counted_by(nr);
-};
-
 #endif /* _BCACHEFS_SUPER_TYPES_H */
index 397116966a7cd40ef629b98cf16670476ff583a6..ab743115f169e5fc1a7c665148d0c877800fefab 100644 (file)
@@ -149,7 +149,9 @@ read_attribute(bucket_size);
 read_attribute(first_bucket);
 read_attribute(nbuckets);
 rw_attribute(durability);
-read_attribute(iodone);
+read_attribute(io_done);
+read_attribute(io_errors);
+write_attribute(io_errors_reset);
 
 read_attribute(io_latency_read);
 read_attribute(io_latency_write);
@@ -212,7 +214,7 @@ read_attribute(copy_gc_wait);
 
 rw_attribute(rebalance_enabled);
 sysfs_pd_controller_attribute(rebalance);
-read_attribute(rebalance_work);
+read_attribute(rebalance_status);
 rw_attribute(promote_whole_extents);
 
 read_attribute(new_stripes);
@@ -341,7 +343,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
 
 static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
 {
-       prt_printf(out, "%s: ", bch2_btree_ids[c->gc_gens_btree]);
+       prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree));
        bch2_bpos_to_text(out, c->gc_gens_pos);
        prt_printf(out, "\n");
 }
@@ -386,8 +388,8 @@ SHOW(bch2_fs)
        if (attr == &sysfs_copy_gc_wait)
                bch2_copygc_wait_to_text(out, c);
 
-       if (attr == &sysfs_rebalance_work)
-               bch2_rebalance_work_to_text(out, c);
+       if (attr == &sysfs_rebalance_status)
+               bch2_rebalance_status_to_text(out, c);
 
        sysfs_print(promote_whole_extents,      c->promote_whole_extents);
 
@@ -646,7 +648,7 @@ struct attribute *bch2_fs_internal_files[] = {
        &sysfs_copy_gc_wait,
 
        &sysfs_rebalance_enabled,
-       &sysfs_rebalance_work,
+       &sysfs_rebalance_status,
        sysfs_pd_controller_files(rebalance),
 
        &sysfs_moving_ctxts,
@@ -707,10 +709,8 @@ STORE(bch2_fs_opts_dir)
        bch2_opt_set_by_id(&c->opts, id, v);
 
        if ((id == Opt_background_target ||
-            id == Opt_background_compression) && v) {
-               bch2_rebalance_add_work(c, S64_MAX);
-               rebalance_wakeup(c);
-       }
+            id == Opt_background_compression) && v)
+               bch2_set_rebalance_needs_scan(c, 0);
 
        ret = size;
 err:
@@ -882,7 +882,7 @@ static const char * const bch2_rw[] = {
        NULL
 };
 
-static void dev_iodone_to_text(struct printbuf *out, struct bch_dev *ca)
+static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca)
 {
        int rw, i;
 
@@ -910,13 +910,8 @@ SHOW(bch2_dev)
        sysfs_print(discard,            ca->mi.discard);
 
        if (attr == &sysfs_label) {
-               if (ca->mi.group) {
-                       mutex_lock(&c->sb_lock);
-                       bch2_disk_path_to_text(out, c->disk_sb.sb,
-                                              ca->mi.group - 1);
-                       mutex_unlock(&c->sb_lock);
-               }
-
+               if (ca->mi.group)
+                       bch2_disk_path_to_text(out, c, ca->mi.group - 1);
                prt_char(out, '\n');
        }
 
@@ -930,8 +925,11 @@ SHOW(bch2_dev)
                prt_char(out, '\n');
        }
 
-       if (attr == &sysfs_iodone)
-               dev_iodone_to_text(out, ca);
+       if (attr == &sysfs_io_done)
+               dev_io_done_to_text(out, ca);
+
+       if (attr == &sysfs_io_errors)
+               bch2_dev_io_errors_to_text(out, ca);
 
        sysfs_print(io_latency_read,            atomic64_read(&ca->cur_latency[READ]));
        sysfs_print(io_latency_write,           atomic64_read(&ca->cur_latency[WRITE]));
@@ -998,6 +996,9 @@ STORE(bch2_dev)
                        return ret;
        }
 
+       if (attr == &sysfs_io_errors_reset)
+               bch2_dev_errors_reset(ca);
+
        return size;
 }
 SYSFS_OPS(bch2_dev);
@@ -1015,7 +1016,9 @@ struct attribute *bch2_dev_files[] = {
        &sysfs_label,
 
        &sysfs_has_data,
-       &sysfs_iodone,
+       &sysfs_io_done,
+       &sysfs_io_errors,
+       &sysfs_io_errors_reset,
 
        &sysfs_io_latency_read,
        &sysfs_io_latency_write,
index 33efa6005c6f2b1f0885a1f07f146bfd5de0a0a4..dc48b52b01b49c4ed7af877921dd7e2b446d75a8 100644 (file)
@@ -7,6 +7,7 @@
 #include "btree_locking.h"
 #include "btree_update_interior.h"
 #include "keylist.h"
+#include "move_types.h"
 #include "opts.h"
 #include "six.h"
 
index 19264492151b3a2a97edcceb57c66f2d6b31d68d..893304a1f06e6ea03df55020cf7be26f349d8cfe 100644 (file)
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(btree_node,
        TP_printk("%d,%d %u %s %llu:%llu:%u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->level,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode, __entry->pos_offset, __entry->pos_snapshot)
 );
 
@@ -461,7 +461,7 @@ TRACE_EVENT(btree_path_relock_fail,
        TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
@@ -522,7 +522,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
        TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
@@ -767,25 +767,36 @@ DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
 );
 
 TRACE_EVENT(move_data,
-       TP_PROTO(struct bch_fs *c, u64 sectors_moved,
-                u64 keys_moved),
-       TP_ARGS(c, sectors_moved, keys_moved),
+       TP_PROTO(struct bch_fs *c,
+                struct bch_move_stats *stats),
+       TP_ARGS(c, stats),
 
        TP_STRUCT__entry(
-               __field(dev_t,          dev                     )
-               __field(u64,            sectors_moved   )
+               __field(dev_t,          dev             )
                __field(u64,            keys_moved      )
+               __field(u64,            keys_raced      )
+               __field(u64,            sectors_seen    )
+               __field(u64,            sectors_moved   )
+               __field(u64,            sectors_raced   )
        ),
 
        TP_fast_assign(
-               __entry->dev                    = c->dev;
-               __entry->sectors_moved = sectors_moved;
-               __entry->keys_moved = keys_moved;
+               __entry->dev            = c->dev;
+               __entry->keys_moved     = atomic64_read(&stats->keys_moved);
+               __entry->keys_raced     = atomic64_read(&stats->keys_raced);
+               __entry->sectors_seen   = atomic64_read(&stats->sectors_seen);
+               __entry->sectors_moved  = atomic64_read(&stats->sectors_moved);
+               __entry->sectors_raced  = atomic64_read(&stats->sectors_raced);
        ),
 
-       TP_printk("%d,%d sectors_moved %llu keys_moved %llu",
+       TP_printk("%d,%d keys moved %llu raced %llu "
+                 "sectors seen %llu moved %llu raced %llu",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 __entry->sectors_moved, __entry->keys_moved)
+                 __entry->keys_moved,
+                 __entry->keys_raced,
+                 __entry->sectors_seen,
+                 __entry->sectors_moved,
+                 __entry->sectors_raced)
 );
 
 TRACE_EVENT(evacuate_bucket,
@@ -1012,7 +1023,7 @@ DECLARE_EVENT_CLASS(transaction_restart_iter,
        TP_printk("%s %pS btree %s pos %llu:%llu:%u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot)
@@ -1032,13 +1043,16 @@ DEFINE_EVENT(transaction_restart_iter,  trans_restart_btree_node_split,
        TP_ARGS(trans, caller_ip, path)
 );
 
+struct get_locks_fail;
+
 TRACE_EVENT(trans_restart_upgrade,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip,
                 struct btree_path *path,
                 unsigned old_locks_want,
-                unsigned new_locks_want),
-       TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want),
+                unsigned new_locks_want,
+                struct get_locks_fail *f),
+       TP_ARGS(trans, caller_ip, path, old_locks_want, new_locks_want, f),
 
        TP_STRUCT__entry(
                __array(char,                   trans_fn, 32    )
@@ -1046,6 +1060,11 @@ TRACE_EVENT(trans_restart_upgrade,
                __field(u8,                     btree_id        )
                __field(u8,                     old_locks_want  )
                __field(u8,                     new_locks_want  )
+               __field(u8,                     level           )
+               __field(u32,                    path_seq        )
+               __field(u32,                    node_seq        )
+               __field(u32,                    path_alloc_seq  )
+               __field(u32,                    downgrade_seq)
                TRACE_BPOS_entries(pos)
        ),
 
@@ -1055,18 +1074,28 @@ TRACE_EVENT(trans_restart_upgrade,
                __entry->btree_id               = path->btree_id;
                __entry->old_locks_want         = old_locks_want;
                __entry->new_locks_want         = new_locks_want;
+               __entry->level                  = f->l;
+               __entry->path_seq               = path->l[f->l].lock_seq;
+               __entry->node_seq               = IS_ERR_OR_NULL(f->b) ? 0 : f->b->c.lock.seq;
+               __entry->path_alloc_seq         = path->alloc_seq;
+               __entry->downgrade_seq          = path->downgrade_seq;
                TRACE_BPOS_assign(pos, path->pos)
        ),
 
-       TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u",
+       TP_printk("%s %pS btree %s pos %llu:%llu:%u locks_want %u -> %u level %u path seq %u node seq %u alloc_seq %u downgrade_seq %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
                  __entry->old_locks_want,
-                 __entry->new_locks_want)
+                 __entry->new_locks_want,
+                 __entry->level,
+                 __entry->path_seq,
+                 __entry->node_seq,
+                 __entry->path_alloc_seq,
+                 __entry->downgrade_seq)
 );
 
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock,
@@ -1219,7 +1248,7 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
        TP_printk("%s %pS btree %s pos %llu:%llu:%u old_u64s %u new_u64s %u",
                  __entry->trans_fn,
                  (void *) __entry->caller_ip,
-                 bch2_btree_ids[__entry->btree_id],
+                 bch2_btree_id_str(__entry->btree_id),
                  __entry->pos_inode,
                  __entry->pos_offset,
                  __entry->pos_snapshot,
@@ -1227,6 +1256,27 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
                  __entry->new_u64s)
 );
 
+TRACE_EVENT(path_downgrade,
+       TP_PROTO(struct btree_trans *trans,
+                unsigned long caller_ip,
+                struct btree_path *path),
+       TP_ARGS(trans, caller_ip, path),
+
+       TP_STRUCT__entry(
+               __array(char,                   trans_fn, 32    )
+               __field(unsigned long,          caller_ip       )
+       ),
+
+       TP_fast_assign(
+               strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+               __entry->caller_ip              = caller_ip;
+       ),
+
+       TP_printk("%s %pS",
+                 __entry->trans_fn,
+                 (void *) __entry->caller_ip)
+);
+
 DEFINE_EVENT(transaction_event,        trans_restart_write_buffer_flush,
        TP_PROTO(struct btree_trans *trans,
                 unsigned long caller_ip),
index 08bac0ba8d0b81824eca69176b9c2192ee43c928..84b142fcc3dfce6cfbdb30647d4aa67609511519 100644 (file)
@@ -467,6 +467,24 @@ static void bch2_pr_time_units_aligned(struct printbuf *out, u64 ns)
        prt_printf(out, "%s", u->name);
 }
 
+#ifndef __KERNEL__
+#include <time.h>
+void bch2_prt_datetime(struct printbuf *out, time64_t sec)
+{
+       time_t t = sec;
+       char buf[64];
+       ctime_r(&t, buf);
+       prt_str(out, buf);
+}
+#else
+void bch2_prt_datetime(struct printbuf *out, time64_t sec)
+{
+       char buf[64];
+       snprintf(buf, sizeof(buf), "%ptT", &sec);
+       prt_u64(out, sec);
+}
+#endif
+
 #define TABSTOP_SIZE 12
 
 static inline void pr_name_and_units(struct printbuf *out, const char *name, u64 ns)
index 849a37ae497cc1d1ceacec2161e5e5ab1430a7d4..2984b57b29584f1e4009fea7b88c6115de3d6389 100644 (file)
@@ -245,26 +245,7 @@ do {                                                                       \
 #define prt_bitflags(...)              bch2_prt_bitflags(__VA_ARGS__)
 
 void bch2_pr_time_units(struct printbuf *, u64);
-
-#ifdef __KERNEL__
-static inline void pr_time(struct printbuf *out, u64 time)
-{
-       prt_printf(out, "%llu", time);
-}
-#else
-#include <time.h>
-static inline void pr_time(struct printbuf *out, u64 _time)
-{
-       char time_str[64];
-       time_t time = _time;
-       struct tm *tm = localtime(&time);
-       size_t err = strftime(time_str, sizeof(time_str), "%c", tm);
-       if (!err)
-               prt_printf(out, "(formatting error)");
-       else
-               prt_printf(out, "%s", time_str);
-}
-#endif
+void bch2_prt_datetime(struct printbuf *, time64_t);
 
 #ifdef __KERNEL__
 static inline void uuid_unparse_lower(u8 *uuid, char *out)
index b069b1a62e25186be7fb068255080ca179593868..a39ff0c296ecfb2a000edd6aace20bdbb8db20ea 100644 (file)
@@ -70,46 +70,38 @@ const struct bch_hash_desc bch2_xattr_hash_desc = {
        .cmp_bkey       = xattr_cmp_bkey,
 };
 
-int bch2_xattr_invalid(const struct bch_fs *c, struct bkey_s_c k,
+int bch2_xattr_invalid(struct bch_fs *c, struct bkey_s_c k,
                       enum bkey_invalid_flags flags,
                       struct printbuf *err)
 {
-       const struct xattr_handler *handler;
        struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
+       unsigned val_u64s = xattr_val_u64s(xattr.v->x_name_len,
+                                          le16_to_cpu(xattr.v->x_val_len));
+       int ret = 0;
 
-       if (bkey_val_u64s(k.k) <
-           xattr_val_u64s(xattr.v->x_name_len,
-                          le16_to_cpu(xattr.v->x_val_len))) {
-               prt_printf(err, "value too small (%zu < %u)",
-                      bkey_val_u64s(k.k),
-                      xattr_val_u64s(xattr.v->x_name_len,
-                                     le16_to_cpu(xattr.v->x_val_len)));
-               return -BCH_ERR_invalid_bkey;
-       }
+       bkey_fsck_err_on(bkey_val_u64s(k.k) < val_u64s, c, err,
+                        xattr_val_size_too_small,
+                        "value too small (%zu < %u)",
+                        bkey_val_u64s(k.k), val_u64s);
 
        /* XXX why +4 ? */
-       if (bkey_val_u64s(k.k) >
-           xattr_val_u64s(xattr.v->x_name_len,
-                          le16_to_cpu(xattr.v->x_val_len) + 4)) {
-               prt_printf(err, "value too big (%zu > %u)",
-                      bkey_val_u64s(k.k),
-                      xattr_val_u64s(xattr.v->x_name_len,
-                                     le16_to_cpu(xattr.v->x_val_len) + 4));
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       handler = bch2_xattr_type_to_handler(xattr.v->x_type);
-       if (!handler) {
-               prt_printf(err, "invalid type (%u)", xattr.v->x_type);
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       if (memchr(xattr.v->x_name, '\0', xattr.v->x_name_len)) {
-               prt_printf(err, "xattr name has invalid characters");
-               return -BCH_ERR_invalid_bkey;
-       }
-
-       return 0;
+       val_u64s = xattr_val_u64s(xattr.v->x_name_len,
+                                 le16_to_cpu(xattr.v->x_val_len) + 4);
+
+       bkey_fsck_err_on(bkey_val_u64s(k.k) > val_u64s, c, err,
+                        xattr_val_size_too_big,
+                        "value too big (%zu > %u)",
+                        bkey_val_u64s(k.k), val_u64s);
+
+       bkey_fsck_err_on(!bch2_xattr_type_to_handler(xattr.v->x_type), c, err,
+                        xattr_invalid_type,
+                        "invalid type (%u)", xattr.v->x_type);
+
+       bkey_fsck_err_on(memchr(xattr.v->x_name, '\0', xattr.v->x_name_len), c, err,
+                        xattr_name_invalid_chars,
+                        "xattr name has invalid characters");
+fsck_err:
+       return ret;
 }
 
 void bch2_xattr_to_text(struct printbuf *out, struct bch_fs *c,
@@ -590,7 +582,7 @@ err:
        if (value &&
            (opt_id == Opt_background_compression ||
             opt_id == Opt_background_target))
-               bch2_rebalance_add_work(c, inode->v.i_blocks);
+               bch2_set_rebalance_needs_scan(c, inode->ei_inode.bi_inum);
 
        return bch2_err_class(ret);
 }
index f5a52e3a6016e7e67db20d01f1c78c18f1b5a228..1337f31a5c492c8401eefefdcd56c4a450d73514 100644 (file)
@@ -6,7 +6,7 @@
 
 extern const struct bch_hash_desc bch2_xattr_hash_desc;
 
-int bch2_xattr_invalid(const struct bch_fs *, struct bkey_s_c,
+int bch2_xattr_invalid(struct bch_fs *, struct bkey_s_c,
                       enum bkey_invalid_flags, struct printbuf *);
 void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
 
index 9acdec56f626fdb3a5becd2c11fa9f4723bc8fb1..a93d76df8ed8bb1502e6bcd261c1ef9cee60cb36 100644 (file)
@@ -96,6 +96,7 @@ static const struct address_space_operations befs_symlink_aops = {
 };
 
 static const struct export_operations befs_export_operations = {
+       .encode_fh      = generic_encode_ino32_fh,
        .fh_to_dentry   = befs_fh_to_dentry,
        .fh_to_parent   = befs_fh_to_parent,
        .get_parent     = befs_get_parent,
index c53a1d2206225ca26f669642a22cfde767e45bc1..1564eacc253dfd42a91d8046a1c8c7688ba691b2 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 
 #include "super.h"
+#include "mds_client.h"
 
 static inline void ceph_set_cached_acl(struct inode *inode,
                                        int type, struct posix_acl *acl)
@@ -31,6 +32,7 @@ static inline void ceph_set_cached_acl(struct inode *inode,
 
 struct posix_acl *ceph_get_acl(struct inode *inode, int type, bool rcu)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int size;
        unsigned int retry_cnt = 0;
        const char *name;
@@ -72,8 +74,8 @@ retry:
        } else if (size == -ENODATA || size == 0) {
                acl = NULL;
        } else {
-               pr_err_ratelimited("get acl %llx.%llx failed, err=%d\n",
-                                  ceph_vinop(inode), size);
+               pr_err_ratelimited_client(cl, "%llx.%llx failed, err=%d\n",
+                                         ceph_vinop(inode), size);
                acl = ERR_PTR(-EIO);
        }
 
@@ -105,7 +107,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
        case ACL_TYPE_ACCESS:
                name = XATTR_NAME_POSIX_ACL_ACCESS;
                if (acl) {
-                       ret = posix_acl_update_mode(&nop_mnt_idmap, inode,
+                       ret = posix_acl_update_mode(idmap, inode,
                                                    &new_mode, &acl);
                        if (ret)
                                goto out;
@@ -140,7 +142,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
                newattrs.ia_ctime = current_time(inode);
                newattrs.ia_mode = new_mode;
                newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
-               ret = __ceph_setattr(inode, &newattrs, NULL);
+               ret = __ceph_setattr(idmap, inode, &newattrs, NULL);
                if (ret)
                        goto out_free;
        }
@@ -151,7 +153,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
                        newattrs.ia_ctime = old_ctime;
                        newattrs.ia_mode = old_mode;
                        newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
-                       __ceph_setattr(inode, &newattrs, NULL);
+                       __ceph_setattr(idmap, inode, &newattrs, NULL);
                }
                goto out_free;
        }
index 936b9e0b351df72974b2567e59a6a6cac9a8bada..85be3bf18cdf38834de3141dfe0d6d75393c6d94 100644 (file)
@@ -79,18 +79,18 @@ static inline struct ceph_snap_context *page_snap_context(struct page *page)
  */
 static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-       struct inode *inode;
+       struct inode *inode = mapping->host;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci;
        struct ceph_snap_context *snapc;
 
        if (folio_test_dirty(folio)) {
-               dout("%p dirty_folio %p idx %lu -- already dirty\n",
-                    mapping->host, folio, folio->index);
+               doutc(cl, "%llx.%llx %p idx %lu -- already dirty\n",
+                     ceph_vinop(inode), folio, folio->index);
                VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
                return false;
        }
 
-       inode = mapping->host;
        ci = ceph_inode(inode);
 
        /* dirty the head */
@@ -111,12 +111,12 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
        if (ci->i_wrbuffer_ref == 0)
                ihold(inode);
        ++ci->i_wrbuffer_ref;
-       dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d "
-            "snapc %p seq %lld (%d snaps)\n",
-            mapping->host, folio, folio->index,
-            ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
-            ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
-            snapc, snapc->seq, snapc->num_snaps);
+       doutc(cl, "%llx.%llx %p idx %lu head %d/%d -> %d/%d "
+             "snapc %p seq %lld (%d snaps)\n",
+             ceph_vinop(inode), folio, folio->index,
+             ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
+             ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
+             snapc, snapc->seq, snapc->num_snaps);
        spin_unlock(&ci->i_ceph_lock);
 
        /*
@@ -137,23 +137,22 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
 static void ceph_invalidate_folio(struct folio *folio, size_t offset,
                                size_t length)
 {
-       struct inode *inode;
-       struct ceph_inode_info *ci;
+       struct inode *inode = folio->mapping->host;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
+       struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;
 
-       inode = folio->mapping->host;
-       ci = ceph_inode(inode);
 
        if (offset != 0 || length != folio_size(folio)) {
-               dout("%p invalidate_folio idx %lu partial dirty page %zu~%zu\n",
-                    inode, folio->index, offset, length);
+               doutc(cl, "%llx.%llx idx %lu partial dirty page %zu~%zu\n",
+                     ceph_vinop(inode), folio->index, offset, length);
                return;
        }
 
        WARN_ON(!folio_test_locked(folio));
        if (folio_test_private(folio)) {
-               dout("%p invalidate_folio idx %lu full dirty page\n",
-                    inode, folio->index);
+               doutc(cl, "%llx.%llx idx %lu full dirty page\n",
+                     ceph_vinop(inode), folio->index);
 
                snapc = folio_detach_private(folio);
                ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
@@ -166,10 +165,10 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
 static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
 {
        struct inode *inode = folio->mapping->host;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
 
-       dout("%llx:%llx release_folio idx %lu (%sdirty)\n",
-            ceph_vinop(inode),
-            folio->index, folio_test_dirty(folio) ? "" : "not ");
+       doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode),
+             folio->index, folio_test_dirty(folio) ? "" : "not ");
 
        if (folio_test_private(folio))
                return false;
@@ -229,7 +228,7 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
 static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
 {
        struct inode *inode = subreq->rreq->inode;
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 objno, objoff;
        u32 xlen;
@@ -244,7 +243,8 @@ static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
 static void finish_netfs_read(struct ceph_osd_request *req)
 {
        struct inode *inode = req->r_inode;
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+       struct ceph_client *cl = fsc->client;
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
        struct netfs_io_subrequest *subreq = req->r_priv;
        struct ceph_osd_req_op *op = &req->r_ops[0];
@@ -254,8 +254,8 @@ static void finish_netfs_read(struct ceph_osd_request *req)
        ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
                                 req->r_end_latency, osd_data->length, err);
 
-       dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
-            subreq->len, i_size_read(req->r_inode));
+       doutc(cl, "result %d subreq->len=%zu i_size=%lld\n", req->r_result,
+             subreq->len, i_size_read(req->r_inode));
 
        /* no object means success but no data */
        if (err == -ENOENT)
@@ -348,7 +348,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
        struct netfs_io_request *rreq = subreq->rreq;
        struct inode *inode = rreq->inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+       struct ceph_client *cl = fsc->client;
        struct ceph_osd_request *req = NULL;
        struct ceph_vino vino = ceph_vino(inode);
        struct iov_iter iter;
@@ -383,7 +384,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
                        goto out;
        }
 
-       dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
+       doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n",
+             ceph_vinop(inode), subreq->start, subreq->len, len);
 
        iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
 
@@ -400,8 +402,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 
                err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
                if (err < 0) {
-                       dout("%s: iov_ter_get_pages_alloc returned %d\n",
-                            __func__, err);
+                       doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
+                             ceph_vinop(inode), err);
                        goto out;
                }
 
@@ -429,12 +431,13 @@ out:
        ceph_osdc_put_request(req);
        if (err)
                netfs_subreq_terminated(subreq, err, false);
-       dout("%s: result %d\n", __func__, err);
+       doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
 }
 
 static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
 {
        struct inode *inode = rreq->inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int got = 0, want = CEPH_CAP_FILE_CACHE;
        struct ceph_netfs_request_data *priv;
        int ret = 0;
@@ -466,12 +469,12 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
         */
        ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
        if (ret < 0) {
-               dout("start_read %p, error getting cap\n", inode);
+               doutc(cl, "%llx.%llx, error getting cap\n", ceph_vinop(inode));
                goto out;
        }
 
        if (!(got & want)) {
-               dout("start_read %p, no cache cap\n", inode);
+               doutc(cl, "%llx.%llx, no cache cap\n", ceph_vinop(inode));
                ret = -EACCES;
                goto out;
        }
@@ -563,13 +566,14 @@ get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
                   struct ceph_snap_context *page_snapc)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_snap_context *snapc = NULL;
        struct ceph_cap_snap *capsnap = NULL;
 
        spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
-               dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
-                    capsnap->context, capsnap->dirty_pages);
+               doutc(cl, " capsnap %p snapc %p has %d dirty pages\n",
+                     capsnap, capsnap->context, capsnap->dirty_pages);
                if (!capsnap->dirty_pages)
                        continue;
 
@@ -601,8 +605,8 @@ get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
        }
        if (!snapc && ci->i_wrbuffer_ref_head) {
                snapc = ceph_get_snap_context(ci->i_head_snapc);
-               dout(" head snapc %p has %d dirty pages\n",
-                    snapc, ci->i_wrbuffer_ref_head);
+               doutc(cl, " head snapc %p has %d dirty pages\n", snapc,
+                     ci->i_wrbuffer_ref_head);
                if (ctl) {
                        ctl->i_size = i_size_read(inode);
                        ctl->truncate_size = ci->i_truncate_size;
@@ -658,7 +662,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        struct folio *folio = page_folio(page);
        struct inode *inode = page->mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+       struct ceph_client *cl = fsc->client;
        struct ceph_snap_context *snapc, *oldest;
        loff_t page_off = page_offset(page);
        int err;
@@ -670,7 +675,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        bool caching = ceph_is_cache_enabled(inode);
        struct page *bounce_page = NULL;
 
-       dout("writepage %p idx %lu\n", page, page->index);
+       doutc(cl, "%llx.%llx page %p idx %lu\n", ceph_vinop(inode), page,
+             page->index);
 
        if (ceph_inode_is_shutdown(inode))
                return -EIO;
@@ -678,13 +684,14 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        /* verify this is a writeable snap context */
        snapc = page_snap_context(page);
        if (!snapc) {
-               dout("writepage %p page %p not dirty?\n", inode, page);
+               doutc(cl, "%llx.%llx page %p not dirty?\n", ceph_vinop(inode),
+                     page);
                return 0;
        }
        oldest = get_oldest_context(inode, &ceph_wbc, snapc);
        if (snapc->seq > oldest->seq) {
-               dout("writepage %p page %p snapc %p not writeable - noop\n",
-                    inode, page, snapc);
+               doutc(cl, "%llx.%llx page %p snapc %p not writeable - noop\n",
+                     ceph_vinop(inode), page, snapc);
                /* we should only noop if called by kswapd */
                WARN_ON(!(current->flags & PF_MEMALLOC));
                ceph_put_snap_context(oldest);
@@ -695,8 +702,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 
        /* is this a partial page at end of file? */
        if (page_off >= ceph_wbc.i_size) {
-               dout("folio at %lu beyond eof %llu\n", folio->index,
-                               ceph_wbc.i_size);
+               doutc(cl, "%llx.%llx folio at %lu beyond eof %llu\n",
+                     ceph_vinop(inode), folio->index, ceph_wbc.i_size);
                folio_invalidate(folio, 0, folio_size(folio));
                return 0;
        }
@@ -705,8 +712,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
                len = ceph_wbc.i_size - page_off;
 
        wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
-       dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
-            inode, page, page->index, page_off, wlen, snapc, snapc->seq);
+       doutc(cl, "%llx.%llx page %p index %lu on %llu~%llu snapc %p seq %lld\n",
+             ceph_vinop(inode), page, page->index, page_off, wlen, snapc,
+             snapc->seq);
 
        if (atomic_long_inc_return(&fsc->writeback_count) >
            CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
@@ -747,8 +755,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
        osd_req_op_extent_osd_data_pages(req, 0,
                        bounce_page ? &bounce_page : &page, wlen, 0,
                        false, false);
-       dout("writepage %llu~%llu (%llu bytes, %sencrypted)\n",
-            page_off, len, wlen, IS_ENCRYPTED(inode) ? "" : "not ");
+       doutc(cl, "%llx.%llx %llu~%llu (%llu bytes, %sencrypted)\n",
+             ceph_vinop(inode), page_off, len, wlen,
+             IS_ENCRYPTED(inode) ? "" : "not ");
 
        req->r_mtime = inode_get_mtime(inode);
        ceph_osdc_start_request(osdc, req);
@@ -767,19 +776,21 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
                        wbc = &tmp_wbc;
                if (err == -ERESTARTSYS) {
                        /* killed by SIGKILL */
-                       dout("writepage interrupted page %p\n", page);
+                       doutc(cl, "%llx.%llx interrupted page %p\n",
+                             ceph_vinop(inode), page);
                        redirty_page_for_writepage(wbc, page);
                        end_page_writeback(page);
                        return err;
                }
                if (err == -EBLOCKLISTED)
                        fsc->blocklisted = true;
-               dout("writepage setting page/mapping error %d %p\n",
-                    err, page);
+               doutc(cl, "%llx.%llx setting page/mapping error %d %p\n",
+                     ceph_vinop(inode), err, page);
                mapping_set_error(&inode->i_data, err);
                wbc->pages_skipped++;
        } else {
-               dout("writepage cleaned page %p\n", page);
+               doutc(cl, "%llx.%llx cleaned page %p\n",
+                     ceph_vinop(inode), page);
                err = 0;  /* vfs expects us to return 0 */
        }
        oldest = detach_page_private(page);
@@ -803,7 +814,7 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
        ihold(inode);
 
        if (wbc->sync_mode == WB_SYNC_NONE &&
-           ceph_inode_to_client(inode)->write_congested)
+           ceph_inode_to_fs_client(inode)->write_congested)
                return AOP_WRITEPAGE_ACTIVATE;
 
        wait_on_page_fscache(page);
@@ -829,6 +840,7 @@ static void writepages_finish(struct ceph_osd_request *req)
 {
        struct inode *inode = req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_osd_data *osd_data;
        struct page *page;
        int num_pages, total_pages = 0;
@@ -836,11 +848,11 @@ static void writepages_finish(struct ceph_osd_request *req)
        int rc = req->r_result;
        struct ceph_snap_context *snapc = req->r_snapc;
        struct address_space *mapping = inode->i_mapping;
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        unsigned int len = 0;
        bool remove_page;
 
-       dout("writepages_finish %p rc %d\n", inode, rc);
+       doutc(cl, "%llx.%llx rc %d\n", ceph_vinop(inode), rc);
        if (rc < 0) {
                mapping_set_error(mapping, rc);
                ceph_set_error_write(ci);
@@ -862,8 +874,10 @@ static void writepages_finish(struct ceph_osd_request *req)
        /* clean all pages */
        for (i = 0; i < req->r_num_ops; i++) {
                if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
-                       pr_warn("%s incorrect op %d req %p index %d tid %llu\n",
-                               __func__, req->r_ops[i].op, req, i, req->r_tid);
+                       pr_warn_client(cl,
+                               "%llx.%llx incorrect op %d req %p index %d tid %llu\n",
+                               ceph_vinop(inode), req->r_ops[i].op, req, i,
+                               req->r_tid);
                        break;
                }
 
@@ -890,7 +904,7 @@ static void writepages_finish(struct ceph_osd_request *req)
 
                        ceph_put_snap_context(detach_page_private(page));
                        end_page_writeback(page);
-                       dout("unlocking %p\n", page);
+                       doutc(cl, "unlocking %p\n", page);
 
                        if (remove_page)
                                generic_error_remove_page(inode->i_mapping,
@@ -898,8 +912,9 @@ static void writepages_finish(struct ceph_osd_request *req)
 
                        unlock_page(page);
                }
-               dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
-                    inode, osd_data->length, rc >= 0 ? num_pages : 0);
+               doutc(cl, "%llx.%llx wrote %llu bytes cleaned %d pages\n",
+                     ceph_vinop(inode), osd_data->length,
+                     rc >= 0 ? num_pages : 0);
 
                release_pages(osd_data->pages, num_pages);
        }
@@ -926,7 +941,8 @@ static int ceph_writepages_start(struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+       struct ceph_client *cl = fsc->client;
        struct ceph_vino vino = ceph_vino(inode);
        pgoff_t index, start_index, end = -1;
        struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
@@ -944,15 +960,15 @@ static int ceph_writepages_start(struct address_space *mapping,
            fsc->write_congested)
                return 0;
 
-       dout("writepages_start %p (mode=%s)\n", inode,
-            wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
-            (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
+       doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
+             wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
+             (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
 
        if (ceph_inode_is_shutdown(inode)) {
                if (ci->i_wrbuffer_ref > 0) {
-                       pr_warn_ratelimited(
-                               "writepage_start %p %lld forced umount\n",
-                               inode, ceph_ino(inode));
+                       pr_warn_ratelimited_client(cl,
+                               "%llx.%llx %lld forced umount\n",
+                               ceph_vinop(inode), ceph_ino(inode));
                }
                mapping_set_error(mapping, -EIO);
                return -EIO; /* we're in a forced umount, don't write! */
@@ -976,11 +992,11 @@ retry:
        if (!snapc) {
                /* hmm, why does writepages get called when there
                   is no dirty data? */
-               dout(" no snap context with dirty data?\n");
+               doutc(cl, " no snap context with dirty data?\n");
                goto out;
        }
-       dout(" oldest snapc is %p seq %lld (%d snaps)\n",
-            snapc, snapc->seq, snapc->num_snaps);
+       doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n", snapc,
+             snapc->seq, snapc->num_snaps);
 
        should_loop = false;
        if (ceph_wbc.head_snapc && snapc != last_snapc) {
@@ -990,13 +1006,13 @@ retry:
                        end = -1;
                        if (index > 0)
                                should_loop = true;
-                       dout(" cyclic, start at %lu\n", index);
+                       doutc(cl, " cyclic, start at %lu\n", index);
                } else {
                        index = wbc->range_start >> PAGE_SHIFT;
                        end = wbc->range_end >> PAGE_SHIFT;
                        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                                range_whole = true;
-                       dout(" not cyclic, %lu to %lu\n", index, end);
+                       doutc(cl, " not cyclic, %lu to %lu\n", index, end);
                }
        } else if (!ceph_wbc.head_snapc) {
                /* Do not respect wbc->range_{start,end}. Dirty pages
@@ -1005,7 +1021,7 @@ retry:
                 * associated with 'snapc' get written */
                if (index > 0)
                        should_loop = true;
-               dout(" non-head snapc, range whole\n");
+               doutc(cl, " non-head snapc, range whole\n");
        }
 
        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
@@ -1028,12 +1044,12 @@ retry:
 get_more_pages:
                nr_folios = filemap_get_folios_tag(mapping, &index,
                                                   end, tag, &fbatch);
-               dout("pagevec_lookup_range_tag got %d\n", nr_folios);
+               doutc(cl, "pagevec_lookup_range_tag got %d\n", nr_folios);
                if (!nr_folios && !locked_pages)
                        break;
                for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
                        page = &fbatch.folios[i]->page;
-                       dout("? %p idx %lu\n", page, page->index);
+                       doutc(cl, "? %p idx %lu\n", page, page->index);
                        if (locked_pages == 0)
                                lock_page(page);  /* first page */
                        else if (!trylock_page(page))
@@ -1042,15 +1058,15 @@ get_more_pages:
                        /* only dirty pages, or our accounting breaks */
                        if (unlikely(!PageDirty(page)) ||
                            unlikely(page->mapping != mapping)) {
-                               dout("!dirty or !mapping %p\n", page);
+                               doutc(cl, "!dirty or !mapping %p\n", page);
                                unlock_page(page);
                                continue;
                        }
                        /* only if matching snap context */
                        pgsnapc = page_snap_context(page);
                        if (pgsnapc != snapc) {
-                               dout("page snapc %p %lld != oldest %p %lld\n",
-                                    pgsnapc, pgsnapc->seq, snapc, snapc->seq);
+                               doutc(cl, "page snapc %p %lld != oldest %p %lld\n",
+                                     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
                                if (!should_loop &&
                                    !ceph_wbc.head_snapc &&
                                    wbc->sync_mode != WB_SYNC_NONE)
@@ -1061,8 +1077,8 @@ get_more_pages:
                        if (page_offset(page) >= ceph_wbc.i_size) {
                                struct folio *folio = page_folio(page);
 
-                               dout("folio at %lu beyond eof %llu\n",
-                                    folio->index, ceph_wbc.i_size);
+                               doutc(cl, "folio at %lu beyond eof %llu\n",
+                                     folio->index, ceph_wbc.i_size);
                                if ((ceph_wbc.size_stable ||
                                    folio_pos(folio) >= i_size_read(inode)) &&
                                    folio_clear_dirty_for_io(folio))
@@ -1072,23 +1088,23 @@ get_more_pages:
                                continue;
                        }
                        if (strip_unit_end && (page->index > strip_unit_end)) {
-                               dout("end of strip unit %p\n", page);
+                               doutc(cl, "end of strip unit %p\n", page);
                                unlock_page(page);
                                break;
                        }
                        if (PageWriteback(page) || PageFsCache(page)) {
                                if (wbc->sync_mode == WB_SYNC_NONE) {
-                                       dout("%p under writeback\n", page);
+                                       doutc(cl, "%p under writeback\n", page);
                                        unlock_page(page);
                                        continue;
                                }
-                               dout("waiting on writeback %p\n", page);
+                               doutc(cl, "waiting on writeback %p\n", page);
                                wait_on_page_writeback(page);
                                wait_on_page_fscache(page);
                        }
 
                        if (!clear_page_dirty_for_io(page)) {
-                               dout("%p !clear_page_dirty_for_io\n", page);
+                               doutc(cl, "%p !clear_page_dirty_for_io\n", page);
                                unlock_page(page);
                                continue;
                        }
@@ -1143,8 +1159,8 @@ get_more_pages:
                        }
 
                        /* note position of first page in fbatch */
-                       dout("%p will write page %p idx %lu\n",
-                            inode, page, page->index);
+                       doutc(cl, "%llx.%llx will write page %p idx %lu\n",
+                             ceph_vinop(inode), page, page->index);
 
                        if (atomic_long_inc_return(&fsc->writeback_count) >
                            CONGESTION_ON_THRESH(
@@ -1158,8 +1174,9 @@ get_more_pages:
                                                locked_pages ? GFP_NOWAIT : GFP_NOFS);
                                if (IS_ERR(pages[locked_pages])) {
                                        if (PTR_ERR(pages[locked_pages]) == -EINVAL)
-                                               pr_err("%s: inode->i_blkbits=%hhu\n",
-                                                       __func__, inode->i_blkbits);
+                                               pr_err_client(cl,
+                                                       "inode->i_blkbits=%hhu\n",
+                                                       inode->i_blkbits);
                                        /* better not fail on first page! */
                                        BUG_ON(locked_pages == 0);
                                        pages[locked_pages] = NULL;
@@ -1193,7 +1210,7 @@ get_more_pages:
 
                        if (nr_folios && i == nr_folios &&
                            locked_pages < max_pages) {
-                               dout("reached end fbatch, trying for more\n");
+                               doutc(cl, "reached end fbatch, trying for more\n");
                                folio_batch_release(&fbatch);
                                goto get_more_pages;
                        }
@@ -1254,8 +1271,8 @@ new_request:
                                /* Start a new extent */
                                osd_req_op_extent_dup_last(req, op_idx,
                                                           cur_offset - offset);
-                               dout("writepages got pages at %llu~%llu\n",
-                                    offset, len);
+                               doutc(cl, "got pages at %llu~%llu\n", offset,
+                                     len);
                                osd_req_op_extent_osd_data_pages(req, op_idx,
                                                        data_pages, len, 0,
                                                        from_pool, false);
@@ -1288,12 +1305,13 @@ new_request:
                if (IS_ENCRYPTED(inode))
                        len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
 
-               dout("writepages got pages at %llu~%llu\n", offset, len);
+               doutc(cl, "got pages at %llu~%llu\n", offset, len);
 
                if (IS_ENCRYPTED(inode) &&
                    ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK))
-                       pr_warn("%s: bad encrypted write offset=%lld len=%llu\n",
-                               __func__, offset, len);
+                       pr_warn_client(cl,
+                               "bad encrypted write offset=%lld len=%llu\n",
+                               offset, len);
 
                osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
                                                 0, from_pool, false);
@@ -1345,14 +1363,14 @@ new_request:
                        done = true;
 
 release_folios:
-               dout("folio_batch release on %d folios (%p)\n", (int)fbatch.nr,
-                    fbatch.nr ? fbatch.folios[0] : NULL);
+               doutc(cl, "folio_batch release on %d folios (%p)\n",
+                     (int)fbatch.nr, fbatch.nr ? fbatch.folios[0] : NULL);
                folio_batch_release(&fbatch);
        }
 
        if (should_loop && !done) {
                /* more to do; loop back to beginning of file */
-               dout("writepages looping back to beginning of file\n");
+               doutc(cl, "looping back to beginning of file\n");
                end = start_index - 1; /* OK even when start_index == 0 */
 
                /* to write dirty pages associated with next snapc,
@@ -1390,7 +1408,8 @@ release_folios:
 out:
        ceph_osdc_put_request(req);
        ceph_put_snap_context(last_snapc);
-       dout("writepages dend - startone, rc = %d\n", rc);
+       doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode),
+             rc);
        return rc;
 }
 
@@ -1424,11 +1443,12 @@ static struct ceph_snap_context *
 ceph_find_incompatible(struct page *page)
 {
        struct inode *inode = page->mapping->host;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        if (ceph_inode_is_shutdown(inode)) {
-               dout(" page %p %llx:%llx is shutdown\n", page,
-                    ceph_vinop(inode));
+               doutc(cl, " %llx.%llx page %p is shutdown\n",
+                     ceph_vinop(inode), page);
                return ERR_PTR(-ESTALE);
        }
 
@@ -1449,13 +1469,15 @@ ceph_find_incompatible(struct page *page)
                if (snapc->seq > oldest->seq) {
                        /* not writeable -- return it for the caller to deal with */
                        ceph_put_snap_context(oldest);
-                       dout(" page %p snapc %p not current or oldest\n", page, snapc);
+                       doutc(cl, " %llx.%llx page %p snapc %p not current or oldest\n",
+                             ceph_vinop(inode), page, snapc);
                        return ceph_get_snap_context(snapc);
                }
                ceph_put_snap_context(oldest);
 
                /* yay, writeable, do it now (without dropping page lock) */
-               dout(" page %p snapc %p not current, but oldest\n", page, snapc);
+               doutc(cl, " %llx.%llx page %p snapc %p not current, but oldest\n",
+                     ceph_vinop(inode), page, snapc);
                if (clear_page_dirty_for_io(page)) {
                        int r = writepage_nounlock(page, NULL);
                        if (r < 0)
@@ -1524,10 +1546,11 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
 {
        struct folio *folio = page_folio(subpage);
        struct inode *inode = file_inode(file);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        bool check_cap = false;
 
-       dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file,
-            inode, folio, (int)pos, (int)copied, (int)len);
+       doutc(cl, "%llx.%llx file %p folio %p %d~%d (%d)\n", ceph_vinop(inode),
+             file, folio, (int)pos, (int)copied, (int)len);
 
        if (!folio_test_uptodate(folio)) {
                /* just return that nothing was copied on a short copy */
@@ -1587,6 +1610,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
        struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = file_inode(vma->vm_file);
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_file_info *fi = vma->vm_file->private_data;
        loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
        int want, got, err;
@@ -1598,8 +1622,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
 
        ceph_block_sigs(&oldset);
 
-       dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
-            inode, ceph_vinop(inode), off);
+       doutc(cl, "%llx.%llx %llu trying to get caps\n",
+             ceph_vinop(inode), off);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
        else
@@ -1610,8 +1634,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
        if (err < 0)
                goto out_restore;
 
-       dout("filemap_fault %p %llu got cap refs on %s\n",
-            inode, off, ceph_cap_string(got));
+       doutc(cl, "%llx.%llx %llu got cap refs on %s\n", ceph_vinop(inode),
+             off, ceph_cap_string(got));
 
        if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
            !ceph_has_inline_data(ci)) {
@@ -1619,8 +1643,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
                ceph_add_rw_context(fi, &rw_ctx);
                ret = filemap_fault(vmf);
                ceph_del_rw_context(fi, &rw_ctx);
-               dout("filemap_fault %p %llu drop cap refs %s ret %x\n",
-                    inode, off, ceph_cap_string(got), ret);
+               doutc(cl, "%llx.%llx %llu drop cap refs %s ret %x\n",
+                     ceph_vinop(inode), off, ceph_cap_string(got), ret);
        } else
                err = -EAGAIN;
 
@@ -1661,8 +1685,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
                ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
 out_inline:
                filemap_invalidate_unlock_shared(mapping);
-               dout("filemap_fault %p %llu read inline data ret %x\n",
-                    inode, off, ret);
+               doutc(cl, "%llx.%llx %llu read inline data ret %x\n",
+                     ceph_vinop(inode), off, ret);
        }
 out_restore:
        ceph_restore_sigs(&oldset);
@@ -1676,6 +1700,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct inode *inode = file_inode(vma->vm_file);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_info *fi = vma->vm_file->private_data;
        struct ceph_cap_flush *prealloc_cf;
@@ -1702,8 +1727,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
        else
                len = offset_in_thp(page, size);
 
-       dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
-            inode, ceph_vinop(inode), off, len, size);
+       doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n",
+             ceph_vinop(inode), off, len, size);
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
@@ -1714,8 +1739,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
        if (err < 0)
                goto out_free;
 
-       dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
-            inode, off, len, ceph_cap_string(got));
+       doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
+             off, len, ceph_cap_string(got));
 
        /* Update time before taking page lock */
        file_update_time(vma->vm_file);
@@ -1763,8 +1788,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
                        __mark_inode_dirty(inode, dirty);
        }
 
-       dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
-            inode, off, len, ceph_cap_string(got), ret);
+       doutc(cl, "%llx.%llx %llu~%zd dropping cap refs on %s ret %x\n",
+             ceph_vinop(inode), off, len, ceph_cap_string(got), ret);
        ceph_put_cap_refs_async(ci, got);
 out_free:
        ceph_restore_sigs(&oldset);
@@ -1778,6 +1803,7 @@ out_free:
 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
                           char *data, size_t len)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct address_space *mapping = inode->i_mapping;
        struct page *page;
 
@@ -1798,8 +1824,8 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
                }
        }
 
-       dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
-            inode, ceph_vinop(inode), len, locked_page);
+       doutc(cl, "%p %llx.%llx len %zu locked_page %p\n", inode,
+             ceph_vinop(inode), len, locked_page);
 
        if (len > 0) {
                void *kaddr = kmap_atomic(page);
@@ -1823,7 +1849,8 @@ int ceph_uninline_data(struct file *file)
 {
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+       struct ceph_client *cl = fsc->client;
        struct ceph_osd_request *req = NULL;
        struct ceph_cap_flush *prealloc_cf = NULL;
        struct folio *folio = NULL;
@@ -1836,8 +1863,8 @@ int ceph_uninline_data(struct file *file)
        inline_version = ci->i_inline_version;
        spin_unlock(&ci->i_ceph_lock);
 
-       dout("uninline_data %p %llx.%llx inline_version %llu\n",
-            inode, ceph_vinop(inode), inline_version);
+       doutc(cl, "%llx.%llx inline_version %llu\n", ceph_vinop(inode),
+             inline_version);
 
        if (ceph_inode_is_shutdown(inode)) {
                err = -EIO;
@@ -1949,8 +1976,8 @@ out_unlock:
        }
 out:
        ceph_free_cap_flush(prealloc_cf);
-       dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
-            inode, ceph_vinop(inode), inline_version, err);
+       doutc(cl, "%llx.%llx inline_version %llu = %d\n",
+             ceph_vinop(inode), inline_version, err);
        return err;
 }
 
@@ -1977,8 +2004,9 @@ enum {
 static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
                                s64 pool, struct ceph_string *pool_ns)
 {
-       struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
+       struct ceph_client *cl = fsc->client;
        struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
        struct rb_node **p, *parent;
        struct ceph_pool_perm *perm;
@@ -2013,10 +2041,10 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
                goto out;
 
        if (pool_ns)
-               dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
-                    pool, (int)pool_ns->len, pool_ns->str);
+               doutc(cl, "pool %lld ns %.*s no perm cached\n", pool,
+                     (int)pool_ns->len, pool_ns->str);
        else
-               dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
+               doutc(cl, "pool %lld no perm cached\n", pool);
 
        down_write(&mdsc->pool_perm_rwsem);
        p = &mdsc->pool_perm_tree.rb_node;
@@ -2141,15 +2169,16 @@ out:
        if (!err)
                err = have;
        if (pool_ns)
-               dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
-                    pool, (int)pool_ns->len, pool_ns->str, err);
+               doutc(cl, "pool %lld ns %.*s result = %d\n", pool,
+                     (int)pool_ns->len, pool_ns->str, err);
        else
-               dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
+               doutc(cl, "pool %lld result = %d\n", pool, err);
        return err;
 }
 
 int ceph_pool_perm_check(struct inode *inode, int need)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_string *pool_ns;
        s64 pool;
@@ -2168,7 +2197,7 @@ int ceph_pool_perm_check(struct inode *inode, int need)
                return 0;
        }
 
-       if (ceph_test_mount_opt(ceph_inode_to_client(inode),
+       if (ceph_test_mount_opt(ceph_inode_to_fs_client(inode),
                                NOPOOLPERM))
                return 0;
 
@@ -2179,13 +2208,11 @@ int ceph_pool_perm_check(struct inode *inode, int need)
 check:
        if (flags & CEPH_I_POOL_PERM) {
                if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
-                       dout("ceph_pool_perm_check pool %lld no read perm\n",
-                            pool);
+                       doutc(cl, "pool %lld no read perm\n", pool);
                        return -EPERM;
                }
                if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
-                       dout("ceph_pool_perm_check pool %lld no write perm\n",
-                            pool);
+                       doutc(cl, "pool %lld no write perm\n", pool);
                        return -EPERM;
                }
                return 0;
index de1dee46d3df72e0a5069a1318a1ad212419ff67..930fbd54d2c8c8fe9ec194f3d8e53d21f510f6f8 100644 (file)
@@ -15,7 +15,7 @@
 void ceph_fscache_register_inode_cookie(struct inode *inode)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
 
        /* No caching for filesystem? */
        if (!fsc->fscache)
index a104669fcf4c7f473e75152123f7ca3caaf5eb4f..2c0b8dc3dd0d80314b04c0717501f066079b97eb 100644 (file)
@@ -186,10 +186,10 @@ static void __ceph_unreserve_caps(struct ceph_mds_client *mdsc, int nr_caps)
                        mdsc->caps_avail_count += nr_caps;
                }
 
-               dout("%s: caps %d = %d used + %d resv + %d avail\n",
-                    __func__,
-                    mdsc->caps_total_count, mdsc->caps_use_count,
-                    mdsc->caps_reserve_count, mdsc->caps_avail_count);
+               doutc(mdsc->fsc->client,
+                     "caps %d = %d used + %d resv + %d avail\n",
+                     mdsc->caps_total_count, mdsc->caps_use_count,
+                     mdsc->caps_reserve_count, mdsc->caps_avail_count);
                BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
                                                 mdsc->caps_reserve_count +
                                                 mdsc->caps_avail_count);
@@ -202,6 +202,7 @@ static void __ceph_unreserve_caps(struct ceph_mds_client *mdsc, int nr_caps)
 int ceph_reserve_caps(struct ceph_mds_client *mdsc,
                      struct ceph_cap_reservation *ctx, int need)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        int i, j;
        struct ceph_cap *cap;
        int have;
@@ -212,7 +213,7 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
        struct ceph_mds_session *s;
        LIST_HEAD(newcaps);
 
-       dout("reserve caps ctx=%p need=%d\n", ctx, need);
+       doutc(cl, "ctx=%p need=%d\n", ctx, need);
 
        /* first reserve any caps that are already allocated */
        spin_lock(&mdsc->caps_list_lock);
@@ -272,8 +273,8 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
                        continue;
                }
 
-               pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
-                       ctx, need, have + alloc);
+               pr_warn_client(cl, "ctx=%p ENOMEM need=%d got=%d\n", ctx, need,
+                              have + alloc);
                err = -ENOMEM;
                break;
        }
@@ -298,20 +299,21 @@ int ceph_reserve_caps(struct ceph_mds_client *mdsc,
 
        spin_unlock(&mdsc->caps_list_lock);
 
-       dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
-            ctx, mdsc->caps_total_count, mdsc->caps_use_count,
-            mdsc->caps_reserve_count, mdsc->caps_avail_count);
+       doutc(cl, "ctx=%p %d = %d used + %d resv + %d avail\n", ctx,
+             mdsc->caps_total_count, mdsc->caps_use_count,
+             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        return err;
 }
 
 void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
                         struct ceph_cap_reservation *ctx)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        bool reclaim = false;
        if (!ctx->count)
                return;
 
-       dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
+       doutc(cl, "ctx=%p count=%d\n", ctx, ctx->count);
        spin_lock(&mdsc->caps_list_lock);
        __ceph_unreserve_caps(mdsc, ctx->count);
        ctx->count = 0;
@@ -328,6 +330,7 @@ void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
 struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
                              struct ceph_cap_reservation *ctx)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_cap *cap = NULL;
 
        /* temporary, until we do something about cap import/export */
@@ -359,9 +362,9 @@ struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
        }
 
        spin_lock(&mdsc->caps_list_lock);
-       dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
-            ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
-            mdsc->caps_reserve_count, mdsc->caps_avail_count);
+       doutc(cl, "ctx=%p (%d) %d = %d used + %d resv + %d avail\n", ctx,
+             ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
+             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        BUG_ON(!ctx->count);
        BUG_ON(ctx->count > mdsc->caps_reserve_count);
        BUG_ON(list_empty(&mdsc->caps_list));
@@ -382,10 +385,12 @@ struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
 
 void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
+
        spin_lock(&mdsc->caps_list_lock);
-       dout("put_cap %p %d = %d used + %d resv + %d avail\n",
-            cap, mdsc->caps_total_count, mdsc->caps_use_count,
-            mdsc->caps_reserve_count, mdsc->caps_avail_count);
+       doutc(cl, "%p %d = %d used + %d resv + %d avail\n", cap,
+             mdsc->caps_total_count, mdsc->caps_use_count,
+             mdsc->caps_reserve_count, mdsc->caps_avail_count);
        mdsc->caps_use_count--;
        /*
         * Keep some preallocated caps around (ceph_min_count), to
@@ -491,11 +496,13 @@ static void __insert_cap_node(struct ceph_inode_info *ci,
 static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
 {
+       struct inode *inode = &ci->netfs.inode;
        struct ceph_mount_options *opt = mdsc->fsc->mount_options;
+
        ci->i_hold_caps_max = round_jiffies(jiffies +
                                            opt->caps_wanted_delay_max * HZ);
-       dout("__cap_set_timeouts %p %lu\n", &ci->netfs.inode,
-            ci->i_hold_caps_max - jiffies);
+       doutc(mdsc->fsc->client, "%p %llx.%llx %lu\n", inode,
+             ceph_vinop(inode), ci->i_hold_caps_max - jiffies);
 }
 
 /*
@@ -509,8 +516,11 @@ static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
 static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
                                struct ceph_inode_info *ci)
 {
-       dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->netfs.inode,
-            ci->i_ceph_flags, ci->i_hold_caps_max);
+       struct inode *inode = &ci->netfs.inode;
+
+       doutc(mdsc->fsc->client, "%p %llx.%llx flags 0x%lx at %lu\n",
+             inode, ceph_vinop(inode), ci->i_ceph_flags,
+             ci->i_hold_caps_max);
        if (!mdsc->stopping) {
                spin_lock(&mdsc->cap_delay_lock);
                if (!list_empty(&ci->i_cap_delay_list)) {
@@ -533,7 +543,9 @@ no_change:
 static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
                                      struct ceph_inode_info *ci)
 {
-       dout("__cap_delay_requeue_front %p\n", &ci->netfs.inode);
+       struct inode *inode = &ci->netfs.inode;
+
+       doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode, ceph_vinop(inode));
        spin_lock(&mdsc->cap_delay_lock);
        ci->i_ceph_flags |= CEPH_I_FLUSH;
        if (!list_empty(&ci->i_cap_delay_list))
@@ -550,7 +562,9 @@ static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
 static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
                               struct ceph_inode_info *ci)
 {
-       dout("__cap_delay_cancel %p\n", &ci->netfs.inode);
+       struct inode *inode = &ci->netfs.inode;
+
+       doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode, ceph_vinop(inode));
        if (list_empty(&ci->i_cap_delay_list))
                return;
        spin_lock(&mdsc->cap_delay_lock);
@@ -562,6 +576,9 @@ static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
 static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
                              unsigned issued)
 {
+       struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
+
        unsigned had = __ceph_caps_issued(ci, NULL);
 
        lockdep_assert_held(&ci->i_ceph_lock);
@@ -586,7 +603,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
                if (issued & CEPH_CAP_FILE_SHARED)
                        atomic_inc(&ci->i_shared_gen);
                if (S_ISDIR(ci->netfs.inode.i_mode)) {
-                       dout(" marking %p NOT complete\n", &ci->netfs.inode);
+                       doutc(cl, " marking %p NOT complete\n", inode);
                        __ceph_dir_clear_complete(ci);
                }
        }
@@ -635,7 +652,8 @@ void ceph_add_cap(struct inode *inode,
                  unsigned seq, unsigned mseq, u64 realmino, int flags,
                  struct ceph_cap **new_cap)
 {
-       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap *cap;
        int mds = session->s_mds;
@@ -644,8 +662,9 @@ void ceph_add_cap(struct inode *inode,
 
        lockdep_assert_held(&ci->i_ceph_lock);
 
-       dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
-            session->s_mds, cap_id, ceph_cap_string(issued), seq);
+       doutc(cl, "%p %llx.%llx mds%d cap %llx %s seq %d\n", inode,
+             ceph_vinop(inode), session->s_mds, cap_id,
+             ceph_cap_string(issued), seq);
 
        gen = atomic_read(&session->s_cap_gen);
 
@@ -723,9 +742,9 @@ void ceph_add_cap(struct inode *inode,
        actual_wanted = __ceph_caps_wanted(ci);
        if ((wanted & ~actual_wanted) ||
            (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
-               dout(" issued %s, mds wanted %s, actual %s, queueing\n",
-                    ceph_cap_string(issued), ceph_cap_string(wanted),
-                    ceph_cap_string(actual_wanted));
+               doutc(cl, "issued %s, mds wanted %s, actual %s, queueing\n",
+                     ceph_cap_string(issued), ceph_cap_string(wanted),
+                     ceph_cap_string(actual_wanted));
                __cap_delay_requeue(mdsc, ci);
        }
 
@@ -742,9 +761,9 @@ void ceph_add_cap(struct inode *inode,
                WARN_ON(ci->i_auth_cap == cap);
        }
 
-       dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
-            inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
-            ceph_cap_string(issued|cap->issued), seq, mds);
+       doutc(cl, "inode %p %llx.%llx cap %p %s now %s seq %d mds%d\n",
+             inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
+             ceph_cap_string(issued|cap->issued), seq, mds);
        cap->cap_id = cap_id;
        cap->issued = issued;
        cap->implemented |= issued;
@@ -766,6 +785,8 @@ void ceph_add_cap(struct inode *inode,
  */
 static int __cap_is_valid(struct ceph_cap *cap)
 {
+       struct inode *inode = &cap->ci->netfs.inode;
+       struct ceph_client *cl = cap->session->s_mdsc->fsc->client;
        unsigned long ttl;
        u32 gen;
 
@@ -773,9 +794,9 @@ static int __cap_is_valid(struct ceph_cap *cap)
        ttl = cap->session->s_cap_ttl;
 
        if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
-               dout("__cap_is_valid %p cap %p issued %s "
-                    "but STALE (gen %u vs %u)\n", &cap->ci->netfs.inode,
-                    cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
+               doutc(cl, "%p %llx.%llx cap %p issued %s but STALE (gen %u vs %u)\n",
+                     inode, ceph_vinop(inode), cap,
+                     ceph_cap_string(cap->issued), cap->cap_gen, gen);
                return 0;
        }
 
@@ -789,6 +810,8 @@ static int __cap_is_valid(struct ceph_cap *cap)
  */
 int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
 {
+       struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int have = ci->i_snap_caps;
        struct ceph_cap *cap;
        struct rb_node *p;
@@ -799,8 +822,8 @@ int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
                cap = rb_entry(p, struct ceph_cap, ci_node);
                if (!__cap_is_valid(cap))
                        continue;
-               dout("__ceph_caps_issued %p cap %p issued %s\n",
-                    &ci->netfs.inode, cap, ceph_cap_string(cap->issued));
+               doutc(cl, "%p %llx.%llx cap %p issued %s\n", inode,
+                     ceph_vinop(inode), cap, ceph_cap_string(cap->issued));
                have |= cap->issued;
                if (implemented)
                        *implemented |= cap->implemented;
@@ -843,16 +866,18 @@ int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
  */
 static void __touch_cap(struct ceph_cap *cap)
 {
+       struct inode *inode = &cap->ci->netfs.inode;
        struct ceph_mds_session *s = cap->session;
+       struct ceph_client *cl = s->s_mdsc->fsc->client;
 
        spin_lock(&s->s_cap_lock);
        if (!s->s_cap_iterator) {
-               dout("__touch_cap %p cap %p mds%d\n", &cap->ci->netfs.inode, cap,
-                    s->s_mds);
+               doutc(cl, "%p %llx.%llx cap %p mds%d\n", inode,
+                     ceph_vinop(inode), cap, s->s_mds);
                list_move_tail(&cap->session_caps, &s->s_caps);
        } else {
-               dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
-                    &cap->ci->netfs.inode, cap, s->s_mds);
+               doutc(cl, "%p %llx.%llx cap %p mds%d NOP, iterating over caps\n",
+                     inode, ceph_vinop(inode), cap, s->s_mds);
        }
        spin_unlock(&s->s_cap_lock);
 }
@@ -864,15 +889,16 @@ static void __touch_cap(struct ceph_cap *cap)
  */
 int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
 {
+       struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_cap *cap;
        struct rb_node *p;
        int have = ci->i_snap_caps;
 
        if ((have & mask) == mask) {
-               dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s"
-                    " (mask %s)\n", ceph_ino(&ci->netfs.inode),
-                    ceph_cap_string(have),
-                    ceph_cap_string(mask));
+               doutc(cl, "mask %p %llx.%llx snap issued %s (mask %s)\n",
+                     inode, ceph_vinop(inode), ceph_cap_string(have),
+                     ceph_cap_string(mask));
                return 1;
        }
 
@@ -881,10 +907,10 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
                if (!__cap_is_valid(cap))
                        continue;
                if ((cap->issued & mask) == mask) {
-                       dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s"
-                            " (mask %s)\n", ceph_ino(&ci->netfs.inode), cap,
-                            ceph_cap_string(cap->issued),
-                            ceph_cap_string(mask));
+                       doutc(cl, "mask %p %llx.%llx cap %p issued %s (mask %s)\n",
+                             inode, ceph_vinop(inode), cap,
+                             ceph_cap_string(cap->issued),
+                             ceph_cap_string(mask));
                        if (touch)
                                __touch_cap(cap);
                        return 1;
@@ -893,10 +919,10 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
                /* does a combination of caps satisfy mask? */
                have |= cap->issued;
                if ((have & mask) == mask) {
-                       dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s"
-                            " (mask %s)\n", ceph_ino(&ci->netfs.inode),
-                            ceph_cap_string(cap->issued),
-                            ceph_cap_string(mask));
+                       doutc(cl, "mask %p %llx.%llx combo issued %s (mask %s)\n",
+                             inode, ceph_vinop(inode),
+                             ceph_cap_string(cap->issued),
+                             ceph_cap_string(mask));
                        if (touch) {
                                struct rb_node *q;
 
@@ -922,7 +948,7 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
 int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
                                   int touch)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
        int r;
 
        r = __ceph_caps_issued_mask(ci, mask, touch);
@@ -954,13 +980,14 @@ int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
 int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
 {
        struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int ret;
 
        spin_lock(&ci->i_ceph_lock);
        ret = __ceph_caps_revoking_other(ci, NULL, mask);
        spin_unlock(&ci->i_ceph_lock);
-       dout("ceph_caps_revoking %p %s = %d\n", inode,
-            ceph_cap_string(mask), ret);
+       doutc(cl, "%p %llx.%llx %s = %d\n", inode, ceph_vinop(inode),
+             ceph_cap_string(mask), ret);
        return ret;
 }
 
@@ -996,7 +1023,7 @@ int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
        const int WR_SHIFT = ffs(CEPH_FILE_MODE_WR);
        const int LAZY_SHIFT = ffs(CEPH_FILE_MODE_LAZY);
        struct ceph_mount_options *opt =
-               ceph_inode_to_client(&ci->netfs.inode)->mount_options;
+               ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
        unsigned long used_cutoff = jiffies - opt->caps_wanted_delay_max * HZ;
        unsigned long idle_cutoff = jiffies - opt->caps_wanted_delay_min * HZ;
 
@@ -1107,21 +1134,23 @@ int ceph_is_any_caps(struct inode *inode)
 void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
 {
        struct ceph_mds_session *session = cap->session;
+       struct ceph_client *cl = session->s_mdsc->fsc->client;
        struct ceph_inode_info *ci = cap->ci;
+       struct inode *inode = &ci->netfs.inode;
        struct ceph_mds_client *mdsc;
        int removed = 0;
 
        /* 'ci' being NULL means the remove have already occurred */
        if (!ci) {
-               dout("%s: cap inode is NULL\n", __func__);
+               doutc(cl, "inode is NULL\n");
                return;
        }
 
        lockdep_assert_held(&ci->i_ceph_lock);
 
-       dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode);
+       doutc(cl, "%p from %p %llx.%llx\n", cap, inode, ceph_vinop(inode));
 
-       mdsc = ceph_inode_to_client(&ci->netfs.inode)->mdsc;
+       mdsc = ceph_inode_to_fs_client(&ci->netfs.inode)->mdsc;
 
        /* remove from inode's cap rbtree, and clear auth cap */
        rb_erase(&cap->ci_node, &ci->i_caps);
@@ -1132,8 +1161,8 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
        spin_lock(&session->s_cap_lock);
        if (session->s_cap_iterator == cap) {
                /* not yet, we are iterating over this very cap */
-               dout("__ceph_remove_cap  delaying %p removal from session %p\n",
-                    cap, cap->session);
+               doutc(cl, "delaying %p removal from session %p\n", cap,
+                     cap->session);
        } else {
                list_del_init(&cap->session_caps);
                session->s_nr_caps--;
@@ -1178,20 +1207,21 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
        }
 }
 
-void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
+void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+                    bool queue_release)
 {
        struct ceph_inode_info *ci = cap->ci;
        struct ceph_fs_client *fsc;
 
        /* 'ci' being NULL means the remove have already occurred */
        if (!ci) {
-               dout("%s: cap inode is NULL\n", __func__);
+               doutc(mdsc->fsc->client, "inode is NULL\n");
                return;
        }
 
        lockdep_assert_held(&ci->i_ceph_lock);
 
-       fsc = ceph_inode_to_client(&ci->netfs.inode);
+       fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
        WARN_ON_ONCE(ci->i_auth_cap == cap &&
                     !list_empty(&ci->i_dirty_item) &&
                     !fsc->blocklisted &&
@@ -1227,15 +1257,19 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
 {
        struct ceph_mds_caps *fc;
        void *p;
-       struct ceph_osd_client *osdc = &arg->session->s_mdsc->fsc->client->osdc;
-
-       dout("%s %s %llx %llx caps %s wanted %s dirty %s seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu xattr_ver %llu xattr_len %d\n",
-            __func__, ceph_cap_op_name(arg->op), arg->cid, arg->ino,
-            ceph_cap_string(arg->caps), ceph_cap_string(arg->wanted),
-            ceph_cap_string(arg->dirty), arg->seq, arg->issue_seq,
-            arg->flush_tid, arg->oldest_flush_tid, arg->mseq, arg->follows,
-            arg->size, arg->max_size, arg->xattr_version,
-            arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);
+       struct ceph_mds_client *mdsc = arg->session->s_mdsc;
+       struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
+
+       doutc(mdsc->fsc->client,
+             "%s %llx %llx caps %s wanted %s dirty %s seq %u/%u"
+             " tid %llu/%llu mseq %u follows %lld size %llu/%llu"
+             " xattr_ver %llu xattr_len %d\n",
+             ceph_cap_op_name(arg->op), arg->cid, arg->ino,
+             ceph_cap_string(arg->caps), ceph_cap_string(arg->wanted),
+             ceph_cap_string(arg->dirty), arg->seq, arg->issue_seq,
+             arg->flush_tid, arg->oldest_flush_tid, arg->mseq, arg->follows,
+             arg->size, arg->max_size, arg->xattr_version,
+             arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);
 
        msg->hdr.version = cpu_to_le16(12);
        msg->hdr.tid = cpu_to_le64(arg->flush_tid);
@@ -1342,6 +1376,8 @@ static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
  */
 void __ceph_remove_caps(struct ceph_inode_info *ci)
 {
+       struct inode *inode = &ci->netfs.inode;
+       struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
        struct rb_node *p;
 
        /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
@@ -1351,7 +1387,7 @@ void __ceph_remove_caps(struct ceph_inode_info *ci)
        while (p) {
                struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
                p = rb_next(p);
-               ceph_remove_cap(cap, true);
+               ceph_remove_cap(mdsc, cap, true);
        }
        spin_unlock(&ci->i_ceph_lock);
 }
@@ -1370,6 +1406,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
 {
        struct ceph_inode_info *ci = cap->ci;
        struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int held, revoking;
 
        lockdep_assert_held(&ci->i_ceph_lock);
@@ -1378,10 +1415,10 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
        revoking = cap->implemented & ~cap->issued;
        retain &= ~revoking;
 
-       dout("%s %p cap %p session %p %s -> %s (revoking %s)\n",
-            __func__, inode, cap, cap->session,
-            ceph_cap_string(held), ceph_cap_string(held & retain),
-            ceph_cap_string(revoking));
+       doutc(cl, "%p %llx.%llx cap %p session %p %s -> %s (revoking %s)\n",
+             inode, ceph_vinop(inode), cap, cap->session,
+             ceph_cap_string(held), ceph_cap_string(held & retain),
+             ceph_cap_string(revoking));
        BUG_ON((retain & CEPH_CAP_PIN) == 0);
 
        ci->i_ceph_flags &= ~CEPH_I_FLUSH;
@@ -1497,13 +1534,16 @@ static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
 {
        struct ceph_msg *msg;
        struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
 
        msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, cap_msg_size(arg), GFP_NOFS,
                           false);
        if (!msg) {
-               pr_err("error allocating cap msg: ino (%llx.%llx) flushing %s tid %llu, requeuing cap.\n",
-                      ceph_vinop(inode), ceph_cap_string(arg->dirty),
-                      arg->flush_tid);
+               pr_err_client(cl,
+                             "error allocating cap msg: ino (%llx.%llx)"
+                             " flushing %s tid %llu, requeuing cap.\n",
+                             ceph_vinop(inode), ceph_cap_string(arg->dirty),
+                             arg->flush_tid);
                spin_lock(&ci->i_ceph_lock);
                __cap_delay_requeue(arg->session->s_mdsc, ci);
                spin_unlock(&ci->i_ceph_lock);
@@ -1592,11 +1632,13 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
 {
        struct inode *inode = &ci->netfs.inode;
        struct ceph_mds_client *mdsc = session->s_mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_cap_snap *capsnap;
        u64 oldest_flush_tid = 0;
        u64 first_tid = 1, last_tid = 0;
 
-       dout("__flush_snaps %p session %p\n", inode, session);
+       doutc(cl, "%p %llx.%llx session %p\n", inode, ceph_vinop(inode),
+             session);
 
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                /*
@@ -1611,7 +1653,7 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
 
                /* only flush each capsnap once */
                if (capsnap->cap_flush.tid > 0) {
-                       dout(" already flushed %p, skipping\n", capsnap);
+                       doutc(cl, "already flushed %p, skipping\n", capsnap);
                        continue;
                }
 
@@ -1643,8 +1685,8 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
                int ret;
 
                if (!(cap && cap->session == session)) {
-                       dout("__flush_snaps %p auth cap %p not mds%d, "
-                            "stop\n", inode, cap, session->s_mds);
+                       doutc(cl, "%p %llx.%llx auth cap %p not mds%d, stop\n",
+                             inode, ceph_vinop(inode), cap, session->s_mds);
                        break;
                }
 
@@ -1665,15 +1707,17 @@ static void __ceph_flush_snaps(struct ceph_inode_info *ci,
                refcount_inc(&capsnap->nref);
                spin_unlock(&ci->i_ceph_lock);
 
-               dout("__flush_snaps %p capsnap %p tid %llu %s\n",
-                    inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty));
+               doutc(cl, "%p %llx.%llx capsnap %p tid %llu %s\n", inode,
+                     ceph_vinop(inode), capsnap, cf->tid,
+                     ceph_cap_string(capsnap->dirty));
 
                ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
                                        oldest_flush_tid);
                if (ret < 0) {
-                       pr_err("__flush_snaps: error sending cap flushsnap, "
-                              "ino (%llx.%llx) tid %llu follows %llu\n",
-                               ceph_vinop(inode), cf->tid, capsnap->follows);
+                       pr_err_client(cl, "error sending cap flushsnap, "
+                                     "ino (%llx.%llx) tid %llu follows %llu\n",
+                                     ceph_vinop(inode), cf->tid,
+                                     capsnap->follows);
                }
 
                ceph_put_cap_snap(capsnap);
@@ -1685,28 +1729,29 @@ void ceph_flush_snaps(struct ceph_inode_info *ci,
                      struct ceph_mds_session **psession)
 {
        struct inode *inode = &ci->netfs.inode;
-       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_mds_session *session = NULL;
        bool need_put = false;
        int mds;
 
-       dout("ceph_flush_snaps %p\n", inode);
+       doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
        if (psession)
                session = *psession;
 retry:
        spin_lock(&ci->i_ceph_lock);
        if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
-               dout(" no capsnap needs flush, doing nothing\n");
+               doutc(cl, " no capsnap needs flush, doing nothing\n");
                goto out;
        }
        if (!ci->i_auth_cap) {
-               dout(" no auth cap (migrating?), doing nothing\n");
+               doutc(cl, " no auth cap (migrating?), doing nothing\n");
                goto out;
        }
 
        mds = ci->i_auth_cap->session->s_mds;
        if (session && session->s_mds != mds) {
-               dout(" oops, wrong session %p mutex\n", session);
+               doutc(cl, " oops, wrong session %p mutex\n", session);
                ceph_put_mds_session(session);
                session = NULL;
        }
@@ -1750,23 +1795,25 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
                           struct ceph_cap_flush **pcf)
 {
        struct ceph_mds_client *mdsc =
-               ceph_sb_to_client(ci->netfs.inode.i_sb)->mdsc;
+               ceph_sb_to_fs_client(ci->netfs.inode.i_sb)->mdsc;
        struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int was = ci->i_dirty_caps;
        int dirty = 0;
 
        lockdep_assert_held(&ci->i_ceph_lock);
 
        if (!ci->i_auth_cap) {
-               pr_warn("__mark_dirty_caps %p %llx mask %s, "
-                       "but no auth cap (session was closed?)\n",
-                       inode, ceph_ino(inode), ceph_cap_string(mask));
+               pr_warn_client(cl, "%p %llx.%llx mask %s, "
+                              "but no auth cap (session was closed?)\n",
+                               inode, ceph_vinop(inode),
+                               ceph_cap_string(mask));
                return 0;
        }
 
-       dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->netfs.inode,
-            ceph_cap_string(mask), ceph_cap_string(was),
-            ceph_cap_string(was | mask));
+       doutc(cl, "%p %llx.%llx %s dirty %s -> %s\n", inode,
+             ceph_vinop(inode), ceph_cap_string(mask),
+             ceph_cap_string(was), ceph_cap_string(was | mask));
        ci->i_dirty_caps |= mask;
        if (was == 0) {
                struct ceph_mds_session *session = ci->i_auth_cap->session;
@@ -1779,8 +1826,9 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
                        ci->i_head_snapc = ceph_get_snap_context(
                                ci->i_snap_realm->cached_context);
                }
-               dout(" inode %p now dirty snapc %p auth cap %p\n",
-                    &ci->netfs.inode, ci->i_head_snapc, ci->i_auth_cap);
+               doutc(cl, "%p %llx.%llx now dirty snapc %p auth cap %p\n",
+                     inode, ceph_vinop(inode), ci->i_head_snapc,
+                     ci->i_auth_cap);
                BUG_ON(!list_empty(&ci->i_dirty_item));
                spin_lock(&mdsc->cap_dirty_lock);
                list_add(&ci->i_dirty_item, &session->s_cap_dirty);
@@ -1873,7 +1921,8 @@ static u64 __mark_caps_flushing(struct inode *inode,
                                struct ceph_mds_session *session, bool wake,
                                u64 *oldest_flush_tid)
 {
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap_flush *cf = NULL;
        int flushing;
@@ -1884,13 +1933,13 @@ static u64 __mark_caps_flushing(struct inode *inode,
        BUG_ON(!ci->i_prealloc_cap_flush);
 
        flushing = ci->i_dirty_caps;
-       dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
-            ceph_cap_string(flushing),
-            ceph_cap_string(ci->i_flushing_caps),
-            ceph_cap_string(ci->i_flushing_caps | flushing));
+       doutc(cl, "flushing %s, flushing_caps %s -> %s\n",
+             ceph_cap_string(flushing),
+             ceph_cap_string(ci->i_flushing_caps),
+             ceph_cap_string(ci->i_flushing_caps | flushing));
        ci->i_flushing_caps |= flushing;
        ci->i_dirty_caps = 0;
-       dout(" inode %p now !dirty\n", inode);
+       doutc(cl, "%p %llx.%llx now !dirty\n", inode, ceph_vinop(inode));
 
        swap(cf, ci->i_prealloc_cap_flush);
        cf->caps = flushing;
@@ -1921,6 +1970,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
        __releases(ci->i_ceph_lock)
        __acquires(ci->i_ceph_lock)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u32 invalidating_gen = ci->i_rdcache_gen;
 
@@ -1932,12 +1982,13 @@ static int try_nonblocking_invalidate(struct inode *inode)
        if (inode->i_data.nrpages == 0 &&
            invalidating_gen == ci->i_rdcache_gen) {
                /* success. */
-               dout("try_nonblocking_invalidate %p success\n", inode);
+               doutc(cl, "%p %llx.%llx success\n", inode,
+                     ceph_vinop(inode));
                /* save any racing async invalidate some trouble */
                ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
                return 0;
        }
-       dout("try_nonblocking_invalidate %p failed\n", inode);
+       doutc(cl, "%p %llx.%llx failed\n", inode, ceph_vinop(inode));
        return -1;
 }
 
@@ -1969,6 +2020,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags)
 {
        struct inode *inode = &ci->netfs.inode;
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_cap *cap;
        u64 flush_tid, oldest_flush_tid;
        int file_wanted, used, cap_used;
@@ -2043,9 +2095,9 @@ retry:
                }
        }
 
-       dout("check_caps %llx.%llx file_want %s used %s dirty %s flushing %s"
-            " issued %s revoking %s retain %s %s%s%s\n", ceph_vinop(inode),
-            ceph_cap_string(file_wanted),
+       doutc(cl, "%p %llx.%llx file_want %s used %s dirty %s "
+             "flushing %s issued %s revoking %s retain %s %s%s%s\n",
+            inode, ceph_vinop(inode), ceph_cap_string(file_wanted),
             ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
             ceph_cap_string(ci->i_flushing_caps),
             ceph_cap_string(issued), ceph_cap_string(revoking),
@@ -2066,10 +2118,10 @@ retry:
            (revoking & (CEPH_CAP_FILE_CACHE|
                         CEPH_CAP_FILE_LAZYIO)) && /*  or revoking cache */
            !tried_invalidate) {
-               dout("check_caps trying to invalidate on %llx.%llx\n",
-                    ceph_vinop(inode));
+               doutc(cl, "trying to invalidate on %p %llx.%llx\n",
+                     inode, ceph_vinop(inode));
                if (try_nonblocking_invalidate(inode) < 0) {
-                       dout("check_caps queuing invalidate\n");
+                       doutc(cl, "queuing invalidate\n");
                        queue_invalidate = true;
                        ci->i_rdcache_revoking = ci->i_rdcache_gen;
                }
@@ -2097,35 +2149,35 @@ retry:
                        cap_used &= ~ci->i_auth_cap->issued;
 
                revoking = cap->implemented & ~cap->issued;
-               dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
-                    cap->mds, cap, ceph_cap_string(cap_used),
-                    ceph_cap_string(cap->issued),
-                    ceph_cap_string(cap->implemented),
-                    ceph_cap_string(revoking));
+               doutc(cl, " mds%d cap %p used %s issued %s implemented %s revoking %s\n",
+                     cap->mds, cap, ceph_cap_string(cap_used),
+                     ceph_cap_string(cap->issued),
+                     ceph_cap_string(cap->implemented),
+                     ceph_cap_string(revoking));
 
                if (cap == ci->i_auth_cap &&
                    (cap->issued & CEPH_CAP_FILE_WR)) {
                        /* request larger max_size from MDS? */
                        if (ci->i_wanted_max_size > ci->i_max_size &&
                            ci->i_wanted_max_size > ci->i_requested_max_size) {
-                               dout("requesting new max_size\n");
+                               doutc(cl, "requesting new max_size\n");
                                goto ack;
                        }
 
                        /* approaching file_max? */
                        if (__ceph_should_report_size(ci)) {
-                               dout("i_size approaching max_size\n");
+                               doutc(cl, "i_size approaching max_size\n");
                                goto ack;
                        }
                }
                /* flush anything dirty? */
                if (cap == ci->i_auth_cap) {
                        if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) {
-                               dout("flushing dirty caps\n");
+                               doutc(cl, "flushing dirty caps\n");
                                goto ack;
                        }
                        if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) {
-                               dout("flushing snap caps\n");
+                               doutc(cl, "flushing snap caps\n");
                                goto ack;
                        }
                }
@@ -2133,7 +2185,7 @@ retry:
                /* completed revocation? going down and there are no caps? */
                if (revoking) {
                        if ((revoking & cap_used) == 0) {
-                               dout("completed revocation of %s\n",
+                               doutc(cl, "completed revocation of %s\n",
                                      ceph_cap_string(cap->implemented & ~cap->issued));
                                goto ack;
                        }
@@ -2232,7 +2284,7 @@ ack:
  */
 static int try_flush_caps(struct inode *inode, u64 *ptid)
 {
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int flushing = 0;
        u64 flush_tid = 0, oldest_flush_tid = 0;
@@ -2310,7 +2362,8 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid)
  */
 static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
 {
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req1 = NULL, *req2 = NULL;
        int ret, err = 0;
@@ -2400,8 +2453,9 @@ static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode)
                kfree(sessions);
        }
 
-       dout("%s %p wait on tid %llu %llu\n", __func__,
-            inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
+       doutc(cl, "%p %llx.%llx wait on tid %llu %llu\n", inode,
+             ceph_vinop(inode), req1 ? req1->r_tid : 0ULL,
+             req2 ? req2->r_tid : 0ULL);
        if (req1) {
                ret = !wait_for_completion_timeout(&req1->r_safe_completion,
                                        ceph_timeout_jiffies(req1->r_timeout));
@@ -2427,11 +2481,13 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        struct inode *inode = file->f_mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        u64 flush_tid;
        int ret, err;
        int dirty;
 
-       dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
+       doutc(cl, "%p %llx.%llx%s\n", inode, ceph_vinop(inode),
+             datasync ? " datasync" : "");
 
        ret = file_write_and_wait_range(file, start, end);
        if (datasync)
@@ -2442,7 +2498,7 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
                goto out;
 
        dirty = try_flush_caps(inode, &flush_tid);
-       dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));
+       doutc(cl, "dirty caps are %s\n", ceph_cap_string(dirty));
 
        err = flush_mdlog_and_wait_inode_unsafe_requests(inode);
 
@@ -2463,7 +2519,8 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        if (err < 0)
                ret = err;
 out:
-       dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
+       doutc(cl, "%p %llx.%llx%s result=%d\n", inode, ceph_vinop(inode),
+             datasync ? " datasync" : "", ret);
        return ret;
 }
 
@@ -2476,12 +2533,13 @@ out:
 int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        u64 flush_tid;
        int err = 0;
        int dirty;
        int wait = (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync);
 
-       dout("write_inode %p wait=%d\n", inode, wait);
+       doutc(cl, "%p %llx.%llx wait=%d\n", inode, ceph_vinop(inode), wait);
        ceph_fscache_unpin_writeback(inode, wbc);
        if (wait) {
                err = ceph_wait_on_async_create(inode);
@@ -2493,7 +2551,7 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
                                       caps_are_flushed(inode, flush_tid));
        } else {
                struct ceph_mds_client *mdsc =
-                       ceph_sb_to_client(inode->i_sb)->mdsc;
+                       ceph_sb_to_fs_client(inode->i_sb)->mdsc;
 
                spin_lock(&ci->i_ceph_lock);
                if (__ceph_caps_dirty(ci))
@@ -2511,6 +2569,7 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
        __acquires(ci->i_ceph_lock)
 {
        struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_cap *cap;
        struct ceph_cap_flush *cf;
        int ret;
@@ -2536,8 +2595,8 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
 
                cap = ci->i_auth_cap;
                if (!(cap && cap->session == session)) {
-                       pr_err("%p auth cap %p not mds%d ???\n",
-                              inode, cap, session->s_mds);
+                       pr_err_client(cl, "%p auth cap %p not mds%d ???\n",
+                                     inode, cap, session->s_mds);
                        break;
                }
 
@@ -2546,8 +2605,9 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
                if (!cf->is_capsnap) {
                        struct cap_msg_args arg;
 
-                       dout("kick_flushing_caps %p cap %p tid %llu %s\n",
-                            inode, cap, cf->tid, ceph_cap_string(cf->caps));
+                       doutc(cl, "%p %llx.%llx cap %p tid %llu %s\n",
+                             inode, ceph_vinop(inode), cap, cf->tid,
+                             ceph_cap_string(cf->caps));
                        __prep_cap(&arg, cap, CEPH_CAP_OP_FLUSH,
                                         (cf->tid < last_snap_flush ?
                                          CEPH_CLIENT_CAPS_PENDING_CAPSNAP : 0),
@@ -2561,9 +2621,9 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
                        struct ceph_cap_snap *capsnap =
                                        container_of(cf, struct ceph_cap_snap,
                                                    cap_flush);
-                       dout("kick_flushing_caps %p capsnap %p tid %llu %s\n",
-                            inode, capsnap, cf->tid,
-                            ceph_cap_string(capsnap->dirty));
+                       doutc(cl, "%p %llx.%llx capsnap %p tid %llu %s\n",
+                             inode, ceph_vinop(inode), capsnap, cf->tid,
+                             ceph_cap_string(capsnap->dirty));
 
                        refcount_inc(&capsnap->nref);
                        spin_unlock(&ci->i_ceph_lock);
@@ -2571,11 +2631,10 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
                        ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
                                                oldest_flush_tid);
                        if (ret < 0) {
-                               pr_err("kick_flushing_caps: error sending "
-                                       "cap flushsnap, ino (%llx.%llx) "
-                                       "tid %llu follows %llu\n",
-                                       ceph_vinop(inode), cf->tid,
-                                       capsnap->follows);
+                               pr_err_client(cl, "error sending cap flushsnap,"
+                                             " %p %llx.%llx tid %llu follows %llu\n",
+                                             inode, ceph_vinop(inode), cf->tid,
+                                             capsnap->follows);
                        }
 
                        ceph_put_cap_snap(capsnap);
@@ -2588,22 +2647,26 @@ static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
 void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_inode_info *ci;
        struct ceph_cap *cap;
        u64 oldest_flush_tid;
 
-       dout("early_kick_flushing_caps mds%d\n", session->s_mds);
+       doutc(cl, "mds%d\n", session->s_mds);
 
        spin_lock(&mdsc->cap_dirty_lock);
        oldest_flush_tid = __get_oldest_flush_tid(mdsc);
        spin_unlock(&mdsc->cap_dirty_lock);
 
        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
+               struct inode *inode = &ci->netfs.inode;
+
                spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (!(cap && cap->session == session)) {
-                       pr_err("%p auth cap %p not mds%d ???\n",
-                               &ci->netfs.inode, cap, session->s_mds);
+                       pr_err_client(cl, "%p %llx.%llx auth cap %p not mds%d ???\n",
+                                     inode, ceph_vinop(inode), cap,
+                                     session->s_mds);
                        spin_unlock(&ci->i_ceph_lock);
                        continue;
                }
@@ -2636,24 +2699,28 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
                             struct ceph_mds_session *session)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_inode_info *ci;
        struct ceph_cap *cap;
        u64 oldest_flush_tid;
 
        lockdep_assert_held(&session->s_mutex);
 
-       dout("kick_flushing_caps mds%d\n", session->s_mds);
+       doutc(cl, "mds%d\n", session->s_mds);
 
        spin_lock(&mdsc->cap_dirty_lock);
        oldest_flush_tid = __get_oldest_flush_tid(mdsc);
        spin_unlock(&mdsc->cap_dirty_lock);
 
        list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
+               struct inode *inode = &ci->netfs.inode;
+
                spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (!(cap && cap->session == session)) {
-                       pr_err("%p auth cap %p not mds%d ???\n",
-                               &ci->netfs.inode, cap, session->s_mds);
+                       pr_err_client(cl, "%p %llx.%llx auth cap %p not mds%d ???\n",
+                                     inode, ceph_vinop(inode), cap,
+                                     session->s_mds);
                        spin_unlock(&ci->i_ceph_lock);
                        continue;
                }
@@ -2670,11 +2737,13 @@ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
 {
        struct ceph_mds_client *mdsc = session->s_mdsc;
        struct ceph_cap *cap = ci->i_auth_cap;
+       struct inode *inode = &ci->netfs.inode;
 
        lockdep_assert_held(&ci->i_ceph_lock);
 
-       dout("%s %p flushing %s\n", __func__, &ci->netfs.inode,
-            ceph_cap_string(ci->i_flushing_caps));
+       doutc(mdsc->fsc->client, "%p %llx.%llx flushing %s\n",
+             inode, ceph_vinop(inode),
+             ceph_cap_string(ci->i_flushing_caps));
 
        if (!list_empty(&ci->i_cap_flush_list)) {
                u64 oldest_flush_tid;
@@ -2696,6 +2765,9 @@ void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session,
 void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
                            bool snap_rwsem_locked)
 {
+       struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
+
        lockdep_assert_held(&ci->i_ceph_lock);
 
        if (got & CEPH_CAP_PIN)
@@ -2716,10 +2788,10 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
        }
        if (got & CEPH_CAP_FILE_BUFFER) {
                if (ci->i_wb_ref == 0)
-                       ihold(&ci->netfs.inode);
+                       ihold(inode);
                ci->i_wb_ref++;
-               dout("%s %p wb %d -> %d (?)\n", __func__,
-                    &ci->netfs.inode, ci->i_wb_ref-1, ci->i_wb_ref);
+               doutc(cl, "%p %llx.%llx wb %d -> %d (?)\n", inode,
+                     ceph_vinop(inode), ci->i_wb_ref-1, ci->i_wb_ref);
        }
 }
 
@@ -2746,20 +2818,23 @@ static int try_get_cap_refs(struct inode *inode, int need, int want,
                            loff_t endoff, int flags, int *got)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int ret = 0;
        int have, implemented;
        bool snap_rwsem_locked = false;
 
-       dout("get_cap_refs %p need %s want %s\n", inode,
-            ceph_cap_string(need), ceph_cap_string(want));
+       doutc(cl, "%p %llx.%llx need %s want %s\n", inode,
+             ceph_vinop(inode), ceph_cap_string(need),
+             ceph_cap_string(want));
 
 again:
        spin_lock(&ci->i_ceph_lock);
 
        if ((flags & CHECK_FILELOCK) &&
            (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)) {
-               dout("try_get_cap_refs %p error filelock\n", inode);
+               doutc(cl, "%p %llx.%llx error filelock\n", inode,
+                     ceph_vinop(inode));
                ret = -EIO;
                goto out_unlock;
        }
@@ -2779,8 +2854,8 @@ again:
 
        if (have & need & CEPH_CAP_FILE_WR) {
                if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
-                       dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
-                            inode, endoff, ci->i_max_size);
+                       doutc(cl, "%p %llx.%llx endoff %llu > maxsize %llu\n",
+                             inode, ceph_vinop(inode), endoff, ci->i_max_size);
                        if (endoff > ci->i_requested_max_size)
                                ret = ci->i_auth_cap ? -EFBIG : -EUCLEAN;
                        goto out_unlock;
@@ -2790,7 +2865,8 @@ again:
                 * can get a final snapshot value for size+mtime.
                 */
                if (__ceph_have_pending_cap_snap(ci)) {
-                       dout("get_cap_refs %p cap_snap_pending\n", inode);
+                       doutc(cl, "%p %llx.%llx cap_snap_pending\n", inode,
+                             ceph_vinop(inode));
                        goto out_unlock;
                }
        }
@@ -2808,9 +2884,9 @@ again:
                int not = want & ~(have & need);
                int revoking = implemented & ~have;
                int exclude = revoking & not;
-               dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
-                    inode, ceph_cap_string(have), ceph_cap_string(not),
-                    ceph_cap_string(revoking));
+               doutc(cl, "%p %llx.%llx have %s but not %s (revoking %s)\n",
+                     inode, ceph_vinop(inode), ceph_cap_string(have),
+                     ceph_cap_string(not), ceph_cap_string(revoking));
                if (!exclude || !(exclude & CEPH_CAP_FILE_BUFFER)) {
                        if (!snap_rwsem_locked &&
                            !ci->i_head_snapc &&
@@ -2850,28 +2926,31 @@ again:
                        spin_unlock(&s->s_cap_lock);
                }
                if (session_readonly) {
-                       dout("get_cap_refs %p need %s but mds%d readonly\n",
-                            inode, ceph_cap_string(need), ci->i_auth_cap->mds);
+                       doutc(cl, "%p %llx.%llx need %s but mds%d readonly\n",
+                             inode, ceph_vinop(inode), ceph_cap_string(need),
+                             ci->i_auth_cap->mds);
                        ret = -EROFS;
                        goto out_unlock;
                }
 
                if (ceph_inode_is_shutdown(inode)) {
-                       dout("get_cap_refs %p inode is shutdown\n", inode);
+                       doutc(cl, "%p %llx.%llx inode is shutdown\n",
+                             inode, ceph_vinop(inode));
                        ret = -ESTALE;
                        goto out_unlock;
                }
                mds_wanted = __ceph_caps_mds_wanted(ci, false);
                if (need & ~mds_wanted) {
-                       dout("get_cap_refs %p need %s > mds_wanted %s\n",
-                            inode, ceph_cap_string(need),
-                            ceph_cap_string(mds_wanted));
+                       doutc(cl, "%p %llx.%llx need %s > mds_wanted %s\n",
+                             inode, ceph_vinop(inode), ceph_cap_string(need),
+                             ceph_cap_string(mds_wanted));
                        ret = -EUCLEAN;
                        goto out_unlock;
                }
 
-               dout("get_cap_refs %p have %s need %s\n", inode,
-                    ceph_cap_string(have), ceph_cap_string(need));
+               doutc(cl, "%p %llx.%llx have %s need %s\n", inode,
+                     ceph_vinop(inode), ceph_cap_string(have),
+                     ceph_cap_string(need));
        }
 out_unlock:
 
@@ -2886,8 +2965,8 @@ out_unlock:
        else if (ret == 1)
                ceph_update_cap_hit(&mdsc->metric);
 
-       dout("get_cap_refs %p ret %d got %s\n", inode,
-            ret, ceph_cap_string(*got));
+       doutc(cl, "%p %llx.%llx ret %d got %s\n", inode,
+             ceph_vinop(inode), ret, ceph_cap_string(*got));
        return ret;
 }
 
@@ -2899,13 +2978,14 @@ out_unlock:
 static void check_max_size(struct inode *inode, loff_t endoff)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int check = 0;
 
        /* do we need to explicitly request a larger max_size? */
        spin_lock(&ci->i_ceph_lock);
        if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
-               dout("write %p at large endoff %llu, req max_size\n",
-                    inode, endoff);
+               doutc(cl, "write %p %llx.%llx at large endoff %llu, req max_size\n",
+                     inode, ceph_vinop(inode), endoff);
                ci->i_wanted_max_size = endoff;
        }
        /* duplicate ceph_check_caps()'s logic */
@@ -2964,7 +3044,7 @@ int __ceph_get_caps(struct inode *inode, struct ceph_file_info *fi, int need,
                    int want, loff_t endoff, int *got)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        int ret, _got, flags;
 
        ret = ceph_pool_perm_check(inode, need);
@@ -3115,10 +3195,12 @@ void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
 static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
                                  struct ceph_cap_snap *capsnap)
 {
+       struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
+
        if (!capsnap->need_flush &&
            !capsnap->writing && !capsnap->dirty_pages) {
-               dout("dropping cap_snap %p follows %llu\n",
-                    capsnap, capsnap->follows);
+               doutc(cl, "%p follows %llu\n", capsnap, capsnap->follows);
                BUG_ON(capsnap->cap_flush.tid > 0);
                ceph_put_snap_context(capsnap->context);
                if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps))
@@ -3150,6 +3232,7 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
                                enum put_cap_refs_mode mode)
 {
        struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int last = 0, put = 0, flushsnaps = 0, wake = 0;
        bool check_flushsnaps = false;
 
@@ -3172,8 +3255,8 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
                        put++;
                        check_flushsnaps = true;
                }
-               dout("put_cap_refs %p wb %d -> %d (?)\n",
-                    inode, ci->i_wb_ref+1, ci->i_wb_ref);
+               doutc(cl, "%p %llx.%llx wb %d -> %d (?)\n", inode,
+                     ceph_vinop(inode), ci->i_wb_ref+1, ci->i_wb_ref);
        }
        if (had & CEPH_CAP_FILE_WR) {
                if (--ci->i_wr_ref == 0) {
@@ -3213,8 +3296,8 @@ static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had,
        }
        spin_unlock(&ci->i_ceph_lock);
 
-       dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
-            last ? " last" : "", put ? " put" : "");
+       doutc(cl, "%p %llx.%llx had %s%s%s\n", inode, ceph_vinop(inode),
+             ceph_cap_string(had), last ? " last" : "", put ? " put" : "");
 
        switch (mode) {
        case PUT_CAP_REFS_SYNC:
@@ -3264,6 +3347,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                                struct ceph_snap_context *snapc)
 {
        struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_cap_snap *capsnap = NULL, *iter;
        int put = 0;
        bool last = false;
@@ -3287,11 +3371,10 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                        ceph_put_snap_context(ci->i_head_snapc);
                        ci->i_head_snapc = NULL;
                }
-               dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
-                    inode,
-                    ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
-                    ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
-                    last ? " LAST" : "");
+               doutc(cl, "on %p %llx.%llx head %d/%d -> %d/%d %s\n",
+                     inode, ceph_vinop(inode), ci->i_wrbuffer_ref+nr,
+                     ci->i_wrbuffer_ref_head+nr, ci->i_wrbuffer_ref,
+                     ci->i_wrbuffer_ref_head, last ? " LAST" : "");
        } else {
                list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
                        if (iter->context == snapc) {
@@ -3321,13 +3404,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                                }
                        }
                }
-               dout("put_wrbuffer_cap_refs on %p cap_snap %p "
-                    " snap %lld %d/%d -> %d/%d %s%s\n",
-                    inode, capsnap, capsnap->context->seq,
-                    ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
-                    ci->i_wrbuffer_ref, capsnap->dirty_pages,
-                    last ? " (wrbuffer last)" : "",
-                    complete_capsnap ? " (complete capsnap)" : "");
+               doutc(cl, "%p %llx.%llx cap_snap %p snap %lld %d/%d -> %d/%d %s%s\n",
+                     inode, ceph_vinop(inode), capsnap, capsnap->context->seq,
+                     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
+                     ci->i_wrbuffer_ref, capsnap->dirty_pages,
+                     last ? " (wrbuffer last)" : "",
+                     complete_capsnap ? " (complete capsnap)" : "");
        }
 
 unlock:
@@ -3350,9 +3432,10 @@ unlock:
  */
 static void invalidate_aliases(struct inode *inode)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct dentry *dn, *prev = NULL;
 
-       dout("invalidate_aliases inode %p\n", inode);
+       doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
        d_prune_aliases(inode);
        /*
         * For non-directory inode, d_find_alias() only returns
@@ -3411,6 +3494,7 @@ static void handle_cap_grant(struct inode *inode,
        __releases(ci->i_ceph_lock)
        __releases(session->s_mdsc->snap_rwsem)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int seq = le32_to_cpu(grant->seq);
        int newcaps = le32_to_cpu(grant->caps);
@@ -3434,10 +3518,11 @@ static void handle_cap_grant(struct inode *inode,
        if (IS_ENCRYPTED(inode) && size)
                size = extra_info->fscrypt_file_size;
 
-       dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
-            inode, cap, session->s_mds, seq, ceph_cap_string(newcaps));
-       dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
-               i_size_read(inode));
+       doutc(cl, "%p %llx.%llx cap %p mds%d seq %d %s\n", inode,
+             ceph_vinop(inode), cap, session->s_mds, seq,
+             ceph_cap_string(newcaps));
+       doutc(cl, " size %llu max_size %llu, i_size %llu\n", size,
+             max_size, i_size_read(inode));
 
 
        /*
@@ -3497,15 +3582,17 @@ static void handle_cap_grant(struct inode *inode,
                inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
                inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
                ci->i_btime = extra_info->btime;
-               dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
-                    from_kuid(&init_user_ns, inode->i_uid),
-                    from_kgid(&init_user_ns, inode->i_gid));
+               doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode,
+                     ceph_vinop(inode), inode->i_mode,
+                     from_kuid(&init_user_ns, inode->i_uid),
+                     from_kgid(&init_user_ns, inode->i_gid));
 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
                if (ci->fscrypt_auth_len != extra_info->fscrypt_auth_len ||
                    memcmp(ci->fscrypt_auth, extra_info->fscrypt_auth,
                           ci->fscrypt_auth_len))
-                       pr_warn_ratelimited("%s: cap grant attempt to change fscrypt_auth on non-I_NEW inode (old len %d new len %d)\n",
-                               __func__, ci->fscrypt_auth_len,
+                       pr_warn_ratelimited_client(cl,
+                               "cap grant attempt to change fscrypt_auth on non-I_NEW inode (old len %d new len %d)\n",
+                               ci->fscrypt_auth_len,
                                extra_info->fscrypt_auth_len);
 #endif
        }
@@ -3523,8 +3610,8 @@ static void handle_cap_grant(struct inode *inode,
                u64 version = le64_to_cpu(grant->xattr_version);
 
                if (version > ci->i_xattrs.version) {
-                       dout(" got new xattrs v%llu on %p len %d\n",
-                            version, inode, len);
+                       doutc(cl, " got new xattrs v%llu on %p %llx.%llx len %d\n",
+                             version, inode, ceph_vinop(inode), len);
                        if (ci->i_xattrs.blob)
                                ceph_buffer_put(ci->i_xattrs.blob);
                        ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
@@ -3575,8 +3662,8 @@ static void handle_cap_grant(struct inode *inode,
 
        if (ci->i_auth_cap == cap && (newcaps & CEPH_CAP_ANY_FILE_WR)) {
                if (max_size != ci->i_max_size) {
-                       dout("max_size %lld -> %llu\n",
-                            ci->i_max_size, max_size);
+                       doutc(cl, "max_size %lld -> %llu\n", ci->i_max_size,
+                             max_size);
                        ci->i_max_size = max_size;
                        if (max_size >= ci->i_wanted_max_size) {
                                ci->i_wanted_max_size = 0;  /* reset */
@@ -3590,10 +3677,9 @@ static void handle_cap_grant(struct inode *inode,
        wanted = __ceph_caps_wanted(ci);
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);
-       dout(" my wanted = %s, used = %s, dirty %s\n",
-            ceph_cap_string(wanted),
-            ceph_cap_string(used),
-            ceph_cap_string(dirty));
+       doutc(cl, " my wanted = %s, used = %s, dirty %s\n",
+             ceph_cap_string(wanted), ceph_cap_string(used),
+             ceph_cap_string(dirty));
 
        if ((was_stale || le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) &&
            (wanted & ~(cap->mds_wanted | newcaps))) {
@@ -3614,10 +3700,9 @@ static void handle_cap_grant(struct inode *inode,
        if (cap->issued & ~newcaps) {
                int revoking = cap->issued & ~newcaps;
 
-               dout("revocation: %s -> %s (revoking %s)\n",
-                    ceph_cap_string(cap->issued),
-                    ceph_cap_string(newcaps),
-                    ceph_cap_string(revoking));
+               doutc(cl, "revocation: %s -> %s (revoking %s)\n",
+                     ceph_cap_string(cap->issued), ceph_cap_string(newcaps),
+                     ceph_cap_string(revoking));
                if (S_ISREG(inode->i_mode) &&
                    (revoking & used & CEPH_CAP_FILE_BUFFER))
                        writeback = true;  /* initiate writeback; will delay ack */
@@ -3635,11 +3720,12 @@ static void handle_cap_grant(struct inode *inode,
                cap->issued = newcaps;
                cap->implemented |= newcaps;
        } else if (cap->issued == newcaps) {
-               dout("caps unchanged: %s -> %s\n",
-                    ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
+               doutc(cl, "caps unchanged: %s -> %s\n",
+                     ceph_cap_string(cap->issued),
+                     ceph_cap_string(newcaps));
        } else {
-               dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
-                    ceph_cap_string(newcaps));
+               doutc(cl, "grant: %s -> %s\n", ceph_cap_string(cap->issued),
+                     ceph_cap_string(newcaps));
                /* non-auth MDS is revoking the newly grant caps ? */
                if (cap == ci->i_auth_cap &&
                    __ceph_caps_revoking_other(ci, cap, newcaps))
@@ -3727,7 +3813,8 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
        __releases(ci->i_ceph_lock)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_cap_flush *cf, *tmp_cf;
        LIST_HEAD(to_remove);
        unsigned seq = le32_to_cpu(m->seq);
@@ -3764,11 +3851,11 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                }
        }
 
-       dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
-            " flushing %s -> %s\n",
-            inode, session->s_mds, seq, ceph_cap_string(dirty),
-            ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
-            ceph_cap_string(ci->i_flushing_caps & ~cleaned));
+       doutc(cl, "%p %llx.%llx mds%d seq %d on %s cleaned %s, flushing %s -> %s\n",
+             inode, ceph_vinop(inode), session->s_mds, seq,
+             ceph_cap_string(dirty), ceph_cap_string(cleaned),
+             ceph_cap_string(ci->i_flushing_caps),
+             ceph_cap_string(ci->i_flushing_caps & ~cleaned));
 
        if (list_empty(&to_remove) && !cleaned)
                goto out;
@@ -3784,18 +3871,21 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                if (list_empty(&ci->i_cap_flush_list)) {
                        list_del_init(&ci->i_flushing_item);
                        if (!list_empty(&session->s_cap_flushing)) {
-                               dout(" mds%d still flushing cap on %p\n",
-                                    session->s_mds,
-                                    &list_first_entry(&session->s_cap_flushing,
-                                               struct ceph_inode_info,
-                                               i_flushing_item)->netfs.inode);
+                               struct inode *inode =
+                                           &list_first_entry(&session->s_cap_flushing,
+                                                             struct ceph_inode_info,
+                                                             i_flushing_item)->netfs.inode;
+                               doutc(cl, " mds%d still flushing cap on %p %llx.%llx\n",
+                                     session->s_mds, inode, ceph_vinop(inode));
                        }
                }
                mdsc->num_cap_flushing--;
-               dout(" inode %p now !flushing\n", inode);
+               doutc(cl, " %p %llx.%llx now !flushing\n", inode,
+                     ceph_vinop(inode));
 
                if (ci->i_dirty_caps == 0) {
-                       dout(" inode %p now clean\n", inode);
+                       doutc(cl, " %p %llx.%llx now clean\n", inode,
+                             ceph_vinop(inode));
                        BUG_ON(!list_empty(&ci->i_dirty_item));
                        drop = true;
                        if (ci->i_wr_ref == 0 &&
@@ -3833,12 +3923,14 @@ void __ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap,
                           bool *wake_ci, bool *wake_mdsc)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        bool ret;
 
        lockdep_assert_held(&ci->i_ceph_lock);
 
-       dout("removing capsnap %p, inode %p ci %p\n", capsnap, inode, ci);
+       doutc(cl, "removing capsnap %p, %p %llx.%llx ci %p\n", capsnap,
+             inode, ceph_vinop(inode), ci);
 
        list_del_init(&capsnap->ci_item);
        ret = __detach_cap_flush_from_ci(ci, &capsnap->cap_flush);
@@ -3877,29 +3969,31 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
                                     struct ceph_mds_session *session)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        u64 follows = le64_to_cpu(m->snap_follows);
        struct ceph_cap_snap *capsnap = NULL, *iter;
        bool wake_ci = false;
        bool wake_mdsc = false;
 
-       dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
-            inode, ci, session->s_mds, follows);
+       doutc(cl, "%p %llx.%llx ci %p mds%d follows %lld\n", inode,
+             ceph_vinop(inode), ci, session->s_mds, follows);
 
        spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) {
                if (iter->follows == follows) {
                        if (iter->cap_flush.tid != flush_tid) {
-                               dout(" cap_snap %p follows %lld tid %lld !="
-                                    " %lld\n", iter, follows,
-                                    flush_tid, iter->cap_flush.tid);
+                               doutc(cl, " cap_snap %p follows %lld "
+                                     "tid %lld != %lld\n", iter,
+                                     follows, flush_tid,
+                                     iter->cap_flush.tid);
                                break;
                        }
                        capsnap = iter;
                        break;
                } else {
-                       dout(" skipping cap_snap %p follows %lld\n",
-                            iter, iter->follows);
+                       doutc(cl, " skipping cap_snap %p follows %lld\n",
+                             iter, iter->follows);
                }
        }
        if (capsnap)
@@ -3928,6 +4022,7 @@ static bool handle_cap_trunc(struct inode *inode,
                             struct cap_extra_info *extra_info)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int mds = session->s_mds;
        int seq = le32_to_cpu(trunc->seq);
        u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
@@ -3950,8 +4045,8 @@ static bool handle_cap_trunc(struct inode *inode,
        if (IS_ENCRYPTED(inode) && size)
                size = extra_info->fscrypt_file_size;
 
-       dout("%s inode %p mds%d seq %d to %lld truncate seq %d\n",
-            __func__, inode, mds, seq, truncate_size, truncate_seq);
+       doutc(cl, "%p %llx.%llx mds%d seq %d to %lld truncate seq %d\n",
+             inode, ceph_vinop(inode), mds, seq, truncate_size, truncate_seq);
        queue_trunc = ceph_fill_file_size(inode, issued,
                                          truncate_seq, truncate_size, size);
        return queue_trunc;
@@ -3969,7 +4064,8 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
                              struct ceph_mds_cap_peer *ph,
                              struct ceph_mds_session *session)
 {
-       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_session *tsession = NULL;
        struct ceph_cap *cap, *tcap, *new_cap = NULL;
        struct ceph_inode_info *ci = ceph_inode(inode);
@@ -3989,8 +4085,8 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
                target = -1;
        }
 
-       dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
-            inode, ci, mds, mseq, target);
+       doutc(cl, "%p %llx.%llx ci %p mds%d mseq %d target %d\n",
+             inode, ceph_vinop(inode), ci, mds, mseq, target);
 retry:
        down_read(&mdsc->snap_rwsem);
        spin_lock(&ci->i_ceph_lock);
@@ -3999,7 +4095,7 @@ retry:
                goto out_unlock;
 
        if (target < 0) {
-               ceph_remove_cap(cap, false);
+               ceph_remove_cap(mdsc, cap, false);
                goto out_unlock;
        }
 
@@ -4010,12 +4106,13 @@ retry:
 
        issued = cap->issued;
        if (issued != cap->implemented)
-               pr_err_ratelimited("handle_cap_export: issued != implemented: "
-                               "ino (%llx.%llx) mds%d seq %d mseq %d "
-                               "issued %s implemented %s\n",
-                               ceph_vinop(inode), mds, cap->seq, cap->mseq,
-                               ceph_cap_string(issued),
-                               ceph_cap_string(cap->implemented));
+               pr_err_ratelimited_client(cl, "issued != implemented: "
+                                         "%p %llx.%llx mds%d seq %d mseq %d"
+                                         " issued %s implemented %s\n",
+                                         inode, ceph_vinop(inode), mds,
+                                         cap->seq, cap->mseq,
+                                         ceph_cap_string(issued),
+                                         ceph_cap_string(cap->implemented));
 
 
        tcap = __get_cap_for_mds(ci, target);
@@ -4023,7 +4120,8 @@ retry:
                /* already have caps from the target */
                if (tcap->cap_id == t_cap_id &&
                    ceph_seq_cmp(tcap->seq, t_seq) < 0) {
-                       dout(" updating import cap %p mds%d\n", tcap, target);
+                       doutc(cl, " updating import cap %p mds%d\n", tcap,
+                             target);
                        tcap->cap_id = t_cap_id;
                        tcap->seq = t_seq - 1;
                        tcap->issue_seq = t_seq - 1;
@@ -4034,7 +4132,7 @@ retry:
                                change_auth_cap_ses(ci, tcap->session);
                        }
                }
-               ceph_remove_cap(cap, false);
+               ceph_remove_cap(mdsc, cap, false);
                goto out_unlock;
        } else if (tsession) {
                /* add placeholder for the export target */
@@ -4051,7 +4149,7 @@ retry:
                        spin_unlock(&mdsc->cap_dirty_lock);
                }
 
-               ceph_remove_cap(cap, false);
+               ceph_remove_cap(mdsc, cap, false);
                goto out_unlock;
        }
 
@@ -4104,6 +4202,7 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
                              struct ceph_cap **target_cap, int *old_issued)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_cap *cap, *ocap, *new_cap = NULL;
        int mds = session->s_mds;
        int issued;
@@ -4124,8 +4223,8 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
                peer = -1;
        }
 
-       dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
-            inode, ci, mds, mseq, peer);
+       doutc(cl, "%p %llx.%llx ci %p mds%d mseq %d peer %d\n",
+             inode, ceph_vinop(inode), ci, mds, mseq, peer);
 retry:
        cap = __get_cap_for_mds(ci, mds);
        if (!cap) {
@@ -4151,20 +4250,20 @@ retry:
 
        ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
        if (ocap && ocap->cap_id == p_cap_id) {
-               dout(" remove export cap %p mds%d flags %d\n",
-                    ocap, peer, ph->flags);
+               doutc(cl, " remove export cap %p mds%d flags %d\n",
+                     ocap, peer, ph->flags);
                if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
                    (ocap->seq != le32_to_cpu(ph->seq) ||
                     ocap->mseq != le32_to_cpu(ph->mseq))) {
-                       pr_err_ratelimited("handle_cap_import: "
-                                       "mismatched seq/mseq: ino (%llx.%llx) "
-                                       "mds%d seq %d mseq %d importer mds%d "
-                                       "has peer seq %d mseq %d\n",
-                                       ceph_vinop(inode), peer, ocap->seq,
-                                       ocap->mseq, mds, le32_to_cpu(ph->seq),
+                       pr_err_ratelimited_client(cl, "mismatched seq/mseq: "
+                                       "%p %llx.%llx mds%d seq %d mseq %d"
+                                       " importer mds%d has peer seq %d mseq %d\n",
+                                       inode, ceph_vinop(inode), peer,
+                                       ocap->seq, ocap->mseq, mds,
+                                       le32_to_cpu(ph->seq),
                                        le32_to_cpu(ph->mseq));
                }
-               ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
+               ceph_remove_cap(mdsc, ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
        }
 
        *old_issued = issued;
@@ -4227,6 +4326,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                      struct ceph_msg *msg)
 {
        struct ceph_mds_client *mdsc = session->s_mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct ceph_cap *cap;
@@ -4245,7 +4345,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        bool close_sessions = false;
        bool do_cap_release = false;
 
-       dout("handle_caps from mds%d\n", session->s_mds);
+       doutc(cl, "from mds%d\n", session->s_mds);
 
        if (!ceph_inc_mds_stopping_blocker(mdsc, session))
                return;
@@ -4347,15 +4447,15 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 
        /* lookup ino */
        inode = ceph_find_inode(mdsc->fsc->sb, vino);
-       dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
-            vino.snap, inode);
+       doutc(cl, " op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op),
+             vino.ino, vino.snap, inode);
 
        mutex_lock(&session->s_mutex);
-       dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
-            (unsigned)seq);
+       doutc(cl, " mds%d seq %lld cap seq %u\n", session->s_mds,
+             session->s_seq, (unsigned)seq);
 
        if (!inode) {
-               dout(" i don't have ino %llx\n", vino.ino);
+               doutc(cl, " i don't have ino %llx\n", vino.ino);
 
                switch (op) {
                case CEPH_CAP_OP_IMPORT:
@@ -4410,9 +4510,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ceph_inode(inode), session->s_mds);
        if (!cap) {
-               dout(" no cap on %p ino %llx.%llx from mds%d\n",
-                    inode, ceph_ino(inode), ceph_snap(inode),
-                    session->s_mds);
+               doutc(cl, " no cap on %p ino %llx.%llx from mds%d\n",
+                     inode, ceph_ino(inode), ceph_snap(inode),
+                     session->s_mds);
                spin_unlock(&ci->i_ceph_lock);
                switch (op) {
                case CEPH_CAP_OP_REVOKE:
@@ -4450,8 +4550,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
 
        default:
                spin_unlock(&ci->i_ceph_lock);
-               pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
-                      ceph_cap_op_name(op));
+               pr_err_client(cl, "unknown cap op %d %s\n", op,
+                             ceph_cap_op_name(op));
        }
 
 done:
@@ -4492,7 +4592,7 @@ flush_cap_releases:
        goto done;
 
 bad:
-       pr_err("ceph_handle_caps: corrupt message\n");
+       pr_err_client(cl, "corrupt message\n");
        ceph_msg_dump(msg);
        goto out;
 }
@@ -4506,6 +4606,7 @@ bad:
  */
 unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct ceph_mount_options *opt = mdsc->fsc->mount_options;
@@ -4513,14 +4614,14 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
        unsigned long loop_start = jiffies;
        unsigned long delay = 0;
 
-       dout("check_delayed_caps\n");
+       doutc(cl, "begin\n");
        spin_lock(&mdsc->cap_delay_lock);
        while (!list_empty(&mdsc->cap_delay_list)) {
                ci = list_first_entry(&mdsc->cap_delay_list,
                                      struct ceph_inode_info,
                                      i_cap_delay_list);
                if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) {
-                       dout("%s caps added recently.  Exiting loop", __func__);
+                       doutc(cl, "caps added recently.  Exiting loop");
                        delay = ci->i_hold_caps_max;
                        break;
                }
@@ -4532,13 +4633,15 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
                inode = igrab(&ci->netfs.inode);
                if (inode) {
                        spin_unlock(&mdsc->cap_delay_lock);
-                       dout("check_delayed_caps on %p\n", inode);
+                       doutc(cl, "on %p %llx.%llx\n", inode,
+                             ceph_vinop(inode));
                        ceph_check_caps(ci, 0);
                        iput(inode);
                        spin_lock(&mdsc->cap_delay_lock);
                }
        }
        spin_unlock(&mdsc->cap_delay_lock);
+       doutc(cl, "done\n");
 
        return delay;
 }
@@ -4549,17 +4652,18 @@ unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
 static void flush_dirty_session_caps(struct ceph_mds_session *s)
 {
        struct ceph_mds_client *mdsc = s->s_mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_inode_info *ci;
        struct inode *inode;
 
-       dout("flush_dirty_caps\n");
+       doutc(cl, "begin\n");
        spin_lock(&mdsc->cap_dirty_lock);
        while (!list_empty(&s->s_cap_dirty)) {
                ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info,
                                      i_dirty_item);
                inode = &ci->netfs.inode;
                ihold(inode);
-               dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode));
+               doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
                spin_unlock(&mdsc->cap_dirty_lock);
                ceph_wait_on_async_create(inode);
                ceph_check_caps(ci, CHECK_CAPS_FLUSH);
@@ -4567,7 +4671,7 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s)
                spin_lock(&mdsc->cap_dirty_lock);
        }
        spin_unlock(&mdsc->cap_dirty_lock);
-       dout("flush_dirty_caps done\n");
+       doutc(cl, "done\n");
 }
 
 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
@@ -4672,7 +4776,7 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
 
                if (__ceph_caps_dirty(ci)) {
                        struct ceph_mds_client *mdsc =
-                               ceph_inode_to_client(inode)->mdsc;
+                               ceph_inode_to_fs_client(inode)->mdsc;
                        __cap_delay_requeue_front(mdsc, ci);
                }
        }
@@ -4692,6 +4796,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
                              int mds, int drop, int unless, int force)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_cap *cap;
        struct ceph_mds_request_release *rel = *p;
        int used, dirty;
@@ -4701,9 +4806,9 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
        used = __ceph_caps_used(ci);
        dirty = __ceph_caps_dirty(ci);
 
-       dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
-            inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
-            ceph_cap_string(unless));
+       doutc(cl, "%p %llx.%llx mds%d used|dirty %s drop %s unless %s\n",
+             inode, ceph_vinop(inode), mds, ceph_cap_string(used|dirty),
+             ceph_cap_string(drop), ceph_cap_string(unless));
 
        /* only drop unused, clean caps */
        drop &= ~(used | dirty);
@@ -4725,12 +4830,13 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
                if (force || (cap->issued & drop)) {
                        if (cap->issued & drop) {
                                int wanted = __ceph_caps_wanted(ci);
-                               dout("encode_inode_release %p cap %p "
-                                    "%s -> %s, wanted %s -> %s\n", inode, cap,
-                                    ceph_cap_string(cap->issued),
-                                    ceph_cap_string(cap->issued & ~drop),
-                                    ceph_cap_string(cap->mds_wanted),
-                                    ceph_cap_string(wanted));
+                               doutc(cl, "%p %llx.%llx cap %p %s -> %s, "
+                                     "wanted %s -> %s\n", inode,
+                                     ceph_vinop(inode), cap,
+                                     ceph_cap_string(cap->issued),
+                                     ceph_cap_string(cap->issued & ~drop),
+                                     ceph_cap_string(cap->mds_wanted),
+                                     ceph_cap_string(wanted));
 
                                cap->issued &= ~drop;
                                cap->implemented &= ~drop;
@@ -4739,9 +4845,9 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
                                    !(wanted & CEPH_CAP_ANY_FILE_WR))
                                        ci->i_requested_max_size = 0;
                        } else {
-                               dout("encode_inode_release %p cap %p %s"
-                                    " (force)\n", inode, cap,
-                                    ceph_cap_string(cap->issued));
+                               doutc(cl, "%p %llx.%llx cap %p %s (force)\n",
+                                     inode, ceph_vinop(inode), cap,
+                                     ceph_cap_string(cap->issued));
                        }
 
                        rel->ino = cpu_to_le64(ceph_ino(inode));
@@ -4756,8 +4862,9 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
                        *p += sizeof(*rel);
                        ret = 1;
                } else {
-                       dout("encode_inode_release %p cap %p %s (noop)\n",
-                            inode, cap, ceph_cap_string(cap->issued));
+                       doutc(cl, "%p %llx.%llx cap %p %s (noop)\n",
+                             inode, ceph_vinop(inode), cap,
+                             ceph_cap_string(cap->issued));
                }
        }
        spin_unlock(&ci->i_ceph_lock);
@@ -4783,6 +4890,7 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
        struct dentry *parent = NULL;
        struct ceph_mds_request_release *rel = *p;
        struct ceph_dentry_info *di = ceph_dentry(dentry);
+       struct ceph_client *cl;
        int force = 0;
        int ret;
 
@@ -4804,10 +4912,11 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
        ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
        dput(parent);
 
+       cl = ceph_inode_to_client(dir);
        spin_lock(&dentry->d_lock);
        if (ret && di->lease_session && di->lease_session->s_mds == mds) {
-               dout("encode_dentry_release %p mds%d seq %d\n",
-                    dentry, mds, (int)di->lease_seq);
+               doutc(cl, "%p mds%d seq %d\n",  dentry, mds,
+                     (int)di->lease_seq);
                rel->dname_seq = cpu_to_le32(di->lease_seq);
                __ceph_mdsc_drop_dentry_lease(dentry);
                spin_unlock(&dentry->d_lock);
@@ -4833,12 +4942,14 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
 static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_cap_snap *capsnap;
        int capsnap_release = 0;
 
        lockdep_assert_held(&ci->i_ceph_lock);
 
-       dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
+       doutc(cl, "removing capsnaps, ci is %p, %p %llx.%llx\n",
+             ci, inode, ceph_vinop(inode));
 
        while (!list_empty(&ci->i_cap_snaps)) {
                capsnap = list_first_entry(&ci->i_cap_snaps,
@@ -4855,8 +4966,9 @@ static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
 
 int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate)
 {
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
+       struct ceph_client *cl = fsc->client;
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool is_auth;
        bool dirty_dropped = false;
@@ -4864,8 +4976,8 @@ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invali
 
        lockdep_assert_held(&ci->i_ceph_lock);
 
-       dout("removing cap %p, ci is %p, inode is %p\n",
-            cap, ci, &ci->netfs.inode);
+       doutc(cl, "removing cap %p, ci is %p, %p %llx.%llx\n",
+             cap, ci, inode, ceph_vinop(inode));
 
        is_auth = (cap == ci->i_auth_cap);
        __ceph_remove_cap(cap, false);
@@ -4892,19 +5004,19 @@ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invali
                }
 
                if (!list_empty(&ci->i_dirty_item)) {
-                       pr_warn_ratelimited(
-                               " dropping dirty %s state for %p %lld\n",
+                       pr_warn_ratelimited_client(cl,
+                               " dropping dirty %s state for %p %llx.%llx\n",
                                ceph_cap_string(ci->i_dirty_caps),
-                               inode, ceph_ino(inode));
+                               inode, ceph_vinop(inode));
                        ci->i_dirty_caps = 0;
                        list_del_init(&ci->i_dirty_item);
                        dirty_dropped = true;
                }
                if (!list_empty(&ci->i_flushing_item)) {
-                       pr_warn_ratelimited(
-                               " dropping dirty+flushing %s state for %p %lld\n",
+                       pr_warn_ratelimited_client(cl,
+                               " dropping dirty+flushing %s state for %p %llx.%llx\n",
                                ceph_cap_string(ci->i_flushing_caps),
-                               inode, ceph_ino(inode));
+                               inode, ceph_vinop(inode));
                        ci->i_flushing_caps = 0;
                        list_del_init(&ci->i_flushing_item);
                        mdsc->num_cap_flushing--;
@@ -4927,8 +5039,9 @@ int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invali
                if (atomic_read(&ci->i_filelock_ref) > 0) {
                        /* make further file lock syscall return -EIO */
                        ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
-                       pr_warn_ratelimited(" dropping file locks for %p %lld\n",
-                                           inode, ceph_ino(inode));
+                       pr_warn_ratelimited_client(cl,
+                               " dropping file locks for %p %llx.%llx\n",
+                               inode, ceph_vinop(inode));
                }
 
                if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
index e3b1c3fab4127f94d8d4177ad0f431afebff40cf..3b3c4d8d401ece05205609eddeb4b4a230238300 100644 (file)
@@ -113,7 +113,7 @@ static int ceph_crypt_set_context(struct inode *inode, const void *ctx,
 
        cia.fscrypt_auth = cfa;
 
-       ret = __ceph_setattr(inode, &attr, &cia);
+       ret = __ceph_setattr(&nop_mnt_idmap, inode, &attr, &cia);
        if (ret == 0)
                inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
        kfree(cia.fscrypt_auth);
@@ -129,7 +129,7 @@ static bool ceph_crypt_empty_dir(struct inode *inode)
 
 static const union fscrypt_policy *ceph_get_dummy_policy(struct super_block *sb)
 {
-       return ceph_sb_to_client(sb)->fsc_dummy_enc_policy.policy;
+       return ceph_sb_to_fs_client(sb)->fsc_dummy_enc_policy.policy;
 }
 
 static struct fscrypt_operations ceph_fscrypt_ops = {
@@ -212,6 +212,7 @@ void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req,
 static struct inode *parse_longname(const struct inode *parent,
                                    const char *name, int *name_len)
 {
+       struct ceph_client *cl = ceph_inode_to_client(parent);
        struct inode *dir = NULL;
        struct ceph_vino vino = { .snap = CEPH_NOSNAP };
        char *inode_number;
@@ -223,12 +224,12 @@ static struct inode *parse_longname(const struct inode *parent,
        name++;
        name_end = strrchr(name, '_');
        if (!name_end) {
-               dout("Failed to parse long snapshot name: %s\n", name);
+               doutc(cl, "failed to parse long snapshot name: %s\n", name);
                return ERR_PTR(-EIO);
        }
        *name_len = (name_end - name);
        if (*name_len <= 0) {
-               pr_err("Failed to parse long snapshot name\n");
+               pr_err_client(cl, "failed to parse long snapshot name\n");
                return ERR_PTR(-EIO);
        }
 
@@ -240,7 +241,7 @@ static struct inode *parse_longname(const struct inode *parent,
                return ERR_PTR(-ENOMEM);
        ret = kstrtou64(inode_number, 10, &vino.ino);
        if (ret) {
-               dout("Failed to parse inode number: %s\n", name);
+               doutc(cl, "failed to parse inode number: %s\n", name);
                dir = ERR_PTR(ret);
                goto out;
        }
@@ -251,7 +252,7 @@ static struct inode *parse_longname(const struct inode *parent,
                /* This can happen if we're not mounting cephfs on the root */
                dir = ceph_get_inode(parent->i_sb, vino, NULL);
                if (IS_ERR(dir))
-                       dout("Can't find inode %s (%s)\n", inode_number, name);
+                       doutc(cl, "can't find inode %s (%s)\n", inode_number, name);
        }
 
 out:
@@ -262,6 +263,7 @@ out:
 int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
                                char *buf)
 {
+       struct ceph_client *cl = ceph_inode_to_client(parent);
        struct inode *dir = parent;
        struct qstr iname;
        u32 len;
@@ -330,7 +332,7 @@ int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
 
        /* base64 encode the encrypted name */
        elen = ceph_base64_encode(cryptbuf, len, buf);
-       dout("base64-encoded ciphertext name = %.*s\n", elen, buf);
+       doutc(cl, "base64-encoded ciphertext name = %.*s\n", elen, buf);
 
        /* To understand the 240 limit, see CEPH_NOHASH_NAME_MAX comments */
        WARN_ON(elen > 240);
@@ -505,7 +507,10 @@ int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
                                  struct page *page, unsigned int len,
                                  unsigned int offs, u64 lblk_num)
 {
-       dout("%s: len %u offs %u blk %llu\n", __func__, len, offs, lblk_num);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
+
+       doutc(cl, "%p %llx.%llx len %u offs %u blk %llu\n", inode,
+             ceph_vinop(inode), len, offs, lblk_num);
        return fscrypt_decrypt_block_inplace(inode, page, len, offs, lblk_num);
 }
 
@@ -514,7 +519,10 @@ int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
                                  unsigned int offs, u64 lblk_num,
                                  gfp_t gfp_flags)
 {
-       dout("%s: len %u offs %u blk %llu\n", __func__, len, offs, lblk_num);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
+
+       doutc(cl, "%p %llx.%llx len %u offs %u blk %llu\n", inode,
+             ceph_vinop(inode), len, offs, lblk_num);
        return fscrypt_encrypt_block_inplace(inode, page, len, offs, lblk_num,
                                             gfp_flags);
 }
@@ -583,6 +591,7 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
                                 u64 off, struct ceph_sparse_extent *map,
                                 u32 ext_cnt)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int i, ret = 0;
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 objno, objoff;
@@ -590,7 +599,8 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
 
        /* Nothing to do for empty array */
        if (ext_cnt == 0) {
-               dout("%s: empty array, ret 0\n", __func__);
+               doutc(cl, "%p %llx.%llx empty array, ret 0\n", inode,
+                     ceph_vinop(inode));
                return 0;
        }
 
@@ -604,14 +614,17 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
                int fret;
 
                if ((ext->off | ext->len) & ~CEPH_FSCRYPT_BLOCK_MASK) {
-                       pr_warn("%s: bad encrypted sparse extent idx %d off %llx len %llx\n",
-                               __func__, i, ext->off, ext->len);
+                       pr_warn_client(cl,
+                               "%p %llx.%llx bad encrypted sparse extent "
+                               "idx %d off %llx len %llx\n",
+                               inode, ceph_vinop(inode), i, ext->off,
+                               ext->len);
                        return -EIO;
                }
                fret = ceph_fscrypt_decrypt_pages(inode, &page[pgidx],
                                                 off + pgsoff, ext->len);
-               dout("%s: [%d] 0x%llx~0x%llx fret %d\n", __func__, i,
-                               ext->off, ext->len, fret);
+               doutc(cl, "%p %llx.%llx [%d] 0x%llx~0x%llx fret %d\n", inode,
+                     ceph_vinop(inode), i, ext->off, ext->len, fret);
                if (fret < 0) {
                        if (ret == 0)
                                ret = fret;
@@ -619,7 +632,7 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
                }
                ret = pgsoff + fret;
        }
-       dout("%s: ret %d\n", __func__, ret);
+       doutc(cl, "ret %d\n", ret);
        return ret;
 }
 
index 3904333fa6c38b80f4a99db72d354709dbba1d8e..24c08078f5aa3e49d6d1bea454e503cbc1810019 100644 (file)
@@ -81,7 +81,7 @@ static int mdsc_show(struct seq_file *s, void *p)
                if (req->r_inode) {
                        seq_printf(s, " #%llx", ceph_ino(req->r_inode));
                } else if (req->r_dentry) {
-                       path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
+                       path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
                                                    &pathbase, 0);
                        if (IS_ERR(path))
                                path = NULL;
@@ -100,7 +100,7 @@ static int mdsc_show(struct seq_file *s, void *p)
                }
 
                if (req->r_old_dentry) {
-                       path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
+                       path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &pathlen,
                                                    &pathbase, 0);
                        if (IS_ERR(path))
                                path = NULL;
@@ -398,7 +398,7 @@ DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
 
 void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
 {
-       dout("ceph_fs_debugfs_cleanup\n");
+       doutc(fsc->client, "begin\n");
        debugfs_remove(fsc->debugfs_bdi);
        debugfs_remove(fsc->debugfs_congestion_kb);
        debugfs_remove(fsc->debugfs_mdsmap);
@@ -407,13 +407,14 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
        debugfs_remove(fsc->debugfs_status);
        debugfs_remove(fsc->debugfs_mdsc);
        debugfs_remove_recursive(fsc->debugfs_metrics_dir);
+       doutc(fsc->client, "done\n");
 }
 
 void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
 {
        char name[100];
 
-       dout("ceph_fs_debugfs_init\n");
+       doutc(fsc->client, "begin\n");
        fsc->debugfs_congestion_kb =
                debugfs_create_file("writeback_congestion_kb",
                                    0600,
@@ -469,6 +470,7 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
                            &metrics_size_fops);
        debugfs_create_file("caps", 0400, fsc->debugfs_metrics_dir, fsc,
                            &metrics_caps_fops);
+       doutc(fsc->client, "done\n");
 }
 
 
index 854cbdd666619aeef65f1ecca063112a68e2ac7e..91709934c8b14371a6eb41b35302559d26f94823 100644 (file)
@@ -109,7 +109,9 @@ static int fpos_cmp(loff_t l, loff_t r)
  * regardless of what dir changes take place on the
  * server.
  */
-static int note_last_dentry(struct ceph_dir_file_info *dfi, const char *name,
+static int note_last_dentry(struct ceph_fs_client *fsc,
+                           struct ceph_dir_file_info *dfi,
+                           const char *name,
                            int len, unsigned next_offset)
 {
        char *buf = kmalloc(len+1, GFP_KERNEL);
@@ -120,7 +122,7 @@ static int note_last_dentry(struct ceph_dir_file_info *dfi, const char *name,
        memcpy(dfi->last_name, name, len);
        dfi->last_name[len] = 0;
        dfi->next_offset = next_offset;
-       dout("note_last_dentry '%s'\n", dfi->last_name);
+       doutc(fsc->client, "'%s'\n", dfi->last_name);
        return 0;
 }
 
@@ -130,6 +132,7 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
                        struct ceph_readdir_cache_control *cache_ctl)
 {
        struct inode *dir = d_inode(parent);
+       struct ceph_client *cl = ceph_inode_to_client(dir);
        struct dentry *dentry;
        unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
        loff_t ptr_pos = idx * sizeof(struct dentry *);
@@ -142,7 +145,7 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
                ceph_readdir_cache_release(cache_ctl);
                cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
                if (!cache_ctl->page) {
-                       dout(" page %lu not found\n", ptr_pgoff);
+                       doutc(cl, " page %lu not found\n", ptr_pgoff);
                        return ERR_PTR(-EAGAIN);
                }
                /* reading/filling the cache are serialized by
@@ -185,13 +188,16 @@ static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
        struct ceph_dir_file_info *dfi = file->private_data;
        struct dentry *parent = file->f_path.dentry;
        struct inode *dir = d_inode(parent);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(dir);
+       struct ceph_client *cl = ceph_inode_to_client(dir);
        struct dentry *dentry, *last = NULL;
        struct ceph_dentry_info *di;
        struct ceph_readdir_cache_control cache_ctl = {};
        u64 idx = 0;
        int err = 0;
 
-       dout("__dcache_readdir %p v%u at %llx\n", dir, (unsigned)shared_gen, ctx->pos);
+       doutc(cl, "%p %llx.%llx v%u at %llx\n", dir, ceph_vinop(dir),
+             (unsigned)shared_gen, ctx->pos);
 
        /* search start position */
        if (ctx->pos > 2) {
@@ -221,7 +227,8 @@ static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
                        dput(dentry);
                }
 
-               dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
+               doutc(cl, "%p %llx.%llx cache idx %llu\n", dir,
+                     ceph_vinop(dir), idx);
        }
 
 
@@ -257,8 +264,8 @@ static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
                spin_unlock(&dentry->d_lock);
 
                if (emit_dentry) {
-                       dout(" %llx dentry %p %pd %p\n", di->offset,
-                            dentry, dentry, d_inode(dentry));
+                       doutc(cl, " %llx dentry %p %pd %p\n", di->offset,
+                             dentry, dentry, d_inode(dentry));
                        ctx->pos = di->offset;
                        if (!dir_emit(ctx, dentry->d_name.name,
                                      dentry->d_name.len, ceph_present_inode(d_inode(dentry)),
@@ -281,7 +288,8 @@ out:
        if (last) {
                int ret;
                di = ceph_dentry(last);
-               ret = note_last_dentry(dfi, last->d_name.name, last->d_name.len,
+               ret = note_last_dentry(fsc, dfi, last->d_name.name,
+                                      last->d_name.len,
                                       fpos_off(di->offset) + 1);
                if (ret < 0)
                        err = ret;
@@ -310,20 +318,23 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
        struct ceph_dir_file_info *dfi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
+       struct ceph_client *cl = fsc->client;
        int i;
        int err;
        unsigned frag = -1;
        struct ceph_mds_reply_info_parsed *rinfo;
 
-       dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
+       doutc(cl, "%p %llx.%llx file %p pos %llx\n", inode,
+             ceph_vinop(inode), file, ctx->pos);
        if (dfi->file_info.flags & CEPH_F_ATEND)
                return 0;
 
        /* always start with . and .. */
        if (ctx->pos == 0) {
-               dout("readdir off 0 -> '.'\n");
+               doutc(cl, "%p %llx.%llx off 0 -> '.'\n", inode,
+                     ceph_vinop(inode));
                if (!dir_emit(ctx, ".", 1, ceph_present_inode(inode),
                            inode->i_mode >> 12))
                        return 0;
@@ -337,7 +348,8 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
                ino = ceph_present_inode(dentry->d_parent->d_inode);
                spin_unlock(&dentry->d_lock);
 
-               dout("readdir off 1 -> '..'\n");
+               doutc(cl, "%p %llx.%llx off 1 -> '..'\n", inode,
+                     ceph_vinop(inode));
                if (!dir_emit(ctx, "..", 2, ino, inode->i_mode >> 12))
                        return 0;
                ctx->pos = 2;
@@ -391,8 +403,8 @@ more:
                        frag = fpos_frag(ctx->pos);
                }
 
-               dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
-                    ceph_vinop(inode), frag, dfi->last_name);
+               doutc(cl, "fetching %p %llx.%llx frag %x offset '%s'\n",
+                     inode, ceph_vinop(inode), frag, dfi->last_name);
                req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
                if (IS_ERR(req))
                        return PTR_ERR(req);
@@ -446,12 +458,12 @@ more:
                        ceph_mdsc_put_request(req);
                        return err;
                }
-               dout("readdir got and parsed readdir result=%d on "
-                    "frag %x, end=%d, complete=%d, hash_order=%d\n",
-                    err, frag,
-                    (int)req->r_reply_info.dir_end,
-                    (int)req->r_reply_info.dir_complete,
-                    (int)req->r_reply_info.hash_order);
+               doutc(cl, "%p %llx.%llx got and parsed readdir result=%d "
+                     "on frag %x, end=%d, complete=%d, hash_order=%d\n",
+                     inode, ceph_vinop(inode), err, frag,
+                     (int)req->r_reply_info.dir_end,
+                     (int)req->r_reply_info.dir_complete,
+                     (int)req->r_reply_info.hash_order);
 
                rinfo = &req->r_reply_info;
                if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
@@ -481,7 +493,8 @@ more:
                                dfi->dir_ordered_count = req->r_dir_ordered_cnt;
                        }
                } else {
-                       dout("readdir !did_prepopulate\n");
+                       doutc(cl, "%p %llx.%llx !did_prepopulate\n", inode,
+                             ceph_vinop(inode));
                        /* disable readdir cache */
                        dfi->readdir_cache_idx = -1;
                        /* preclude from marking dir complete */
@@ -494,8 +507,8 @@ more:
                                        rinfo->dir_entries + (rinfo->dir_nr-1);
                        unsigned next_offset = req->r_reply_info.dir_end ?
                                        2 : (fpos_off(rde->offset) + 1);
-                       err = note_last_dentry(dfi, rde->name, rde->name_len,
-                                              next_offset);
+                       err = note_last_dentry(fsc, dfi, rde->name,
+                                              rde->name_len, next_offset);
                        if (err) {
                                ceph_mdsc_put_request(dfi->last_readdir);
                                dfi->last_readdir = NULL;
@@ -508,9 +521,9 @@ more:
        }
 
        rinfo = &dfi->last_readdir->r_reply_info;
-       dout("readdir frag %x num %d pos %llx chunk first %llx\n",
-            dfi->frag, rinfo->dir_nr, ctx->pos,
-            rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
+       doutc(cl, "%p %llx.%llx frag %x num %d pos %llx chunk first %llx\n",
+             inode, ceph_vinop(inode), dfi->frag, rinfo->dir_nr, ctx->pos,
+             rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
 
        i = 0;
        /* search start position */
@@ -530,8 +543,9 @@ more:
                struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
 
                if (rde->offset < ctx->pos) {
-                       pr_warn("%s: rde->offset 0x%llx ctx->pos 0x%llx\n",
-                               __func__, rde->offset, ctx->pos);
+                       pr_warn_client(cl,
+                               "%p %llx.%llx rde->offset 0x%llx ctx->pos 0x%llx\n",
+                               inode, ceph_vinop(inode), rde->offset, ctx->pos);
                        return -EIO;
                }
 
@@ -539,9 +553,9 @@ more:
                        return -EIO;
 
                ctx->pos = rde->offset;
-               dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
-                    i, rinfo->dir_nr, ctx->pos,
-                    rde->name_len, rde->name, &rde->inode.in);
+               doutc(cl, "%p %llx.%llx (%d/%d) -> %llx '%.*s' %p\n", inode,
+                     ceph_vinop(inode), i, rinfo->dir_nr, ctx->pos,
+                     rde->name_len, rde->name, &rde->inode.in);
 
                if (!dir_emit(ctx, rde->name, rde->name_len,
                              ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)),
@@ -552,7 +566,7 @@ more:
                         * doesn't have enough memory, etc. So for next readdir
                         * it will continue.
                         */
-                       dout("filldir stopping us...\n");
+                       doutc(cl, "filldir stopping us...\n");
                        return 0;
                }
 
@@ -583,7 +597,8 @@ more:
                        kfree(dfi->last_name);
                        dfi->last_name = NULL;
                }
-               dout("readdir next frag is %x\n", frag);
+               doutc(cl, "%p %llx.%llx next frag is %x\n", inode,
+                     ceph_vinop(inode), frag);
                goto more;
        }
        dfi->file_info.flags |= CEPH_F_ATEND;
@@ -598,20 +613,23 @@ more:
                spin_lock(&ci->i_ceph_lock);
                if (dfi->dir_ordered_count ==
                                atomic64_read(&ci->i_ordered_count)) {
-                       dout(" marking %p complete and ordered\n", inode);
+                       doutc(cl, " marking %p %llx.%llx complete and ordered\n",
+                             inode, ceph_vinop(inode));
                        /* use i_size to track number of entries in
                         * readdir cache */
                        BUG_ON(dfi->readdir_cache_idx < 0);
                        i_size_write(inode, dfi->readdir_cache_idx *
                                     sizeof(struct dentry*));
                } else {
-                       dout(" marking %p complete\n", inode);
+                       doutc(cl, " marking %llx.%llx complete\n",
+                             ceph_vinop(inode));
                }
                __ceph_dir_set_complete(ci, dfi->dir_release_count,
                                        dfi->dir_ordered_count);
                spin_unlock(&ci->i_ceph_lock);
        }
-       dout("readdir %p file %p done.\n", inode, file);
+       doutc(cl, "%p %llx.%llx file %p done.\n", inode, ceph_vinop(inode),
+             file);
        return 0;
 }
 
@@ -657,6 +675,7 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
 {
        struct ceph_dir_file_info *dfi = file->private_data;
        struct inode *inode = file->f_mapping->host;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        loff_t retval;
 
        inode_lock(inode);
@@ -676,7 +695,8 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
 
        if (offset >= 0) {
                if (need_reset_readdir(dfi, offset)) {
-                       dout("dir_llseek dropping %p content\n", file);
+                       doutc(cl, "%p %llx.%llx dropping %p content\n",
+                             inode, ceph_vinop(inode), file);
                        reset_readdir(dfi);
                } else if (is_hash_order(offset) && offset > file->f_pos) {
                        /* for hash offset, we don't know if a forward seek
@@ -703,8 +723,9 @@ out:
 struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
                                   struct dentry *dentry)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
        struct inode *parent = d_inode(dentry->d_parent); /* we hold i_rwsem */
+       struct ceph_client *cl = ceph_inode_to_client(parent);
 
        /* .snap dir? */
        if (ceph_snap(parent) == CEPH_NOSNAP &&
@@ -713,8 +734,9 @@ struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
                struct inode *inode = ceph_get_snapdir(parent);
 
                res = d_splice_alias(inode, dentry);
-               dout("ENOENT on snapdir %p '%pd', linking to snapdir %p. Spliced dentry %p\n",
-                    dentry, dentry, inode, res);
+               doutc(cl, "ENOENT on snapdir %p '%pd', linking to "
+                     "snapdir %p %llx.%llx. Spliced dentry %p\n",
+                     dentry, dentry, inode, ceph_vinop(inode), res);
                if (res)
                        dentry = res;
        }
@@ -735,12 +757,15 @@ struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                  struct dentry *dentry, int err)
 {
+       struct ceph_client *cl = req->r_mdsc->fsc->client;
+
        if (err == -ENOENT) {
                /* no trace? */
                err = 0;
                if (!req->r_reply_info.head->is_dentry) {
-                       dout("ENOENT and no trace, dentry %p inode %p\n",
-                            dentry, d_inode(dentry));
+                       doutc(cl,
+                             "ENOENT and no trace, dentry %p inode %llx.%llx\n",
+                             dentry, ceph_vinop(d_inode(dentry)));
                        if (d_really_is_positive(dentry)) {
                                d_drop(dentry);
                                err = -ENOENT;
@@ -771,15 +796,16 @@ static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
 static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                                  unsigned int flags)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+       struct ceph_client *cl = fsc->client;
        struct ceph_mds_request *req;
        int op;
        int mask;
        int err;
 
-       dout("lookup %p dentry %p '%pd'\n",
-            dir, dentry, dentry);
+       doutc(cl, "%p %llx.%llx/'%pd' dentry %p\n", dir, ceph_vinop(dir),
+             dentry, dentry);
 
        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);
@@ -802,7 +828,8 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                struct ceph_dentry_info *di = ceph_dentry(dentry);
 
                spin_lock(&ci->i_ceph_lock);
-               dout(" dir %p flags are 0x%lx\n", dir, ci->i_ceph_flags);
+               doutc(cl, " dir %llx.%llx flags are 0x%lx\n",
+                     ceph_vinop(dir), ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
                            dentry->d_name.len) &&
@@ -812,7 +839,8 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                    __ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
                        __ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD);
                        spin_unlock(&ci->i_ceph_lock);
-                       dout(" dir %p complete, -ENOENT\n", dir);
+                       doutc(cl, " dir %llx.%llx complete, -ENOENT\n",
+                             ceph_vinop(dir));
                        d_add(dentry, NULL);
                        di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
                        return NULL;
@@ -850,7 +878,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
        }
        dentry = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req);  /* will dput(dentry) */
-       dout("lookup result=%p\n", dentry);
+       doutc(cl, "result=%p\n", dentry);
        return dentry;
 }
 
@@ -885,6 +913,7 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
                      struct dentry *dentry, umode_t mode, dev_t rdev)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        struct ceph_acl_sec_ctx as_ctx = {};
        int err;
@@ -901,8 +930,8 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
                goto out;
        }
 
-       dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
-            dir, dentry, mode, rdev);
+       doutc(cl, "%p %llx.%llx/'%pd' dentry %p mode 0%ho rdev %d\n",
+             dir, ceph_vinop(dir), dentry, dentry, mode, rdev);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
@@ -924,6 +953,7 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
        req->r_parent = dir;
        ihold(dir);
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+       req->r_mnt_idmap = mnt_idmap_get(idmap);
        req->r_args.mknod.mode = cpu_to_le32(mode);
        req->r_args.mknod.rdev = cpu_to_le32(rdev);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
@@ -993,6 +1023,7 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
                        struct dentry *dentry, const char *dest)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        struct ceph_acl_sec_ctx as_ctx = {};
        umode_t mode = S_IFLNK | 0777;
@@ -1010,7 +1041,8 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
                goto out;
        }
 
-       dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
+       doutc(cl, "%p %llx.%llx/'%pd' to '%s'\n", dir, ceph_vinop(dir), dentry,
+             dest);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
@@ -1040,6 +1072,7 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
        }
 
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+       req->r_mnt_idmap = mnt_idmap_get(idmap);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
@@ -1064,6 +1097,7 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
                      struct dentry *dentry, umode_t mode)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        struct ceph_acl_sec_ctx as_ctx = {};
        int err;
@@ -1076,10 +1110,11 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* mkdir .snap/foo is a MKSNAP */
                op = CEPH_MDS_OP_MKSNAP;
-               dout("mksnap dir %p snap '%pd' dn %p\n", dir,
-                    dentry, dentry);
+               doutc(cl, "mksnap %llx.%llx/'%pd' dentry %p\n",
+                     ceph_vinop(dir), dentry, dentry);
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
-               dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
+               doutc(cl, "mkdir %llx.%llx/'%pd' dentry %p mode 0%ho\n",
+                     ceph_vinop(dir), dentry, dentry, mode);
                op = CEPH_MDS_OP_MKDIR;
        } else {
                err = -EROFS;
@@ -1117,6 +1152,8 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
        req->r_parent = dir;
        ihold(dir);
        set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
+       if (op == CEPH_MDS_OP_MKDIR)
+               req->r_mnt_idmap = mnt_idmap_get(idmap);
        req->r_args.mkdir.mode = cpu_to_le32(mode);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
                             CEPH_CAP_XATTR_EXCL;
@@ -1144,6 +1181,7 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
                     struct dentry *dentry)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        int err;
 
@@ -1161,8 +1199,8 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
        if (err)
                return err;
 
-       dout("link in dir %p %llx.%llx old_dentry %p:'%pd' dentry %p:'%pd'\n",
-            dir, ceph_vinop(dir), old_dentry, old_dentry, dentry, dentry);
+       doutc(cl, "%p %llx.%llx/'%pd' to '%pd'\n", dir, ceph_vinop(dir),
+             old_dentry, dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
@@ -1199,14 +1237,16 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
 {
        struct dentry *dentry = req->r_dentry;
-       struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+       struct ceph_client *cl = fsc->client;
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int result = req->r_err ? req->r_err :
                        le32_to_cpu(req->r_reply_info.head->result);
 
        if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
-               pr_warn("%s dentry %p:%pd async unlink bit is not set\n",
-                       __func__, dentry, dentry);
+               pr_warn_client(cl,
+                       "dentry %p:%pd async unlink bit is not set\n",
+                       dentry, dentry);
 
        spin_lock(&fsc->async_unlink_conflict_lock);
        hash_del_rcu(&di->hnode);
@@ -1226,7 +1266,7 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
        if (result) {
                int pathlen = 0;
                u64 base = 0;
-               char *path = ceph_mdsc_build_path(dentry, &pathlen,
+               char *path = ceph_mdsc_build_path(mdsc, dentry, &pathlen,
                                                  &base, 0);
 
                /* mark error on parent + clear complete */
@@ -1240,8 +1280,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
                /* mark inode itself for an error (since metadata is bogus) */
                mapping_set_error(req->r_old_inode->i_mapping, result);
 
-               pr_warn("async unlink failure path=(%llx)%s result=%d!\n",
-                       base, IS_ERR(path) ? "<<bad>>" : path, result);
+               pr_warn_client(cl, "failure path=(%llx)%s result=%d!\n",
+                              base, IS_ERR(path) ? "<<bad>>" : path, result);
                ceph_mdsc_free_path(path, pathlen);
        }
 out:
@@ -1290,7 +1330,8 @@ static int get_caps_for_async_unlink(struct inode *dir, struct dentry *dentry)
  */
 static int ceph_unlink(struct inode *dir, struct dentry *dentry)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+       struct ceph_client *cl = fsc->client;
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = d_inode(dentry);
        struct ceph_mds_request *req;
@@ -1300,11 +1341,12 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
 
        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* rmdir .snap/foo is RMSNAP */
-               dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
+               doutc(cl, "rmsnap %llx.%llx/'%pd' dn\n", ceph_vinop(dir),
+                     dentry);
                op = CEPH_MDS_OP_RMSNAP;
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
-               dout("unlink/rmdir dir %p dn %p inode %p\n",
-                    dir, dentry, inode);
+               doutc(cl, "unlink/rmdir %llx.%llx/'%pd' inode %llx.%llx\n",
+                     ceph_vinop(dir), dentry, ceph_vinop(inode));
                op = d_is_dir(dentry) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
@@ -1327,9 +1369,9 @@ retry:
            (req->r_dir_caps = get_caps_for_async_unlink(dir, dentry))) {
                struct ceph_dentry_info *di = ceph_dentry(dentry);
 
-               dout("async unlink on %llu/%.*s caps=%s", ceph_ino(dir),
-                    dentry->d_name.len, dentry->d_name.name,
-                    ceph_cap_string(req->r_dir_caps));
+               doutc(cl, "async unlink on %llx.%llx/'%pd' caps=%s",
+                     ceph_vinop(dir), dentry,
+                     ceph_cap_string(req->r_dir_caps));
                set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
                req->r_callback = ceph_async_unlink_cb;
                req->r_old_inode = d_inode(dentry);
@@ -1384,6 +1426,7 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
                       struct dentry *new_dentry, unsigned int flags)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old_dir->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        int op = CEPH_MDS_OP_RENAME;
        int err;
@@ -1413,8 +1456,9 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
        if (err)
                return err;
 
-       dout("rename dir %p dentry %p to dir %p dentry %p\n",
-            old_dir, old_dentry, new_dir, new_dentry);
+       doutc(cl, "%llx.%llx/'%pd' to %llx.%llx/'%pd'\n",
+             ceph_vinop(old_dir), old_dentry, ceph_vinop(new_dir),
+             new_dentry);
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
@@ -1459,9 +1503,10 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
 void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
 {
        struct dentry *dn = di->dentry;
-       struct ceph_mds_client *mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
 
-       dout("dentry_lease_touch %p %p '%pd'\n", di, dn, dn);
+       doutc(cl, "%p %p '%pd'\n", di, dn, dn);
 
        di->flags |= CEPH_DENTRY_LEASE_LIST;
        if (di->flags & CEPH_DENTRY_SHRINK_LIST) {
@@ -1469,7 +1514,6 @@ void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
                return;
        }
 
-       mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_list_lock);
        list_move_tail(&di->lease_list, &mdsc->dentry_leases);
        spin_unlock(&mdsc->dentry_list_lock);
@@ -1493,10 +1537,10 @@ static void __dentry_dir_lease_touch(struct ceph_mds_client* mdsc,
 void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
 {
        struct dentry *dn = di->dentry;
-       struct ceph_mds_client *mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
 
-       dout("dentry_dir_lease_touch %p %p '%pd' (offset 0x%llx)\n",
-            di, dn, dn, di->offset);
+       doutc(cl, "%p %p '%pd' (offset 0x%llx)\n", di, dn, dn, di->offset);
 
        if (!list_empty(&di->lease_list)) {
                if (di->flags & CEPH_DENTRY_LEASE_LIST) {
@@ -1516,7 +1560,6 @@ void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
                return;
        }
 
-       mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_list_lock);
        __dentry_dir_lease_touch(mdsc, di);
        spin_unlock(&mdsc->dentry_list_lock);
@@ -1530,7 +1573,7 @@ static void __dentry_lease_unlist(struct ceph_dentry_info *di)
        if (list_empty(&di->lease_list))
                return;
 
-       mdsc = ceph_sb_to_client(di->dentry->d_sb)->mdsc;
+       mdsc = ceph_sb_to_fs_client(di->dentry->d_sb)->mdsc;
        spin_lock(&mdsc->dentry_list_lock);
        list_del_init(&di->lease_list);
        spin_unlock(&mdsc->dentry_list_lock);
@@ -1757,6 +1800,8 @@ static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags)
 {
        struct ceph_dentry_info *di;
        struct ceph_mds_session *session = NULL;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        u32 seq = 0;
        int valid = 0;
 
@@ -1789,7 +1834,7 @@ static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags)
                                         CEPH_MDS_LEASE_RENEW, seq);
                ceph_put_mds_session(session);
        }
-       dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
+       doutc(cl, "dentry %p = %d\n", dentry, valid);
        return valid;
 }
 
@@ -1832,6 +1877,7 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
                              struct ceph_mds_client *mdsc)
 {
        struct ceph_inode_info *ci = ceph_inode(dir);
+       struct ceph_client *cl = mdsc->fsc->client;
        int valid;
        int shared_gen;
 
@@ -1853,8 +1899,9 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
                        valid = 0;
                spin_unlock(&dentry->d_lock);
        }
-       dout("dir_lease_is_valid dir %p v%u dentry %p = %d\n",
-            dir, (unsigned)atomic_read(&ci->i_shared_gen), dentry, valid);
+       doutc(cl, "dir %p %llx.%llx v%u dentry %p '%pd' = %d\n", dir,
+             ceph_vinop(dir), (unsigned)atomic_read(&ci->i_shared_gen),
+             dentry, dentry, valid);
        return valid;
 }
 
@@ -1863,10 +1910,11 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
  */
 static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
 {
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        int valid = 0;
        struct dentry *parent;
        struct inode *dir, *inode;
-       struct ceph_mds_client *mdsc;
 
        valid = fscrypt_d_revalidate(dentry, flags);
        if (valid <= 0)
@@ -1884,16 +1932,16 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
                inode = d_inode(dentry);
        }
 
-       dout("d_revalidate %p '%pd' inode %p offset 0x%llx nokey %d\n", dentry,
-            dentry, inode, ceph_dentry(dentry)->offset,
-            !!(dentry->d_flags & DCACHE_NOKEY_NAME));
+       doutc(cl, "%p '%pd' inode %p offset 0x%llx nokey %d\n",
+             dentry, dentry, inode, ceph_dentry(dentry)->offset,
+             !!(dentry->d_flags & DCACHE_NOKEY_NAME));
 
-       mdsc = ceph_sb_to_client(dir->i_sb)->mdsc;
+       mdsc = ceph_sb_to_fs_client(dir->i_sb)->mdsc;
 
        /* always trust cached snapped dentries, snapdir dentry */
        if (ceph_snap(dir) != CEPH_NOSNAP) {
-               dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
-                    dentry, inode);
+               doutc(cl, "%p '%pd' inode %p is SNAPPED\n", dentry,
+                     dentry, inode);
                valid = 1;
        } else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
                valid = 1;
@@ -1948,14 +1996,14 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
                                break;
                        }
                        ceph_mdsc_put_request(req);
-                       dout("d_revalidate %p lookup result=%d\n",
-                            dentry, err);
+                       doutc(cl, "%p '%pd', lookup result=%d\n", dentry,
+                             dentry, err);
                }
        } else {
                percpu_counter_inc(&mdsc->metric.d_lease_hit);
        }
 
-       dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
+       doutc(cl, "%p '%pd' %s\n", dentry, dentry, valid ? "valid" : "invalid");
        if (!valid)
                ceph_dir_clear_complete(dir);
 
@@ -1995,9 +2043,9 @@ static int ceph_d_delete(const struct dentry *dentry)
 static void ceph_d_release(struct dentry *dentry)
 {
        struct ceph_dentry_info *di = ceph_dentry(dentry);
-       struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
 
-       dout("d_release %p\n", dentry);
+       doutc(fsc->client, "dentry %p '%pd'\n", dentry, dentry);
 
        atomic64_dec(&fsc->mdsc->metric.total_dentries);
 
@@ -2018,10 +2066,12 @@ static void ceph_d_release(struct dentry *dentry)
  */
 static void ceph_d_prune(struct dentry *dentry)
 {
+       struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dentry->d_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_inode_info *dir_ci;
        struct ceph_dentry_info *di;
 
-       dout("ceph_d_prune %pd %p\n", dentry, dentry);
+       doutc(cl, "dentry %p '%pd'\n", dentry, dentry);
 
        /* do we have a valid parent? */
        if (IS_ROOT(dentry))
@@ -2064,7 +2114,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
        int left;
        const int bufsize = 1024;
 
-       if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
+       if (!ceph_test_mount_opt(ceph_sb_to_fs_client(inode->i_sb), DIRSTAT))
                return -EISDIR;
 
        if (!dfi->dir_info) {
index 8559990a59a5c5a5dcf8acc66abab67770f7c935..726af69d4d62cd7341c0c1aefa46fd553f89ec47 100644 (file)
@@ -36,6 +36,7 @@ struct ceph_nfs_snapfh {
 static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
                              struct inode *parent_inode)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        static const int snap_handle_length =
                sizeof(struct ceph_nfs_snapfh) >> 2;
        struct ceph_nfs_snapfh *sfh = (void *)rawfh;
@@ -79,13 +80,14 @@ static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
        *max_len = snap_handle_length;
        ret = FILEID_BTRFS_WITH_PARENT;
 out:
-       dout("encode_snapfh %llx.%llx ret=%d\n", ceph_vinop(inode), ret);
+       doutc(cl, "%p %llx.%llx ret=%d\n", inode, ceph_vinop(inode), ret);
        return ret;
 }
 
 static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
                          struct inode *parent_inode)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        static const int handle_length =
                sizeof(struct ceph_nfs_fh) >> 2;
        static const int connected_handle_length =
@@ -105,15 +107,15 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
 
        if (parent_inode) {
                struct ceph_nfs_confh *cfh = (void *)rawfh;
-               dout("encode_fh %llx with parent %llx\n",
-                    ceph_ino(inode), ceph_ino(parent_inode));
+               doutc(cl, "%p %llx.%llx with parent %p %llx.%llx\n", inode,
+                     ceph_vinop(inode), parent_inode, ceph_vinop(parent_inode));
                cfh->ino = ceph_ino(inode);
                cfh->parent_ino = ceph_ino(parent_inode);
                *max_len = connected_handle_length;
                type = FILEID_INO32_GEN_PARENT;
        } else {
                struct ceph_nfs_fh *fh = (void *)rawfh;
-               dout("encode_fh %llx\n", ceph_ino(inode));
+               doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
                fh->ino = ceph_ino(inode);
                *max_len = handle_length;
                type = FILEID_INO32_GEN;
@@ -123,7 +125,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
 
 static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
 {
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
        struct inode *inode;
        struct ceph_vino vino;
        int err;
@@ -205,7 +207,8 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
                                          struct ceph_nfs_snapfh *sfh,
                                          bool want_parent)
 {
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        struct inode *inode;
        struct ceph_vino vino;
@@ -278,11 +281,10 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
        ceph_mdsc_put_request(req);
 
        if (want_parent) {
-               dout("snapfh_to_parent %llx.%llx\n err=%d\n",
-                    vino.ino, vino.snap, err);
+               doutc(cl, "%llx.%llx\n err=%d\n", vino.ino, vino.snap, err);
        } else {
-               dout("snapfh_to_dentry %llx.%llx parent %llx hash %x err=%d",
-                     vino.ino, vino.snap, sfh->parent_ino, sfh->hash, err);
+               doutc(cl, "%llx.%llx parent %llx hash %x err=%d", vino.ino,
+                     vino.snap, sfh->parent_ino, sfh->hash, err);
        }
        if (IS_ERR(inode))
                return ERR_CAST(inode);
@@ -297,6 +299,7 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
                                        struct fid *fid,
                                        int fh_len, int fh_type)
 {
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
        struct ceph_nfs_fh *fh = (void *)fid->raw;
 
        if (fh_type == FILEID_BTRFS_WITH_PARENT) {
@@ -310,14 +313,14 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
        if (fh_len < sizeof(*fh) / 4)
                return NULL;
 
-       dout("fh_to_dentry %llx\n", fh->ino);
+       doutc(fsc->client, "%llx\n", fh->ino);
        return __fh_to_dentry(sb, fh->ino);
 }
 
 static struct dentry *__get_parent(struct super_block *sb,
                                   struct dentry *child, u64 ino)
 {
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
        struct ceph_mds_request *req;
        struct inode *inode;
        int mask;
@@ -363,6 +366,7 @@ static struct dentry *__get_parent(struct super_block *sb,
 static struct dentry *ceph_get_parent(struct dentry *child)
 {
        struct inode *inode = d_inode(child);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct dentry *dn;
 
        if (ceph_snap(inode) != CEPH_NOSNAP) {
@@ -402,8 +406,8 @@ static struct dentry *ceph_get_parent(struct dentry *child)
                dn = __get_parent(child->d_sb, child, 0);
        }
 out:
-       dout("get_parent %p ino %llx.%llx err=%ld\n",
-            child, ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn));
+       doutc(cl, "child %p %p %llx.%llx err=%ld\n", child, inode,
+             ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn));
        return dn;
 }
 
@@ -414,6 +418,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
                                        struct fid *fid,
                                        int fh_len, int fh_type)
 {
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
        struct ceph_nfs_confh *cfh = (void *)fid->raw;
        struct dentry *dentry;
 
@@ -427,7 +432,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
        if (fh_len < sizeof(*cfh) / 4)
                return NULL;
 
-       dout("fh_to_parent %llx\n", cfh->parent_ino);
+       doutc(fsc->client, "%llx\n", cfh->parent_ino);
        dentry = __get_parent(sb, NULL, cfh->ino);
        if (unlikely(dentry == ERR_PTR(-ENOENT)))
                dentry = __fh_to_dentry(sb, cfh->parent_ino);
@@ -439,7 +444,7 @@ static int __get_snap_name(struct dentry *parent, char *name,
 {
        struct inode *inode = d_inode(child);
        struct inode *dir = d_inode(parent);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        struct ceph_mds_request *req = NULL;
        char *last_name = NULL;
        unsigned next_offset = 2;
@@ -526,8 +531,8 @@ out:
        if (req)
                ceph_mdsc_put_request(req);
        kfree(last_name);
-       dout("get_snap_name %p ino %llx.%llx err=%d\n",
-            child, ceph_vinop(inode), err);
+       doutc(fsc->client, "child dentry %p %p %llx.%llx err=%d\n", child,
+             inode, ceph_vinop(inode), err);
        return err;
 }
 
@@ -544,7 +549,7 @@ static int ceph_get_name(struct dentry *parent, char *name,
        if (ceph_snap(inode) != CEPH_NOSNAP)
                return __get_snap_name(parent, name, child);
 
-       mdsc = ceph_inode_to_client(inode)->mdsc;
+       mdsc = ceph_inode_to_fs_client(inode)->mdsc;
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPNAME,
                                       USE_ANY_MDS);
        if (IS_ERR(req))
@@ -588,9 +593,9 @@ static int ceph_get_name(struct dentry *parent, char *name,
                ceph_fname_free_buffer(dir, &oname);
        }
 out:
-       dout("get_name %p ino %llx.%llx err %d %s%s\n",
-                    child, ceph_vinop(inode), err,
-                    err ? "" : "name ", err ? "" : name);
+       doutc(mdsc->fsc->client, "child dentry %p %p %llx.%llx err %d %s%s\n",
+             child, inode, ceph_vinop(inode), err, err ? "" : "name ",
+             err ? "" : name);
        ceph_mdsc_put_request(req);
        return err;
 }
index 649600d0a7b6c08296154a7756a62423679ad184..3b5aae29e94478ced4e2a962bad1fa26f63b50cd 100644 (file)
@@ -19,8 +19,9 @@
 #include "io.h"
 #include "metric.h"
 
-static __le32 ceph_flags_sys2wire(u32 flags)
+static __le32 ceph_flags_sys2wire(struct ceph_mds_client *mdsc, u32 flags)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        u32 wire_flags = 0;
 
        switch (flags & O_ACCMODE) {
@@ -48,7 +49,7 @@ static __le32 ceph_flags_sys2wire(u32 flags)
 #undef ceph_sys2wire
 
        if (flags)
-               dout("unused open flags: %x\n", flags);
+               doutc(cl, "unused open flags: %x\n", flags);
 
        return cpu_to_le32(wire_flags);
 }
@@ -189,7 +190,7 @@ prepare_open_request(struct super_block *sb, int flags, int create_mode)
        if (IS_ERR(req))
                goto out;
        req->r_fmode = ceph_flags_to_mode(flags);
-       req->r_args.open.flags = ceph_flags_sys2wire(flags);
+       req->r_args.open.flags = ceph_flags_sys2wire(mdsc, flags);
        req->r_args.open.mode = cpu_to_le32(create_mode);
 out:
        return req;
@@ -200,12 +201,13 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mount_options *opt =
-               ceph_inode_to_client(&ci->netfs.inode)->mount_options;
+               ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_file_info *fi;
        int ret;
 
-       dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
-                       inode->i_mode, isdir ? "dir" : "regular");
+       doutc(cl, "%p %llx.%llx %p 0%o (%s)\n", inode, ceph_vinop(inode),
+             file, inode->i_mode, isdir ? "dir" : "regular");
        BUG_ON(inode->i_fop->release != ceph_release);
 
        if (isdir) {
@@ -234,7 +236,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
 
        spin_lock_init(&fi->rw_contexts_lock);
        INIT_LIST_HEAD(&fi->rw_contexts);
-       fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
+       fi->filp_gen = READ_ONCE(ceph_inode_to_fs_client(inode)->filp_gen);
 
        if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
                ret = ceph_uninline_data(file);
@@ -259,6 +261,7 @@ error:
  */
 static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int ret = 0;
 
        switch (inode->i_mode & S_IFMT) {
@@ -271,13 +274,13 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
                break;
 
        case S_IFLNK:
-               dout("init_file %p %p 0%o (symlink)\n", inode, file,
-                    inode->i_mode);
+               doutc(cl, "%p %llx.%llx %p 0%o (symlink)\n", inode,
+                     ceph_vinop(inode), file, inode->i_mode);
                break;
 
        default:
-               dout("init_file %p %p 0%o (special)\n", inode, file,
-                    inode->i_mode);
+               doutc(cl, "%p %llx.%llx %p 0%o (special)\n", inode,
+                     ceph_vinop(inode), file, inode->i_mode);
                /*
                 * we need to drop the open ref now, since we don't
                 * have .release set to ceph_release.
@@ -296,6 +299,7 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
 int ceph_renew_caps(struct inode *inode, int fmode)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req;
        int err, flags, wanted;
@@ -307,8 +311,9 @@ int ceph_renew_caps(struct inode *inode, int fmode)
            (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
                int issued = __ceph_caps_issued(ci, NULL);
                spin_unlock(&ci->i_ceph_lock);
-               dout("renew caps %p want %s issued %s updating mds_wanted\n",
-                    inode, ceph_cap_string(wanted), ceph_cap_string(issued));
+               doutc(cl, "%p %llx.%llx want %s issued %s updating mds_wanted\n",
+                     inode, ceph_vinop(inode), ceph_cap_string(wanted),
+                     ceph_cap_string(issued));
                ceph_check_caps(ci, 0);
                return 0;
        }
@@ -339,7 +344,8 @@ int ceph_renew_caps(struct inode *inode, int fmode)
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
 out:
-       dout("renew caps %p open result=%d\n", inode, err);
+       doutc(cl, "%p %llx.%llx open result=%d\n", inode, ceph_vinop(inode),
+             err);
        return err < 0 ? err : 0;
 }
 
@@ -352,7 +358,8 @@ out:
 int ceph_open(struct inode *inode, struct file *file)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+       struct ceph_client *cl = fsc->client;
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct ceph_file_info *fi = file->private_data;
@@ -360,7 +367,7 @@ int ceph_open(struct inode *inode, struct file *file)
        int flags, fmode, wanted;
 
        if (fi) {
-               dout("open file %p is already opened\n", file);
+               doutc(cl, "file %p is already opened\n", file);
                return 0;
        }
 
@@ -374,8 +381,8 @@ int ceph_open(struct inode *inode, struct file *file)
                        return err;
        }
 
-       dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
-            ceph_vinop(inode), file, flags, file->f_flags);
+       doutc(cl, "%p %llx.%llx file %p flags %d (%d)\n", inode,
+             ceph_vinop(inode), file, flags, file->f_flags);
        fmode = ceph_flags_to_mode(flags);
        wanted = ceph_caps_for_mode(fmode);
 
@@ -399,9 +406,9 @@ int ceph_open(struct inode *inode, struct file *file)
                int mds_wanted = __ceph_caps_mds_wanted(ci, true);
                int issued = __ceph_caps_issued(ci, NULL);
 
-               dout("open %p fmode %d want %s issued %s using existing\n",
-                    inode, fmode, ceph_cap_string(wanted),
-                    ceph_cap_string(issued));
+               doutc(cl, "open %p fmode %d want %s issued %s using existing\n",
+                     inode, fmode, ceph_cap_string(wanted),
+                     ceph_cap_string(issued));
                __ceph_touch_fmode(ci, mdsc, fmode);
                spin_unlock(&ci->i_ceph_lock);
 
@@ -421,7 +428,7 @@ int ceph_open(struct inode *inode, struct file *file)
 
        spin_unlock(&ci->i_ceph_lock);
 
-       dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
+       doutc(cl, "open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
        req = prepare_open_request(inode->i_sb, flags, 0);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
@@ -435,7 +442,7 @@ int ceph_open(struct inode *inode, struct file *file)
        if (!err)
                err = ceph_init_file(inode, file, req->r_fmode);
        ceph_mdsc_put_request(req);
-       dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
+       doutc(cl, "open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
 out:
        return err;
 }
@@ -515,6 +522,7 @@ no_async:
 
 static void restore_deleg_ino(struct inode *dir, u64 ino)
 {
+       struct ceph_client *cl = ceph_inode_to_client(dir);
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_mds_session *s = NULL;
 
@@ -525,7 +533,8 @@ static void restore_deleg_ino(struct inode *dir, u64 ino)
        if (s) {
                int err = ceph_restore_deleg_ino(s, ino);
                if (err)
-                       pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
+                       pr_warn_client(cl,
+                               "unable to restore delegated ino 0x%llx to session: %d\n",
                                ino, err);
                ceph_put_mds_session(s);
        }
@@ -557,6 +566,7 @@ static void wake_async_create_waiters(struct inode *inode,
 static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
                                  struct ceph_mds_request *req)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct dentry *dentry = req->r_dentry;
        struct inode *dinode = d_inode(dentry);
        struct inode *tinode = req->r_target_inode;
@@ -574,10 +584,11 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
        if (result) {
                int pathlen = 0;
                u64 base = 0;
-               char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
+               char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
                                                  &base, 0);
 
-               pr_warn("async create failure path=(%llx)%s result=%d!\n",
+               pr_warn_client(cl,
+                       "async create failure path=(%llx)%s result=%d!\n",
                        base, IS_ERR(path) ? "<<bad>>" : path, result);
                ceph_mdsc_free_path(path, pathlen);
 
@@ -596,14 +607,15 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
                u64 ino = ceph_vino(tinode).ino;
 
                if (req->r_deleg_ino != ino)
-                       pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
-                               __func__, req->r_err, req->r_deleg_ino, ino);
+                       pr_warn_client(cl,
+                               "inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
+                               req->r_err, req->r_deleg_ino, ino);
 
                mapping_set_error(tinode->i_mapping, result);
                wake_async_create_waiters(tinode, req->r_session);
        } else if (!result) {
-               pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
-                       req->r_deleg_ino);
+               pr_warn_client(cl, "no req->r_target_inode for 0x%llx\n",
+                              req->r_deleg_ino);
        }
 out:
        ceph_mdsc_release_dir_caps(req);
@@ -625,6 +637,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
        struct timespec64 now;
        struct ceph_string *pool_ns;
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_vino vino = { .ino = req->r_deleg_ino,
                                  .snap = CEPH_NOSNAP };
 
@@ -655,7 +668,9 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
        in.truncate_seq = cpu_to_le32(1);
        in.truncate_size = cpu_to_le64(-1ULL);
        in.xattr_version = cpu_to_le64(1);
-       in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
+       in.uid = cpu_to_le32(from_kuid(&init_user_ns,
+                                      mapped_fsuid(req->r_mnt_idmap,
+                                                   &init_user_ns)));
        if (dir->i_mode & S_ISGID) {
                in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
 
@@ -663,7 +678,9 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else {
-               in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
+               in.gid = cpu_to_le32(from_kgid(&init_user_ns,
+                                    mapped_fsgid(req->r_mnt_idmap,
+                                                 &init_user_ns)));
        }
        in.mode = cpu_to_le32((u32)mode);
 
@@ -683,7 +700,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
                              req->r_fmode, NULL);
        up_read(&mdsc->snap_rwsem);
        if (ret) {
-               dout("%s failed to fill inode: %d\n", __func__, ret);
+               doutc(cl, "failed to fill inode: %d\n", ret);
                ceph_dir_clear_complete(dir);
                if (!d_unhashed(dentry))
                        d_drop(dentry);
@@ -691,8 +708,8 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
        } else {
                struct dentry *dn;
 
-               dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
-                       vino.ino, ceph_ino(dir), dentry->d_name.name);
+               doutc(cl, "d_adding new inode 0x%llx to 0x%llx/%s\n",
+                     vino.ino, ceph_ino(dir), dentry->d_name.name);
                ceph_dir_clear_ordered(dir);
                ceph_init_inode_acls(inode, as_ctx);
                if (inode->i_state & I_NEW) {
@@ -730,7 +747,9 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
 int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
                     struct file *file, unsigned flags, umode_t mode)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
+       struct mnt_idmap *idmap = file_mnt_idmap(file);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
+       struct ceph_client *cl = fsc->client;
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        struct inode *new_inode = NULL;
@@ -740,9 +759,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
        int mask;
        int err;
 
-       dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
-            dir, dentry, dentry,
-            d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
+       doutc(cl, "%p %llx.%llx dentry %p '%pd' %s flags %d mode 0%o\n",
+             dir, ceph_vinop(dir), dentry, dentry,
+             d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
 
        if (dentry->d_name.len > NAME_MAX)
                return -ENAMETOOLONG;
@@ -788,6 +807,8 @@ retry:
                mask |= CEPH_CAP_XATTR_SHARED;
        req->r_args.open.mask = cpu_to_le32(mask);
        req->r_parent = dir;
+       if (req->r_op == CEPH_MDS_OP_CREATE)
+               req->r_mnt_idmap = mnt_idmap_get(idmap);
        ihold(dir);
        if (IS_ENCRYPTED(dir)) {
                set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
@@ -880,17 +901,18 @@ retry:
                goto out_req;
        if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
-               dout("atomic_open finish_no_open on dn %p\n", dn);
+               doutc(cl, "finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
        } else {
                if (IS_ENCRYPTED(dir) &&
                    !fscrypt_has_permitted_context(dir, d_inode(dentry))) {
-                       pr_warn("Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n",
+                       pr_warn_client(cl,
+                               "Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n",
                                ceph_vinop(dir), ceph_vinop(d_inode(dentry)));
                        goto out_req;
                }
 
-               dout("atomic_open finish_open on dn %p\n", dn);
+               doutc(cl, "finish_open on dn %p\n", dn);
                if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
                        struct inode *newino = d_inode(dentry);
 
@@ -905,17 +927,19 @@ out_req:
        iput(new_inode);
 out_ctx:
        ceph_release_acl_sec_ctx(&as_ctx);
-       dout("atomic_open result=%d\n", err);
+       doutc(cl, "result=%d\n", err);
        return err;
 }
 
 int ceph_release(struct inode *inode, struct file *file)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
 
        if (S_ISDIR(inode->i_mode)) {
                struct ceph_dir_file_info *dfi = file->private_data;
-               dout("release inode %p dir file %p\n", inode, file);
+               doutc(cl, "%p %llx.%llx dir file %p\n", inode,
+                     ceph_vinop(inode), file);
                WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
 
                ceph_put_fmode(ci, dfi->file_info.fmode, 1);
@@ -927,7 +951,8 @@ int ceph_release(struct inode *inode, struct file *file)
                kmem_cache_free(ceph_dir_file_cachep, dfi);
        } else {
                struct ceph_file_info *fi = file->private_data;
-               dout("release inode %p regular file %p\n", inode, file);
+               doutc(cl, "%p %llx.%llx regular file %p\n", inode,
+                     ceph_vinop(inode), file);
                WARN_ON(!list_empty(&fi->rw_contexts));
 
                ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
@@ -962,7 +987,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                         u64 *last_objver)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+       struct ceph_client *cl = fsc->client;
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        ssize_t ret;
        u64 off = *ki_pos;
@@ -971,7 +997,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
        bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
        u64 objver = 0;
 
-       dout("sync_read on inode %p %llx~%llx\n", inode, *ki_pos, len);
+       doutc(cl, "on inode %p %llx.%llx %llx~%llx\n", inode,
+             ceph_vinop(inode), *ki_pos, len);
 
        if (ceph_inode_is_shutdown(inode))
                return -EIO;
@@ -1005,8 +1032,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                /* determine new offset/length if encrypted */
                ceph_fscrypt_adjust_off_and_len(inode, &read_off, &read_len);
 
-               dout("sync_read orig %llu~%llu reading %llu~%llu",
-                    off, len, read_off, read_len);
+               doutc(cl, "orig %llu~%llu reading %llu~%llu", off, len,
+                     read_off, read_len);
 
                req = ceph_osdc_new_request(osdc, &ci->i_layout,
                                        ci->i_vino, read_off, &read_len, 0, 1,
@@ -1059,8 +1086,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                        objver = req->r_version;
 
                i_size = i_size_read(inode);
-               dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
-                    off, len, ret, i_size, (more ? " MORE" : ""));
+               doutc(cl, "%llu~%llu got %zd i_size %llu%s\n", off, len,
+                     ret, i_size, (more ? " MORE" : ""));
 
                /* Fix it to go to end of extent map */
                if (sparse && ret >= 0)
@@ -1101,8 +1128,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                        int zlen = min(len - ret, i_size - off - ret);
                        int zoff = page_off + ret;
 
-                       dout("sync_read zero gap %llu~%llu\n",
-                               off + ret, off + ret + zlen);
+                       doutc(cl, "zero gap %llu~%llu\n", off + ret,
+                             off + ret + zlen);
                        ceph_zero_page_vector_range(zoff, zlen, pages);
                        ret += zlen;
                }
@@ -1151,7 +1178,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
                if (last_objver)
                        *last_objver = objver;
        }
-       dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
+       doutc(cl, "result %zd retry_op %d\n", ret, *retry_op);
        return ret;
 }
 
@@ -1160,9 +1187,11 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
 
-       dout("sync_read on file %p %llx~%zx %s\n", file, iocb->ki_pos,
-            iov_iter_count(to), (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
+       doutc(cl, "on file %p %llx~%zx %s\n", file, iocb->ki_pos,
+             iov_iter_count(to),
+             (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
 
        return __ceph_sync_read(inode, &iocb->ki_pos, to, retry_op, NULL);
 }
@@ -1190,6 +1219,7 @@ static void ceph_aio_retry_work(struct work_struct *work);
 static void ceph_aio_complete(struct inode *inode,
                              struct ceph_aio_request *aio_req)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int ret;
 
@@ -1203,7 +1233,7 @@ static void ceph_aio_complete(struct inode *inode,
        if (!ret)
                ret = aio_req->total_len;
 
-       dout("ceph_aio_complete %p rc %d\n", inode, ret);
+       doutc(cl, "%p %llx.%llx rc %d\n", inode, ceph_vinop(inode), ret);
 
        if (ret >= 0 && aio_req->write) {
                int dirty;
@@ -1242,11 +1272,13 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
        struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
        unsigned int len = osd_data->bvec_pos.iter.bi_size;
        bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
 
        BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
        BUG_ON(!osd_data->num_bvecs);
 
-       dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);
+       doutc(cl, "req %p inode %p %llx.%llx, rc %d bytes %u\n", req,
+             inode, ceph_vinop(inode), rc, len);
 
        if (rc == -EOLDSNAPC) {
                struct ceph_aio_work *aio_work;
@@ -1256,7 +1288,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
                if (aio_work) {
                        INIT_WORK(&aio_work->work, ceph_aio_retry_work);
                        aio_work->req = req;
-                       queue_work(ceph_inode_to_client(inode)->inode_wq,
+                       queue_work(ceph_inode_to_fs_client(inode)->inode_wq,
                                   &aio_work->work);
                        return;
                }
@@ -1386,7 +1418,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+       struct ceph_client *cl = fsc->client;
        struct ceph_client_metric *metric = &fsc->mdsc->metric;
        struct ceph_vino vino;
        struct ceph_osd_request *req;
@@ -1405,9 +1438,9 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
        if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;
 
-       dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
-            (write ? "write" : "read"), file, pos, (unsigned)count,
-            snapc, snapc ? snapc->seq : 0);
+       doutc(cl, "sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
+             (write ? "write" : "read"), file, pos, (unsigned)count,
+             snapc, snapc ? snapc->seq : 0);
 
        if (write) {
                int ret2;
@@ -1418,7 +1451,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
                                        pos >> PAGE_SHIFT,
                                        (pos + count - 1) >> PAGE_SHIFT);
                if (ret2 < 0)
-                       dout("invalidate_inode_pages2_range returned %d\n", ret2);
+                       doutc(cl, "invalidate_inode_pages2_range returned %d\n",
+                             ret2);
 
                flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
        } else {
@@ -1610,7 +1644,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+       struct ceph_client *cl = fsc->client;
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        struct ceph_osd_request *req;
        struct page **pages;
@@ -1625,8 +1660,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
        if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;
 
-       dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
-            file, pos, (unsigned)count, snapc, snapc->seq);
+       doutc(cl, "on file %p %lld~%u snapc %p seq %lld\n", file, pos,
+             (unsigned)count, snapc, snapc->seq);
 
        ret = filemap_write_and_wait_range(inode->i_mapping,
                                           pos, pos + count - 1);
@@ -1670,9 +1705,9 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                last = (pos + len) != (write_pos + write_len);
                rmw = first || last;
 
-               dout("sync_write ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n",
-                    ci->i_vino.ino, pos, len, write_pos, write_len,
-                    rmw ? "" : "no ");
+               doutc(cl, "ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n",
+                     ci->i_vino.ino, pos, len, write_pos, write_len,
+                     rmw ? "" : "no ");
 
                /*
                 * The data is emplaced into the page as it would be if it were
@@ -1881,7 +1916,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                        left -= ret;
                }
                if (ret < 0) {
-                       dout("sync_write write failed with %d\n", ret);
+                       doutc(cl, "write failed with %d\n", ret);
                        ceph_release_page_vector(pages, num_pages);
                        break;
                }
@@ -1891,7 +1926,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                                                         write_pos, write_len,
                                                         GFP_KERNEL);
                        if (ret < 0) {
-                               dout("encryption failed with %d\n", ret);
+                               doutc(cl, "encryption failed with %d\n", ret);
                                ceph_release_page_vector(pages, num_pages);
                                break;
                        }
@@ -1910,7 +1945,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                        break;
                }
 
-               dout("sync_write write op %lld~%llu\n", write_pos, write_len);
+               doutc(cl, "write op %lld~%llu\n", write_pos, write_len);
                osd_req_op_extent_osd_data_pages(req, rmw ? 1 : 0, pages, write_len,
                                                 offset_in_page(write_pos), false,
                                                 true);
@@ -1941,7 +1976,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                                          req->r_end_latency, len, ret);
                ceph_osdc_put_request(req);
                if (ret != 0) {
-                       dout("sync_write osd write returned %d\n", ret);
+                       doutc(cl, "osd write returned %d\n", ret);
                        /* Version changed! Must re-do the rmw cycle */
                        if ((assert_ver && (ret == -ERANGE || ret == -EOVERFLOW)) ||
                            (!assert_ver && ret == -EEXIST)) {
@@ -1971,13 +2006,13 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                                pos >> PAGE_SHIFT,
                                (pos + len - 1) >> PAGE_SHIFT);
                if (ret < 0) {
-                       dout("invalidate_inode_pages2_range returned %d\n",
-                            ret);
+                       doutc(cl, "invalidate_inode_pages2_range returned %d\n",
+                             ret);
                        ret = 0;
                }
                pos += len;
                written += len;
-               dout("sync_write written %d\n", written);
+               doutc(cl, "written %d\n", written);
                if (pos > i_size_read(inode)) {
                        check_caps = ceph_inode_set_size(inode, pos);
                        if (check_caps)
@@ -1991,7 +2026,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                ret = written;
                iocb->ki_pos = pos;
        }
-       dout("sync_write returning %d\n", ret);
+       doutc(cl, "returning %d\n", ret);
        return ret;
 }
 
@@ -2010,13 +2045,14 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
        struct inode *inode = file_inode(filp);
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        ssize_t ret;
        int want = 0, got = 0;
        int retry_op = 0, read = 0;
 
 again:
-       dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
-            inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+       doutc(cl, "%llu~%u trying to get caps on %p %llx.%llx\n",
+             iocb->ki_pos, (unsigned)len, inode, ceph_vinop(inode));
 
        if (ceph_inode_is_shutdown(inode))
                return -ESTALE;
@@ -2044,9 +2080,9 @@ again:
            (iocb->ki_flags & IOCB_DIRECT) ||
            (fi->flags & CEPH_F_SYNC)) {
 
-               dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
-                    inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
-                    ceph_cap_string(got));
+               doutc(cl, "sync %p %llx.%llx %llu~%u got cap refs on %s\n",
+                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
+                     ceph_cap_string(got));
 
                if (!ceph_has_inline_data(ci)) {
                        if (!retry_op &&
@@ -2064,16 +2100,16 @@ again:
                }
        } else {
                CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
-               dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
-                    inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
-                    ceph_cap_string(got));
+               doutc(cl, "async %p %llx.%llx %llu~%u got cap refs on %s\n",
+                     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
+                     ceph_cap_string(got));
                ceph_add_rw_context(fi, &rw_ctx);
                ret = generic_file_read_iter(iocb, to);
                ceph_del_rw_context(fi, &rw_ctx);
        }
 
-       dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
-            inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
+       doutc(cl, "%p %llx.%llx dropping cap refs on %s = %d\n",
+             inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
        ceph_put_cap_refs(ci, got);
 
        if (direct_lock)
@@ -2133,8 +2169,8 @@ again:
                /* hit EOF or hole? */
                if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
                    ret < len) {
-                       dout("sync_read hit hole, ppos %lld < size %lld"
-                            ", reading more\n", iocb->ki_pos, i_size);
+                       doutc(cl, "hit hole, ppos %lld < size %lld, reading more\n",
+                             iocb->ki_pos, i_size);
 
                        read += ret;
                        len -= ret;
@@ -2228,7 +2264,8 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+       struct ceph_client *cl = fsc->client;
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        struct ceph_cap_flush *prealloc_cf;
        ssize_t count, written = 0;
@@ -2296,8 +2333,9 @@ retry_snap:
        if (err)
                goto out;
 
-       dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
-            inode, ceph_vinop(inode), pos, count, i_size_read(inode));
+       doutc(cl, "%p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
+             inode, ceph_vinop(inode), pos, count,
+             i_size_read(inode));
        if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
                want |= CEPH_CAP_FILE_BUFFER;
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
@@ -2313,8 +2351,8 @@ retry_snap:
 
        inode_inc_iversion_raw(inode);
 
-       dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
-            inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
+       doutc(cl, "%p %llx.%llx %llu~%zd got cap refs on %s\n",
+             inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
 
        if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
            (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
@@ -2374,14 +2412,14 @@ retry_snap:
                        ceph_check_caps(ci, CHECK_CAPS_FLUSH);
        }
 
-       dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
-            inode, ceph_vinop(inode), pos, (unsigned)count,
-            ceph_cap_string(got));
+       doutc(cl, "%p %llx.%llx %llu~%u  dropping cap refs on %s\n",
+             inode, ceph_vinop(inode), pos, (unsigned)count,
+             ceph_cap_string(got));
        ceph_put_cap_refs(ci, got);
 
        if (written == -EOLDSNAPC) {
-               dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
-                    inode, ceph_vinop(inode), pos, (unsigned)count);
+               doutc(cl, "%p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
+                     inode, ceph_vinop(inode), pos, (unsigned)count);
                goto retry_snap;
        }
 
@@ -2462,7 +2500,7 @@ static int ceph_zero_partial_object(struct inode *inode,
                                    loff_t offset, loff_t *length)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        struct ceph_osd_request *req;
        int ret = 0;
        loff_t zero = 0;
@@ -2553,14 +2591,15 @@ static long ceph_fallocate(struct file *file, int mode,
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_cap_flush *prealloc_cf;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int want, got = 0;
        int dirty;
        int ret = 0;
        loff_t endoff = 0;
        loff_t size;
 
-       dout("%s %p %llx.%llx mode %x, offset %llu length %llu\n", __func__,
-            inode, ceph_vinop(inode), mode, offset, length);
+       doutc(cl, "%p %llx.%llx mode %x, offset %llu length %llu\n",
+             inode, ceph_vinop(inode), mode, offset, length);
 
        if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;
@@ -2689,6 +2728,7 @@ static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
                           loff_t src_off, loff_t dst_off, size_t len)
 {
+       struct ceph_client *cl = ceph_inode_to_client(src_inode);
        loff_t size, endoff;
 
        size = i_size_read(src_inode);
@@ -2699,8 +2739,8 @@ static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
         * inode.
         */
        if (src_off + len > size) {
-               dout("Copy beyond EOF (%llu + %zu > %llu)\n",
-                    src_off, len, size);
+               doutc(cl, "Copy beyond EOF (%llu + %zu > %llu)\n", src_off,
+                     len, size);
                return -EOPNOTSUPP;
        }
        size = i_size_read(dst_inode);
@@ -2776,6 +2816,7 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
        u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
        u32 src_objlen, dst_objlen;
        u32 object_size = src_ci->i_layout.object_size;
+       struct ceph_client *cl = fsc->client;
        int ret;
 
        src_oloc.pool = src_ci->i_layout.pool_id;
@@ -2817,9 +2858,10 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
                if (ret) {
                        if (ret == -EOPNOTSUPP) {
                                fsc->have_copy_from2 = false;
-                               pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
+                               pr_notice_client(cl,
+                                       "OSDs don't support copy-from2; disabling copy offload\n");
                        }
-                       dout("ceph_osdc_copy_from returned %d\n", ret);
+                       doutc(cl, "returned %d\n", ret);
                        if (!bytes)
                                bytes = ret;
                        goto out;
@@ -2845,7 +2887,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
        struct ceph_inode_info *src_ci = ceph_inode(src_inode);
        struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
        struct ceph_cap_flush *prealloc_cf;
-       struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
+       struct ceph_fs_client *src_fsc = ceph_inode_to_fs_client(src_inode);
+       struct ceph_client *cl = src_fsc->client;
        loff_t size;
        ssize_t ret = -EIO, bytes;
        u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
@@ -2853,7 +2896,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
        int src_got = 0, dst_got = 0, err, dirty;
 
        if (src_inode->i_sb != dst_inode->i_sb) {
-               struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
+               struct ceph_fs_client *dst_fsc = ceph_inode_to_fs_client(dst_inode);
 
                if (ceph_fsid_compare(&src_fsc->client->fsid,
                                      &dst_fsc->client->fsid)) {
@@ -2888,7 +2931,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
            (src_ci->i_layout.stripe_count != 1) ||
            (dst_ci->i_layout.stripe_count != 1) ||
            (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
-               dout("Invalid src/dst files layout\n");
+               doutc(cl, "Invalid src/dst files layout\n");
                return -EOPNOTSUPP;
        }
 
@@ -2906,12 +2949,12 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
        /* Start by sync'ing the source and destination files */
        ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
        if (ret < 0) {
-               dout("failed to write src file (%zd)\n", ret);
+               doutc(cl, "failed to write src file (%zd)\n", ret);
                goto out;
        }
        ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
        if (ret < 0) {
-               dout("failed to write dst file (%zd)\n", ret);
+               doutc(cl, "failed to write dst file (%zd)\n", ret);
                goto out;
        }
 
@@ -2923,7 +2966,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
        err = get_rd_wr_caps(src_file, &src_got,
                             dst_file, (dst_off + len), &dst_got);
        if (err < 0) {
-               dout("get_rd_wr_caps returned %d\n", err);
+               doutc(cl, "get_rd_wr_caps returned %d\n", err);
                ret = -EOPNOTSUPP;
                goto out;
        }
@@ -2938,7 +2981,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
                                            dst_off >> PAGE_SHIFT,
                                            (dst_off + len) >> PAGE_SHIFT);
        if (ret < 0) {
-               dout("Failed to invalidate inode pages (%zd)\n", ret);
+               doutc(cl, "Failed to invalidate inode pages (%zd)\n",
+                           ret);
                ret = 0; /* XXX */
        }
        ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
@@ -2959,7 +3003,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
         * starting at the src_off
         */
        if (src_objoff) {
-               dout("Initial partial copy of %u bytes\n", src_objlen);
+               doutc(cl, "Initial partial copy of %u bytes\n", src_objlen);
 
                /*
                 * we need to temporarily drop all caps as we'll be calling
@@ -2970,7 +3014,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
                                       &dst_off, src_objlen, flags);
                /* Abort on short copies or on error */
                if (ret < (long)src_objlen) {
-                       dout("Failed partial copy (%zd)\n", ret);
+                       doutc(cl, "Failed partial copy (%zd)\n", ret);
                        goto out;
                }
                len -= ret;
@@ -2992,7 +3036,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
                        ret = bytes;
                goto out_caps;
        }
-       dout("Copied %zu bytes out of %zu\n", bytes, len);
+       doutc(cl, "Copied %zu bytes out of %zu\n", bytes, len);
        len -= bytes;
        ret += bytes;
 
@@ -3020,13 +3064,13 @@ out_caps:
         * there were errors in remote object copies (len >= object_size).
         */
        if (len && (len < src_ci->i_layout.object_size)) {
-               dout("Final partial copy of %zu bytes\n", len);
+               doutc(cl, "Final partial copy of %zu bytes\n", len);
                bytes = do_splice_direct(src_file, &src_off, dst_file,
                                         &dst_off, len, flags);
                if (bytes > 0)
                        ret += bytes;
                else
-                       dout("Failed partial copy (%zd)\n", bytes);
+                       doutc(cl, "Failed partial copy (%zd)\n", bytes);
        }
 
 out:
index 2e2a303b9e649d1c5d3b863846d801de5630f885..0679240f06db924e9aba25052675268885c4bd04 100644 (file)
@@ -129,6 +129,8 @@ void ceph_as_ctx_to_req(struct ceph_mds_request *req,
 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
                             struct inode *newino)
 {
+       struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct inode *inode;
 
        if (ceph_vino_is_reserved(vino))
@@ -145,12 +147,13 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
        }
 
        if (!inode) {
-               dout("No inode found for %llx.%llx\n", vino.ino, vino.snap);
+               doutc(cl, "no inode found for %llx.%llx\n", vino.ino, vino.snap);
                return ERR_PTR(-ENOMEM);
        }
 
-       dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
-            ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
+       doutc(cl, "on %llx=%llx.%llx got %p new %d\n",
+             ceph_present_inode(inode), ceph_vinop(inode), inode,
+             !!(inode->i_state & I_NEW));
        return inode;
 }
 
@@ -159,6 +162,7 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
  */
 struct inode *ceph_get_snapdir(struct inode *parent)
 {
+       struct ceph_client *cl = ceph_inode_to_client(parent);
        struct ceph_vino vino = {
                .ino = ceph_ino(parent),
                .snap = CEPH_SNAPDIR,
@@ -171,14 +175,14 @@ struct inode *ceph_get_snapdir(struct inode *parent)
                return inode;
 
        if (!S_ISDIR(parent->i_mode)) {
-               pr_warn_once("bad snapdir parent type (mode=0%o)\n",
-                            parent->i_mode);
+               pr_warn_once_client(cl, "bad snapdir parent type (mode=0%o)\n",
+                                   parent->i_mode);
                goto err;
        }
 
        if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
-               pr_warn_once("bad snapdir inode type (mode=0%o)\n",
-                            inode->i_mode);
+               pr_warn_once_client(cl, "bad snapdir inode type (mode=0%o)\n",
+                                   inode->i_mode);
                goto err;
        }
 
@@ -203,7 +207,7 @@ struct inode *ceph_get_snapdir(struct inode *parent)
                        inode->i_flags |= S_ENCRYPTED;
                        ci->fscrypt_auth_len = pci->fscrypt_auth_len;
                } else {
-                       dout("Failed to alloc snapdir fscrypt_auth\n");
+                       doutc(cl, "Failed to alloc snapdir fscrypt_auth\n");
                        ret = -ENOMEM;
                        goto err;
                }
@@ -249,6 +253,8 @@ const struct inode_operations ceph_file_iops = {
 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
                                                    u32 f)
 {
+       struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_frag *frag;
@@ -279,8 +285,7 @@ static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
        rb_link_node(&frag->node, parent, p);
        rb_insert_color(&frag->node, &ci->i_fragtree);
 
-       dout("get_or_create_frag added %llx.%llx frag %x\n",
-            ceph_vinop(&ci->netfs.inode), f);
+       doutc(cl, "added %p %llx.%llx frag %x\n", inode, ceph_vinop(inode), f);
        return frag;
 }
 
@@ -313,6 +318,7 @@ struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                              struct ceph_inode_frag *pfrag, int *found)
 {
+       struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
        u32 t = ceph_frag_make(0, 0);
        struct ceph_inode_frag *frag;
        unsigned nway, i;
@@ -336,8 +342,8 @@ static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
 
                /* choose child */
                nway = 1 << frag->split_by;
-               dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
-                    frag->split_by, nway);
+               doutc(cl, "frag(%x) %x splits by %d (%d ways)\n", v, t,
+                     frag->split_by, nway);
                for (i = 0; i < nway; i++) {
                        n = ceph_frag_make_child(t, frag->split_by, i);
                        if (ceph_frag_contains_value(n, v)) {
@@ -347,7 +353,7 @@ static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
                }
                BUG_ON(i == nway);
        }
-       dout("choose_frag(%x) = %x\n", v, t);
+       doutc(cl, "frag(%x) = %x\n", v, t);
 
        return t;
 }
@@ -371,6 +377,7 @@ static int ceph_fill_dirfrag(struct inode *inode,
                             struct ceph_mds_reply_dirfrag *dirinfo)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_frag *frag;
        u32 id = le32_to_cpu(dirinfo->frag);
        int mds = le32_to_cpu(dirinfo->auth);
@@ -395,14 +402,14 @@ static int ceph_fill_dirfrag(struct inode *inode,
                        goto out;
                if (frag->split_by == 0) {
                        /* tree leaf, remove */
-                       dout("fill_dirfrag removed %llx.%llx frag %x"
-                            " (no ref)\n", ceph_vinop(inode), id);
+                       doutc(cl, "removed %p %llx.%llx frag %x (no ref)\n",
+                             inode, ceph_vinop(inode), id);
                        rb_erase(&frag->node, &ci->i_fragtree);
                        kfree(frag);
                } else {
                        /* tree branch, keep and clear */
-                       dout("fill_dirfrag cleared %llx.%llx frag %x"
-                            " referral\n", ceph_vinop(inode), id);
+                       doutc(cl, "cleared %p %llx.%llx frag %x referral\n",
+                             inode, ceph_vinop(inode), id);
                        frag->mds = -1;
                        frag->ndist = 0;
                }
@@ -415,8 +422,9 @@ static int ceph_fill_dirfrag(struct inode *inode,
        if (IS_ERR(frag)) {
                /* this is not the end of the world; we can continue
                   with bad/inaccurate delegation info */
-               pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
-                      ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
+               pr_err_client(cl, "ENOMEM on mds ref %p %llx.%llx fg %x\n",
+                             inode, ceph_vinop(inode),
+                             le32_to_cpu(dirinfo->frag));
                err = -ENOMEM;
                goto out;
        }
@@ -425,8 +433,8 @@ static int ceph_fill_dirfrag(struct inode *inode,
        frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
        for (i = 0; i < frag->ndist; i++)
                frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
-       dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
-            ceph_vinop(inode), frag->frag, frag->ndist);
+       doutc(cl, "%p %llx.%llx frag %x ndist=%d\n", inode,
+             ceph_vinop(inode), frag->frag, frag->ndist);
 
 out:
        mutex_unlock(&ci->i_fragtree_mutex);
@@ -454,6 +462,7 @@ static int ceph_fill_fragtree(struct inode *inode,
                              struct ceph_frag_tree_head *fragtree,
                              struct ceph_mds_reply_dirfrag *dirinfo)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_frag *frag, *prev_frag = NULL;
        struct rb_node *rb_node;
@@ -489,15 +498,15 @@ static int ceph_fill_fragtree(struct inode *inode,
                     frag_tree_split_cmp, NULL);
        }
 
-       dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
+       doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
        rb_node = rb_first(&ci->i_fragtree);
        for (i = 0; i < nsplits; i++) {
                id = le32_to_cpu(fragtree->splits[i].frag);
                split_by = le32_to_cpu(fragtree->splits[i].by);
                if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
-                       pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
-                              "frag %x split by %d\n", ceph_vinop(inode),
-                              i, nsplits, id, split_by);
+                       pr_err_client(cl, "%p %llx.%llx invalid split %d/%u, "
+                              "frag %x split by %d\n", inode,
+                              ceph_vinop(inode), i, nsplits, id, split_by);
                        continue;
                }
                frag = NULL;
@@ -529,7 +538,7 @@ static int ceph_fill_fragtree(struct inode *inode,
                if (frag->split_by == 0)
                        ci->i_fragtree_nsplits++;
                frag->split_by = split_by;
-               dout(" frag %x split by %d\n", frag->frag, frag->split_by);
+               doutc(cl, " frag %x split by %d\n", frag->frag, frag->split_by);
                prev_frag = frag;
        }
        while (rb_node) {
@@ -554,6 +563,7 @@ out_unlock:
  */
 struct inode *ceph_alloc_inode(struct super_block *sb)
 {
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
        struct ceph_inode_info *ci;
        int i;
 
@@ -561,7 +571,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
        if (!ci)
                return NULL;
 
-       dout("alloc_inode %p\n", &ci->netfs.inode);
+       doutc(fsc->client, "%p\n", &ci->netfs.inode);
 
        /* Set parameters for the netfs library */
        netfs_inode_init(&ci->netfs, &ceph_netfs_ops);
@@ -675,10 +685,11 @@ void ceph_evict_inode(struct inode *inode)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_frag *frag;
        struct rb_node *n;
 
-       dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
+       doutc(cl, "%p ino %llx.%llx\n", inode, ceph_vinop(inode));
 
        percpu_counter_dec(&mdsc->metric.total_inodes);
 
@@ -701,8 +712,8 @@ void ceph_evict_inode(struct inode *inode)
         */
        if (ci->i_snap_realm) {
                if (ceph_snap(inode) == CEPH_NOSNAP) {
-                       dout(" dropping residual ref to snap realm %p\n",
-                            ci->i_snap_realm);
+                       doutc(cl, " dropping residual ref to snap realm %p\n",
+                             ci->i_snap_realm);
                        ceph_change_snap_realm(inode, NULL);
                } else {
                        ceph_put_snapid_map(mdsc, ci->i_snapid_map);
@@ -743,15 +754,16 @@ static inline blkcnt_t calc_inode_blocks(u64 size)
 int ceph_fill_file_size(struct inode *inode, int issued,
                        u32 truncate_seq, u64 truncate_size, u64 size)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int queue_trunc = 0;
        loff_t isize = i_size_read(inode);
 
        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
            (truncate_seq == ci->i_truncate_seq && size > isize)) {
-               dout("size %lld -> %llu\n", isize, size);
+               doutc(cl, "size %lld -> %llu\n", isize, size);
                if (size > 0 && S_ISDIR(inode->i_mode)) {
-                       pr_err("fill_file_size non-zero size for directory\n");
+                       pr_err_client(cl, "non-zero size for directory\n");
                        size = 0;
                }
                i_size_write(inode, size);
@@ -764,8 +776,8 @@ int ceph_fill_file_size(struct inode *inode, int issued,
                        ceph_fscache_update(inode);
                ci->i_reported_size = size;
                if (truncate_seq != ci->i_truncate_seq) {
-                       dout("%s truncate_seq %u -> %u\n", __func__,
-                            ci->i_truncate_seq, truncate_seq);
+                       doutc(cl, "truncate_seq %u -> %u\n",
+                             ci->i_truncate_seq, truncate_seq);
                        ci->i_truncate_seq = truncate_seq;
 
                        /* the MDS should have revoked these caps */
@@ -794,14 +806,15 @@ int ceph_fill_file_size(struct inode *inode, int issued,
         * anyway.
         */
        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0) {
-               dout("%s truncate_size %lld -> %llu, encrypted %d\n", __func__,
-                    ci->i_truncate_size, truncate_size, !!IS_ENCRYPTED(inode));
+               doutc(cl, "truncate_size %lld -> %llu, encrypted %d\n",
+                     ci->i_truncate_size, truncate_size,
+                     !!IS_ENCRYPTED(inode));
 
                ci->i_truncate_size = truncate_size;
 
                if (IS_ENCRYPTED(inode)) {
-                       dout("%s truncate_pagecache_size %lld -> %llu\n",
-                            __func__, ci->i_truncate_pagecache_size, size);
+                       doutc(cl, "truncate_pagecache_size %lld -> %llu\n",
+                             ci->i_truncate_pagecache_size, size);
                        ci->i_truncate_pagecache_size = size;
                } else {
                        ci->i_truncate_pagecache_size = truncate_size;
@@ -814,6 +827,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
                         u64 time_warp_seq, struct timespec64 *ctime,
                         struct timespec64 *mtime, struct timespec64 *atime)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct timespec64 ictime = inode_get_ctime(inode);
        int warn = 0;
@@ -825,7 +839,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
                      CEPH_CAP_XATTR_EXCL)) {
                if (ci->i_version == 0 ||
                    timespec64_compare(ctime, &ictime) > 0) {
-                       dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
+                       doutc(cl, "ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
                             ictime.tv_sec, ictime.tv_nsec,
                             ctime->tv_sec, ctime->tv_nsec);
                        inode_set_ctime_to_ts(inode, *ctime);
@@ -833,8 +847,7 @@ void ceph_fill_file_time(struct inode *inode, int issued,
                if (ci->i_version == 0 ||
                    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
                        /* the MDS did a utimes() */
-                       dout("mtime %lld.%09ld -> %lld.%09ld "
-                            "tw %d -> %d\n",
+                       doutc(cl, "mtime %lld.%09ld -> %lld.%09ld tw %d -> %d\n",
                             inode_get_mtime_sec(inode),
                             inode_get_mtime_nsec(inode),
                             mtime->tv_sec, mtime->tv_nsec,
@@ -849,14 +862,14 @@ void ceph_fill_file_time(struct inode *inode, int issued,
                        /* nobody did utimes(); take the max */
                        ts = inode_get_mtime(inode);
                        if (timespec64_compare(mtime, &ts) > 0) {
-                               dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
+                               doutc(cl, "mtime %lld.%09ld -> %lld.%09ld inc\n",
                                     ts.tv_sec, ts.tv_nsec,
                                     mtime->tv_sec, mtime->tv_nsec);
                                inode_set_mtime_to_ts(inode, *mtime);
                        }
                        ts = inode_get_atime(inode);
                        if (timespec64_compare(atime, &ts) > 0) {
-                               dout("atime %lld.%09ld -> %lld.%09ld inc\n",
+                               doutc(cl, "atime %lld.%09ld -> %lld.%09ld inc\n",
                                     ts.tv_sec, ts.tv_nsec,
                                     atime->tv_sec, atime->tv_nsec);
                                inode_set_atime_to_ts(inode, *atime);
@@ -878,13 +891,16 @@ void ceph_fill_file_time(struct inode *inode, int issued,
                }
        }
        if (warn) /* time_warp_seq shouldn't go backwards */
-               dout("%p mds time_warp_seq %llu < %u\n",
-                    inode, time_warp_seq, ci->i_time_warp_seq);
+               doutc(cl, "%p mds time_warp_seq %llu < %u\n", inode,
+                     time_warp_seq, ci->i_time_warp_seq);
 }
 
 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
-static int decode_encrypted_symlink(const char *encsym, int enclen, u8 **decsym)
+static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
+                                   const char *encsym,
+                                   int enclen, u8 **decsym)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        int declen;
        u8 *sym;
 
@@ -894,8 +910,9 @@ static int decode_encrypted_symlink(const char *encsym, int enclen, u8 **decsym)
 
        declen = ceph_base64_decode(encsym, enclen, sym);
        if (declen < 0) {
-               pr_err("%s: can't decode symlink (%d). Content: %.*s\n",
-                      __func__, declen, enclen, encsym);
+               pr_err_client(cl,
+                       "can't decode symlink (%d). Content: %.*s\n",
+                       declen, enclen, encsym);
                kfree(sym);
                return -EIO;
        }
@@ -904,7 +921,9 @@ static int decode_encrypted_symlink(const char *encsym, int enclen, u8 **decsym)
        return declen;
 }
 #else
-static int decode_encrypted_symlink(const char *encsym, int symlen, u8 **decsym)
+static int decode_encrypted_symlink(struct ceph_mds_client *mdsc,
+                                   const char *encsym,
+                                   int symlen, u8 **decsym)
 {
        return -EOPNOTSUPP;
 }
@@ -921,6 +940,7 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
                    struct ceph_cap_reservation *caps_reservation)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_reply_inode *info = iinfo->in;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int issued, new_issued, info_caps;
@@ -939,25 +959,26 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
 
        lockdep_assert_held(&mdsc->snap_rwsem);
 
-       dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
-            inode, ceph_vinop(inode), le64_to_cpu(info->version),
-            ci->i_version);
+       doutc(cl, "%p ino %llx.%llx v %llu had %llu\n", inode, ceph_vinop(inode),
+             le64_to_cpu(info->version), ci->i_version);
 
        /* Once I_NEW is cleared, we can't change type or dev numbers */
        if (inode->i_state & I_NEW) {
                inode->i_mode = mode;
        } else {
                if (inode_wrong_type(inode, mode)) {
-                       pr_warn_once("inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
-                                    ceph_vinop(inode), inode->i_mode, mode);
+                       pr_warn_once_client(cl,
+                               "inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
+                               ceph_vinop(inode), inode->i_mode, mode);
                        return -ESTALE;
                }
 
                if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
-                       pr_warn_once("dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
-                                    ceph_vinop(inode), MAJOR(inode->i_rdev),
-                                    MINOR(inode->i_rdev), MAJOR(rdev),
-                                    MINOR(rdev));
+                       pr_warn_once_client(cl,
+                               "dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
+                               ceph_vinop(inode), MAJOR(inode->i_rdev),
+                               MINOR(inode->i_rdev), MAJOR(rdev),
+                               MINOR(rdev));
                        return -ESTALE;
                }
        }
@@ -979,8 +1000,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
        if (iinfo->xattr_len > 4) {
                xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
                if (!xattr_blob)
-                       pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
-                              iinfo->xattr_len);
+                       pr_err_client(cl, "ENOMEM xattr blob %d bytes\n",
+                                     iinfo->xattr_len);
        }
 
        if (iinfo->pool_ns_len > 0)
@@ -1034,9 +1055,10 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
                inode->i_mode = mode;
                inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
                inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
-               dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
-                    from_kuid(&init_user_ns, inode->i_uid),
-                    from_kgid(&init_user_ns, inode->i_gid));
+               doutc(cl, "%p %llx.%llx mode 0%o uid.gid %d.%d\n", inode,
+                     ceph_vinop(inode), inode->i_mode,
+                     from_kuid(&init_user_ns, inode->i_uid),
+                     from_kgid(&init_user_ns, inode->i_gid));
                ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
                ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
        }
@@ -1092,7 +1114,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
                        if (size == round_up(fsize, CEPH_FSCRYPT_BLOCK_SIZE)) {
                                size = fsize;
                        } else {
-                               pr_warn("fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
+                               pr_warn_client(cl,
+                                       "fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
                                        info->size, size);
                        }
                }
@@ -1104,8 +1127,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
                /* only update max_size on auth cap */
                if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
                    ci->i_max_size != le64_to_cpu(info->max_size)) {
-                       dout("max_size %lld -> %llu\n", ci->i_max_size,
-                                       le64_to_cpu(info->max_size));
+                       doutc(cl, "max_size %lld -> %llu\n",
+                           ci->i_max_size, le64_to_cpu(info->max_size));
                        ci->i_max_size = le64_to_cpu(info->max_size);
                }
        }
@@ -1168,15 +1191,17 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
 
                        if (IS_ENCRYPTED(inode)) {
                                if (symlen != i_size_read(inode))
-                                       pr_err("%s %llx.%llx BAD symlink size %lld\n",
-                                               __func__, ceph_vinop(inode),
+                                       pr_err_client(cl,
+                                               "%p %llx.%llx BAD symlink size %lld\n",
+                                               inode, ceph_vinop(inode),
                                                i_size_read(inode));
 
-                               err = decode_encrypted_symlink(iinfo->symlink,
+                               err = decode_encrypted_symlink(mdsc, iinfo->symlink,
                                                               symlen, (u8 **)&sym);
                                if (err < 0) {
-                                       pr_err("%s decoding encrypted symlink failed: %d\n",
-                                               __func__, err);
+                                       pr_err_client(cl,
+                                               "decoding encrypted symlink failed: %d\n",
+                                               err);
                                        goto out;
                                }
                                symlen = err;
@@ -1184,8 +1209,9 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
                                inode->i_blocks = calc_inode_blocks(symlen);
                        } else {
                                if (symlen != i_size_read(inode)) {
-                                       pr_err("%s %llx.%llx BAD symlink size %lld\n",
-                                               __func__, ceph_vinop(inode),
+                                       pr_err_client(cl,
+                                               "%p %llx.%llx BAD symlink size %lld\n",
+                                               inode, ceph_vinop(inode),
                                                i_size_read(inode));
                                        i_size_write(inode, symlen);
                                        inode->i_blocks = calc_inode_blocks(symlen);
@@ -1220,8 +1246,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
                inode->i_fop = &ceph_dir_fops;
                break;
        default:
-               pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
-                      ceph_vinop(inode), inode->i_mode);
+               pr_err_client(cl, "%p %llx.%llx BAD mode 0%o\n", inode,
+                             ceph_vinop(inode), inode->i_mode);
        }
 
        /* were we issued a capability? */
@@ -1242,7 +1268,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
                            (info_caps & CEPH_CAP_FILE_SHARED) &&
                            (issued & CEPH_CAP_FILE_EXCL) == 0 &&
                            !__ceph_dir_is_complete(ci)) {
-                               dout(" marking %p complete (empty)\n", inode);
+                               doutc(cl, " marking %p complete (empty)\n",
+                                     inode);
                                i_size_write(inode, 0);
                                __ceph_dir_set_complete(ci,
                                        atomic64_read(&ci->i_release_count),
@@ -1251,8 +1278,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
 
                        wake = true;
                } else {
-                       dout(" %p got snap_caps %s\n", inode,
-                            ceph_cap_string(info_caps));
+                       doutc(cl, " %p got snap_caps %s\n", inode,
+                             ceph_cap_string(info_caps));
                        ci->i_snap_caps |= info_caps;
                }
        }
@@ -1268,8 +1295,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
 
        if (cap_fmode >= 0) {
                if (!info_caps)
-                       pr_warn("mds issued no caps on %llx.%llx\n",
-                               ceph_vinop(inode));
+                       pr_warn_client(cl, "mds issued no caps on %llx.%llx\n",
+                                      ceph_vinop(inode));
                __ceph_touch_fmode(ci, mdsc, cap_fmode);
        }
 
@@ -1315,14 +1342,14 @@ static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
                                  unsigned long from_time,
                                  struct ceph_mds_session **old_lease_session)
 {
+       struct ceph_client *cl = ceph_inode_to_client(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        unsigned mask = le16_to_cpu(lease->mask);
        long unsigned duration = le32_to_cpu(lease->duration_ms);
        long unsigned ttl = from_time + (duration * HZ) / 1000;
        long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
 
-       dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
-            dentry, duration, ttl);
+       doutc(cl, "%p duration %lu ms ttl %lu\n", dentry, duration, ttl);
 
        /* only track leases on regular dentries */
        if (ceph_snap(dir) != CEPH_NOSNAP)
@@ -1423,6 +1450,7 @@ out_unlock:
  */
 static int splice_dentry(struct dentry **pdn, struct inode *in)
 {
+       struct ceph_client *cl = ceph_inode_to_client(in);
        struct dentry *dn = *pdn;
        struct dentry *realdn;
 
@@ -1454,23 +1482,21 @@ static int splice_dentry(struct dentry **pdn, struct inode *in)
                d_drop(dn);
        realdn = d_splice_alias(in, dn);
        if (IS_ERR(realdn)) {
-               pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
-                      PTR_ERR(realdn), dn, in, ceph_vinop(in));
+               pr_err_client(cl, "error %ld %p inode %p ino %llx.%llx\n",
+                             PTR_ERR(realdn), dn, in, ceph_vinop(in));
                return PTR_ERR(realdn);
        }
 
        if (realdn) {
-               dout("dn %p (%d) spliced with %p (%d) "
-                    "inode %p ino %llx.%llx\n",
-                    dn, d_count(dn),
-                    realdn, d_count(realdn),
-                    d_inode(realdn), ceph_vinop(d_inode(realdn)));
+               doutc(cl, "dn %p (%d) spliced with %p (%d) inode %p ino %llx.%llx\n",
+                     dn, d_count(dn), realdn, d_count(realdn),
+                     d_inode(realdn), ceph_vinop(d_inode(realdn)));
                dput(dn);
                *pdn = realdn;
        } else {
                BUG_ON(!ceph_dentry(dn));
-               dout("dn %p attached to %p ino %llx.%llx\n",
-                    dn, d_inode(dn), ceph_vinop(d_inode(dn)));
+               doutc(cl, "dn %p attached to %p ino %llx.%llx\n", dn,
+                     d_inode(dn), ceph_vinop(d_inode(dn)));
        }
        return 0;
 }
@@ -1492,14 +1518,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct inode *in = NULL;
        struct ceph_vino tvino, dvino;
-       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+       struct ceph_client *cl = fsc->client;
        int err = 0;
 
-       dout("fill_trace %p is_dentry %d is_target %d\n", req,
-            rinfo->head->is_dentry, rinfo->head->is_target);
+       doutc(cl, "%p is_dentry %d is_target %d\n", req,
+             rinfo->head->is_dentry, rinfo->head->is_target);
 
        if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
-               dout("fill_trace reply is empty!\n");
+               doutc(cl, "reply is empty!\n");
                if (rinfo->head->result == 0 && req->r_parent)
                        ceph_invalidate_dir_request(req);
                return 0;
@@ -1556,13 +1583,13 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
                        tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
 retry_lookup:
                        dn = d_lookup(parent, &dname);
-                       dout("d_lookup on parent=%p name=%.*s got %p\n",
-                            parent, dname.len, dname.name, dn);
+                       doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
+                             parent, dname.len, dname.name, dn);
 
                        if (!dn) {
                                dn = d_alloc(parent, &dname);
-                               dout("d_alloc %p '%.*s' = %p\n", parent,
-                                    dname.len, dname.name, dn);
+                               doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
+                                     dname.len, dname.name, dn);
                                if (!dn) {
                                        dput(parent);
                                        ceph_fname_free_buffer(dir, &oname);
@@ -1578,8 +1605,8 @@ retry_lookup:
                        } else if (d_really_is_positive(dn) &&
                                   (ceph_ino(d_inode(dn)) != tvino.ino ||
                                    ceph_snap(d_inode(dn)) != tvino.snap)) {
-                               dout(" dn %p points to wrong inode %p\n",
-                                    dn, d_inode(dn));
+                               doutc(cl, " dn %p points to wrong inode %p\n",
+                                     dn, d_inode(dn));
                                ceph_dir_clear_ordered(dir);
                                d_delete(dn);
                                dput(dn);
@@ -1604,8 +1631,8 @@ retry_lookup:
                                 rinfo->head->result == 0) ?  req->r_fmode : -1,
                                &req->r_caps_reservation);
                if (err < 0) {
-                       pr_err("ceph_fill_inode badness %p %llx.%llx\n",
-                               in, ceph_vinop(in));
+                       pr_err_client(cl, "badness %p %llx.%llx\n", in,
+                                     ceph_vinop(in));
                        req->r_target_inode = NULL;
                        if (in->i_state & I_NEW)
                                discard_new_inode(in);
@@ -1655,36 +1682,32 @@ retry_lookup:
                have_lease = have_dir_cap ||
                        le32_to_cpu(rinfo->dlease->duration_ms);
                if (!have_lease)
-                       dout("fill_trace  no dentry lease or dir cap\n");
+                       doutc(cl, "no dentry lease or dir cap\n");
 
                /* rename? */
                if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
                        struct inode *olddir = req->r_old_dentry_dir;
                        BUG_ON(!olddir);
 
-                       dout(" src %p '%pd' dst %p '%pd'\n",
-                            req->r_old_dentry,
-                            req->r_old_dentry,
-                            dn, dn);
-                       dout("fill_trace doing d_move %p -> %p\n",
-                            req->r_old_dentry, dn);
+                       doutc(cl, " src %p '%pd' dst %p '%pd'\n",
+                             req->r_old_dentry, req->r_old_dentry, dn, dn);
+                       doutc(cl, "doing d_move %p -> %p\n", req->r_old_dentry, dn);
 
                        /* d_move screws up sibling dentries' offsets */
                        ceph_dir_clear_ordered(dir);
                        ceph_dir_clear_ordered(olddir);
 
                        d_move(req->r_old_dentry, dn);
-                       dout(" src %p '%pd' dst %p '%pd'\n",
-                            req->r_old_dentry,
-                            req->r_old_dentry,
-                            dn, dn);
+                       doutc(cl, " src %p '%pd' dst %p '%pd'\n",
+                             req->r_old_dentry, req->r_old_dentry, dn, dn);
 
                        /* ensure target dentry is invalidated, despite
                           rehashing bug in vfs_rename_dir */
                        ceph_invalidate_dentry_lease(dn);
 
-                       dout("dn %p gets new offset %lld\n", req->r_old_dentry,
-                            ceph_dentry(req->r_old_dentry)->offset);
+                       doutc(cl, "dn %p gets new offset %lld\n",
+                             req->r_old_dentry,
+                             ceph_dentry(req->r_old_dentry)->offset);
 
                        /* swap r_dentry and r_old_dentry in case that
                         * splice_dentry() gets called later. This is safe
@@ -1696,9 +1719,9 @@ retry_lookup:
 
                /* null dentry? */
                if (!rinfo->head->is_target) {
-                       dout("fill_trace null dentry\n");
+                       doutc(cl, "null dentry\n");
                        if (d_really_is_positive(dn)) {
-                               dout("d_delete %p\n", dn);
+                               doutc(cl, "d_delete %p\n", dn);
                                ceph_dir_clear_ordered(dir);
                                d_delete(dn);
                        } else if (have_lease) {
@@ -1722,9 +1745,9 @@ retry_lookup:
                                goto done;
                        dn = req->r_dentry;  /* may have spliced */
                } else if (d_really_is_positive(dn) && d_inode(dn) != in) {
-                       dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
-                            dn, d_inode(dn), ceph_vinop(d_inode(dn)),
-                            ceph_vinop(in));
+                       doutc(cl, " %p links to %p %llx.%llx, not %llx.%llx\n",
+                             dn, d_inode(dn), ceph_vinop(d_inode(dn)),
+                             ceph_vinop(in));
                        d_invalidate(dn);
                        have_lease = false;
                }
@@ -1734,7 +1757,7 @@ retry_lookup:
                                            rinfo->dlease, session,
                                            req->r_request_started);
                }
-               dout(" final dn %p\n", dn);
+               doutc(cl, " final dn %p\n", dn);
        } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
                    req->r_op == CEPH_MDS_OP_MKSNAP) &&
                   test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
@@ -1745,7 +1768,8 @@ retry_lookup:
                BUG_ON(!dir);
                BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
                BUG_ON(!req->r_dentry);
-               dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
+               doutc(cl, " linking snapped dir %p to dn %p\n", in,
+                     req->r_dentry);
                ceph_dir_clear_ordered(dir);
                ihold(in);
                err = splice_dentry(&req->r_dentry, in);
@@ -1767,7 +1791,7 @@ retry_lookup:
                                            &dvino, ptvino);
        }
 done:
-       dout("fill_trace done err=%d\n", err);
+       doutc(cl, "done err=%d\n", err);
        return err;
 }
 
@@ -1778,6 +1802,7 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
                                           struct ceph_mds_session *session)
 {
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+       struct ceph_client *cl = session->s_mdsc->fsc->client;
        int i, err = 0;
 
        for (i = 0; i < rinfo->dir_nr; i++) {
@@ -1792,14 +1817,14 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
                in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
                if (IS_ERR(in)) {
                        err = PTR_ERR(in);
-                       dout("new_inode badness got %d\n", err);
+                       doutc(cl, "badness got %d\n", err);
                        continue;
                }
                rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
                                     -1, &req->r_caps_reservation);
                if (rc < 0) {
-                       pr_err("ceph_fill_inode badness on %p got %d\n",
-                              in, rc);
+                       pr_err_client(cl, "inode badness on %p got %d\n", in,
+                                     rc);
                        err = rc;
                        if (in->i_state & I_NEW) {
                                ihold(in);
@@ -1828,6 +1853,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
                              struct ceph_readdir_cache_control *ctl,
                              struct ceph_mds_request *req)
 {
+       struct ceph_client *cl = ceph_inode_to_client(dir);
        struct ceph_inode_info *ci = ceph_inode(dir);
        unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
        unsigned idx = ctl->index % nsize;
@@ -1853,11 +1879,11 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
 
        if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
            req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
-               dout("readdir cache dn %p idx %d\n", dn, ctl->index);
+               doutc(cl, "dn %p idx %d\n", dn, ctl->index);
                ctl->dentries[idx] = dn;
                ctl->index++;
        } else {
-               dout("disable readdir cache\n");
+               doutc(cl, "disable readdir cache\n");
                ctl->index = -1;
        }
        return 0;
@@ -1870,6 +1896,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
        struct inode *inode = d_inode(parent);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
+       struct ceph_client *cl = session->s_mdsc->fsc->client;
        struct qstr dname;
        struct dentry *dn;
        struct inode *in;
@@ -1897,19 +1924,18 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 
        if (rinfo->dir_dir &&
            le32_to_cpu(rinfo->dir_dir->frag) != frag) {
-               dout("readdir_prepopulate got new frag %x -> %x\n",
-                    frag, le32_to_cpu(rinfo->dir_dir->frag));
+               doutc(cl, "got new frag %x -> %x\n", frag,
+                           le32_to_cpu(rinfo->dir_dir->frag));
                frag = le32_to_cpu(rinfo->dir_dir->frag);
                if (!rinfo->hash_order)
                        req->r_readdir_offset = 2;
        }
 
        if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
-               dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
-                    rinfo->dir_nr, parent);
+               doutc(cl, "%d items under SNAPDIR dn %p\n",
+                     rinfo->dir_nr, parent);
        } else {
-               dout("readdir_prepopulate %d items under dn %p\n",
-                    rinfo->dir_nr, parent);
+               doutc(cl, "%d items under dn %p\n", rinfo->dir_nr, parent);
                if (rinfo->dir_dir)
                        ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
 
@@ -1953,15 +1979,15 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
 
 retry_lookup:
                dn = d_lookup(parent, &dname);
-               dout("d_lookup on parent=%p name=%.*s got %p\n",
-                    parent, dname.len, dname.name, dn);
+               doutc(cl, "d_lookup on parent=%p name=%.*s got %p\n",
+                     parent, dname.len, dname.name, dn);
 
                if (!dn) {
                        dn = d_alloc(parent, &dname);
-                       dout("d_alloc %p '%.*s' = %p\n", parent,
-                            dname.len, dname.name, dn);
+                       doutc(cl, "d_alloc %p '%.*s' = %p\n", parent,
+                             dname.len, dname.name, dn);
                        if (!dn) {
-                               dout("d_alloc badness\n");
+                               doutc(cl, "d_alloc badness\n");
                                err = -ENOMEM;
                                goto out;
                        }
@@ -1974,8 +2000,8 @@ retry_lookup:
                           (ceph_ino(d_inode(dn)) != tvino.ino ||
                            ceph_snap(d_inode(dn)) != tvino.snap)) {
                        struct ceph_dentry_info *di = ceph_dentry(dn);
-                       dout(" dn %p points to wrong inode %p\n",
-                            dn, d_inode(dn));
+                       doutc(cl, " dn %p points to wrong inode %p\n",
+                             dn, d_inode(dn));
 
                        spin_lock(&dn->d_lock);
                        if (di->offset > 0 &&
@@ -1997,7 +2023,7 @@ retry_lookup:
                } else {
                        in = ceph_get_inode(parent->d_sb, tvino, NULL);
                        if (IS_ERR(in)) {
-                               dout("new_inode badness\n");
+                               doutc(cl, "new_inode badness\n");
                                d_drop(dn);
                                dput(dn);
                                err = PTR_ERR(in);
@@ -2008,7 +2034,8 @@ retry_lookup:
                ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
                                      -1, &req->r_caps_reservation);
                if (ret < 0) {
-                       pr_err("ceph_fill_inode badness on %p\n", in);
+                       pr_err_client(cl, "badness on %p %llx.%llx\n", in,
+                                     ceph_vinop(in));
                        if (d_really_is_negative(dn)) {
                                if (in->i_state & I_NEW) {
                                        ihold(in);
@@ -2025,8 +2052,8 @@ retry_lookup:
 
                if (d_really_is_negative(dn)) {
                        if (ceph_security_xattr_deadlock(in)) {
-                               dout(" skip splicing dn %p to inode %p"
-                                    " (security xattr deadlock)\n", dn, in);
+                               doutc(cl, " skip splicing dn %p to inode %p"
+                                     " (security xattr deadlock)\n", dn, in);
                                iput(in);
                                skipped++;
                                goto next_item;
@@ -2058,17 +2085,18 @@ out:
                req->r_readdir_cache_idx = cache_ctl.index;
        }
        ceph_readdir_cache_release(&cache_ctl);
-       dout("readdir_prepopulate done\n");
+       doutc(cl, "done\n");
        return err;
 }
 
 bool ceph_inode_set_size(struct inode *inode, loff_t size)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool ret;
 
        spin_lock(&ci->i_ceph_lock);
-       dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
+       doutc(cl, "set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
        i_size_write(inode, size);
        ceph_fscache_update(inode);
        inode->i_blocks = calc_inode_blocks(size);
@@ -2082,22 +2110,25 @@ bool ceph_inode_set_size(struct inode *inode, loff_t size)
 
 void ceph_queue_inode_work(struct inode *inode, int work_bit)
 {
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
+       struct ceph_client *cl = fsc->client;
        struct ceph_inode_info *ci = ceph_inode(inode);
        set_bit(work_bit, &ci->i_work_mask);
 
        ihold(inode);
        if (queue_work(fsc->inode_wq, &ci->i_work)) {
-               dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
+               doutc(cl, "%p %llx.%llx mask=%lx\n", inode,
+                     ceph_vinop(inode), ci->i_work_mask);
        } else {
-               dout("queue_inode_work %p already queued, mask=%lx\n",
-                    inode, ci->i_work_mask);
+               doutc(cl, "%p %llx.%llx already queued, mask=%lx\n",
+                     inode, ceph_vinop(inode), ci->i_work_mask);
                iput(inode);
        }
 }
 
 static void ceph_do_invalidate_pages(struct inode *inode)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u32 orig_gen;
        int check = 0;
@@ -2107,8 +2138,9 @@ static void ceph_do_invalidate_pages(struct inode *inode)
        mutex_lock(&ci->i_truncate_mutex);
 
        if (ceph_inode_is_shutdown(inode)) {
-               pr_warn_ratelimited("%s: inode %llx.%llx is shut down\n",
-                                   __func__, ceph_vinop(inode));
+               pr_warn_ratelimited_client(cl,
+                       "%p %llx.%llx is shut down\n", inode,
+                       ceph_vinop(inode));
                mapping_set_error(inode->i_mapping, -EIO);
                truncate_pagecache(inode, 0);
                mutex_unlock(&ci->i_truncate_mutex);
@@ -2116,8 +2148,8 @@ static void ceph_do_invalidate_pages(struct inode *inode)
        }
 
        spin_lock(&ci->i_ceph_lock);
-       dout("invalidate_pages %p gen %d revoking %d\n", inode,
-            ci->i_rdcache_gen, ci->i_rdcache_revoking);
+       doutc(cl, "%p %llx.%llx gen %d revoking %d\n", inode,
+             ceph_vinop(inode), ci->i_rdcache_gen, ci->i_rdcache_revoking);
        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
                if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
                        check = 1;
@@ -2129,21 +2161,21 @@ static void ceph_do_invalidate_pages(struct inode *inode)
        spin_unlock(&ci->i_ceph_lock);
 
        if (invalidate_inode_pages2(inode->i_mapping) < 0) {
-               pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
-                      ceph_vinop(inode));
+               pr_err_client(cl, "invalidate_inode_pages2 %llx.%llx failed\n",
+                             ceph_vinop(inode));
        }
 
        spin_lock(&ci->i_ceph_lock);
        if (orig_gen == ci->i_rdcache_gen &&
            orig_gen == ci->i_rdcache_revoking) {
-               dout("invalidate_pages %p gen %d successful\n", inode,
-                    ci->i_rdcache_gen);
+               doutc(cl, "%p %llx.%llx gen %d successful\n", inode,
+                     ceph_vinop(inode), ci->i_rdcache_gen);
                ci->i_rdcache_revoking--;
                check = 1;
        } else {
-               dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
-                    inode, orig_gen, ci->i_rdcache_gen,
-                    ci->i_rdcache_revoking);
+               doutc(cl, "%p %llx.%llx gen %d raced, now %d revoking %d\n",
+                     inode, ceph_vinop(inode), orig_gen, ci->i_rdcache_gen,
+                     ci->i_rdcache_revoking);
                if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
                        check = 1;
        }
@@ -2160,6 +2192,7 @@ out:
  */
 void __ceph_do_pending_vmtruncate(struct inode *inode)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 to;
        int wrbuffer_refs, finish = 0;
@@ -2168,7 +2201,8 @@ void __ceph_do_pending_vmtruncate(struct inode *inode)
 retry:
        spin_lock(&ci->i_ceph_lock);
        if (ci->i_truncate_pending == 0) {
-               dout("%s %p none pending\n", __func__, inode);
+               doutc(cl, "%p %llx.%llx none pending\n", inode,
+                     ceph_vinop(inode));
                spin_unlock(&ci->i_ceph_lock);
                mutex_unlock(&ci->i_truncate_mutex);
                return;
@@ -2180,7 +2214,8 @@ retry:
         */
        if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
                spin_unlock(&ci->i_ceph_lock);
-               dout("%s %p flushing snaps first\n", __func__, inode);
+               doutc(cl, "%p %llx.%llx flushing snaps first\n", inode,
+                     ceph_vinop(inode));
                filemap_write_and_wait_range(&inode->i_data, 0,
                                             inode->i_sb->s_maxbytes);
                goto retry;
@@ -2191,8 +2226,8 @@ retry:
 
        to = ci->i_truncate_pagecache_size;
        wrbuffer_refs = ci->i_wrbuffer_ref;
-       dout("%s %p (%d) to %lld\n", __func__, inode,
-            ci->i_truncate_pending, to);
+       doutc(cl, "%p %llx.%llx (%d) to %lld\n", inode, ceph_vinop(inode),
+             ci->i_truncate_pending, to);
        spin_unlock(&ci->i_ceph_lock);
 
        ceph_fscache_resize(inode, to);
@@ -2220,9 +2255,10 @@ static void ceph_inode_work(struct work_struct *work)
        struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
                                                 i_work);
        struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
 
        if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
-               dout("writeback %p\n", inode);
+               doutc(cl, "writeback %p %llx.%llx\n", inode, ceph_vinop(inode));
                filemap_fdatawrite(&inode->i_data);
        }
        if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
@@ -2294,6 +2330,7 @@ static int fill_fscrypt_truncate(struct inode *inode,
                                 struct ceph_mds_request *req,
                                 struct iattr *attr)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int boff = attr->ia_size % CEPH_FSCRYPT_BLOCK_SIZE;
        loff_t pos, orig_pos = round_down(attr->ia_size,
@@ -2316,9 +2353,9 @@ static int fill_fscrypt_truncate(struct inode *inode,
 
        issued = __ceph_caps_issued(ci, NULL);
 
-       dout("%s size %lld -> %lld got cap refs on %s, issued %s\n", __func__,
-            i_size, attr->ia_size, ceph_cap_string(got),
-            ceph_cap_string(issued));
+       doutc(cl, "size %lld -> %lld got cap refs on %s, issued %s\n",
+             i_size, attr->ia_size, ceph_cap_string(got),
+             ceph_cap_string(issued));
 
        /* Try to writeback the dirty pagecaches */
        if (issued & (CEPH_CAP_FILE_BUFFER)) {
@@ -2373,8 +2410,7 @@ static int fill_fscrypt_truncate(struct inode *inode,
         * If the Rados object doesn't exist, it will be set to 0.
         */
        if (!objver) {
-               dout("%s hit hole, ppos %lld < size %lld\n", __func__,
-                    pos, i_size);
+               doutc(cl, "hit hole, ppos %lld < size %lld\n", pos, i_size);
 
                header.data_len = cpu_to_le32(8 + 8 + 4);
                header.file_offset = 0;
@@ -2383,8 +2419,8 @@ static int fill_fscrypt_truncate(struct inode *inode,
                header.data_len = cpu_to_le32(8 + 8 + 4 + CEPH_FSCRYPT_BLOCK_SIZE);
                header.file_offset = cpu_to_le64(orig_pos);
 
-               dout("%s encrypt block boff/bsize %d/%lu\n", __func__,
-                    boff, CEPH_FSCRYPT_BLOCK_SIZE);
+               doutc(cl, "encrypt block boff/bsize %d/%lu\n", boff,
+                     CEPH_FSCRYPT_BLOCK_SIZE);
 
                /* truncate and zero out the extra contents for the last block */
                memset(iov.iov_base + boff, 0, PAGE_SIZE - boff);
@@ -2412,8 +2448,8 @@ static int fill_fscrypt_truncate(struct inode *inode,
        }
        req->r_pagelist = pagelist;
 out:
-       dout("%s %p size dropping cap refs on %s\n", __func__,
-            inode, ceph_cap_string(got));
+       doutc(cl, "%p %llx.%llx size dropping cap refs on %s\n", inode,
+             ceph_vinop(inode), ceph_cap_string(got));
        ceph_put_cap_refs(ci, got);
        if (iov.iov_base)
                kunmap_local(iov.iov_base);
@@ -2424,13 +2460,14 @@ out:
        return ret;
 }
 
-int __ceph_setattr(struct inode *inode, struct iattr *attr,
-                  struct ceph_iattr *cia)
+int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
+                  struct iattr *attr, struct ceph_iattr *cia)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        unsigned int ia_valid = attr->ia_valid;
        struct ceph_mds_request *req;
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_cap_flush *prealloc_cf;
        loff_t isize = i_size_read(inode);
        int issued;
@@ -2469,7 +2506,8 @@ retry:
                }
        }
 
-       dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
+       doutc(cl, "%p %llx.%llx issued %s\n", inode, ceph_vinop(inode),
+             ceph_cap_string(issued));
 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
        if (cia && cia->fscrypt_auth) {
                u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);
@@ -2480,8 +2518,8 @@ retry:
                        goto out;
                }
 
-               dout("setattr %llx:%llx fscrypt_auth len %u to %u)\n",
-                       ceph_vinop(inode), ci->fscrypt_auth_len, len);
+               doutc(cl, "%p %llx.%llx fscrypt_auth len %u to %u)\n", inode,
+                     ceph_vinop(inode), ci->fscrypt_auth_len, len);
 
                /* It should never be re-set once set */
                WARN_ON_ONCE(ci->fscrypt_auth);
@@ -2509,38 +2547,44 @@ retry:
 #endif /* CONFIG_FS_ENCRYPTION */
 
        if (ia_valid & ATTR_UID) {
-               dout("setattr %p uid %d -> %d\n", inode,
-                    from_kuid(&init_user_ns, inode->i_uid),
-                    from_kuid(&init_user_ns, attr->ia_uid));
+               kuid_t fsuid = from_vfsuid(idmap, i_user_ns(inode), attr->ia_vfsuid);
+
+               doutc(cl, "%p %llx.%llx uid %d -> %d\n", inode,
+                     ceph_vinop(inode),
+                     from_kuid(&init_user_ns, inode->i_uid),
+                     from_kuid(&init_user_ns, attr->ia_uid));
                if (issued & CEPH_CAP_AUTH_EXCL) {
-                       inode->i_uid = attr->ia_uid;
+                       inode->i_uid = fsuid;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
-                          !uid_eq(attr->ia_uid, inode->i_uid)) {
+                          !uid_eq(fsuid, inode->i_uid)) {
                        req->r_args.setattr.uid = cpu_to_le32(
-                               from_kuid(&init_user_ns, attr->ia_uid));
+                               from_kuid(&init_user_ns, fsuid));
                        mask |= CEPH_SETATTR_UID;
                        release |= CEPH_CAP_AUTH_SHARED;
                }
        }
        if (ia_valid & ATTR_GID) {
-               dout("setattr %p gid %d -> %d\n", inode,
-                    from_kgid(&init_user_ns, inode->i_gid),
-                    from_kgid(&init_user_ns, attr->ia_gid));
+               kgid_t fsgid = from_vfsgid(idmap, i_user_ns(inode), attr->ia_vfsgid);
+
+               doutc(cl, "%p %llx.%llx gid %d -> %d\n", inode,
+                     ceph_vinop(inode),
+                     from_kgid(&init_user_ns, inode->i_gid),
+                     from_kgid(&init_user_ns, attr->ia_gid));
                if (issued & CEPH_CAP_AUTH_EXCL) {
-                       inode->i_gid = attr->ia_gid;
+                       inode->i_gid = fsgid;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
                } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
-                          !gid_eq(attr->ia_gid, inode->i_gid)) {
+                          !gid_eq(fsgid, inode->i_gid)) {
                        req->r_args.setattr.gid = cpu_to_le32(
-                               from_kgid(&init_user_ns, attr->ia_gid));
+                               from_kgid(&init_user_ns, fsgid));
                        mask |= CEPH_SETATTR_GID;
                        release |= CEPH_CAP_AUTH_SHARED;
                }
        }
        if (ia_valid & ATTR_MODE) {
-               dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
-                    attr->ia_mode);
+               doutc(cl, "%p %llx.%llx mode 0%o -> 0%o\n", inode,
+                     ceph_vinop(inode), inode->i_mode, attr->ia_mode);
                if (issued & CEPH_CAP_AUTH_EXCL) {
                        inode->i_mode = attr->ia_mode;
                        dirtied |= CEPH_CAP_AUTH_EXCL;
@@ -2556,9 +2600,10 @@ retry:
        if (ia_valid & ATTR_ATIME) {
                struct timespec64 atime = inode_get_atime(inode);
 
-               dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
-                    atime.tv_sec, atime.tv_nsec,
-                    attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
+               doutc(cl, "%p %llx.%llx atime %lld.%09ld -> %lld.%09ld\n",
+                     inode, ceph_vinop(inode),
+                     atime.tv_sec, atime.tv_nsec,
+                     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
                if (issued & CEPH_CAP_FILE_EXCL) {
                        ci->i_time_warp_seq++;
                        inode_set_atime_to_ts(inode, attr->ia_atime);
@@ -2578,7 +2623,8 @@ retry:
                }
        }
        if (ia_valid & ATTR_SIZE) {
-               dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
+               doutc(cl, "%p %llx.%llx size %lld -> %lld\n", inode,
+                     ceph_vinop(inode), isize, attr->ia_size);
                /*
                 * Only when the new size is smaller and not aligned to
                 * CEPH_FSCRYPT_BLOCK_SIZE will the RMW is needed.
@@ -2631,9 +2677,10 @@ retry:
        if (ia_valid & ATTR_MTIME) {
                struct timespec64 mtime = inode_get_mtime(inode);
 
-               dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
-                    mtime.tv_sec, mtime.tv_nsec,
-                    attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
+               doutc(cl, "%p %llx.%llx mtime %lld.%09ld -> %lld.%09ld\n",
+                     inode, ceph_vinop(inode),
+                     mtime.tv_sec, mtime.tv_nsec,
+                     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
                if (issued & CEPH_CAP_FILE_EXCL) {
                        ci->i_time_warp_seq++;
                        inode_set_mtime_to_ts(inode, attr->ia_mtime);
@@ -2656,11 +2703,12 @@ retry:
        if (ia_valid & ATTR_CTIME) {
                bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
                                         ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
-               dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
-                    inode_get_ctime_sec(inode),
-                    inode_get_ctime_nsec(inode),
-                    attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
-                    only ? "ctime only" : "ignored");
+               doutc(cl, "%p %llx.%llx ctime %lld.%09ld -> %lld.%09ld (%s)\n",
+                     inode, ceph_vinop(inode),
+                     inode_get_ctime_sec(inode),
+                     inode_get_ctime_nsec(inode),
+                     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
+                     only ? "ctime only" : "ignored");
                if (only) {
                        /*
                         * if kernel wants to dirty ctime but nothing else,
@@ -2678,7 +2726,8 @@ retry:
                }
        }
        if (ia_valid & ATTR_FILE)
-               dout("setattr %p ATTR_FILE ... hrm!\n", inode);
+               doutc(cl, "%p %llx.%llx ATTR_FILE ... hrm!\n", inode,
+                     ceph_vinop(inode));
 
        if (dirtied) {
                inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
@@ -2719,16 +2768,17 @@ retry:
                 */
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                if (err == -EAGAIN && truncate_retry--) {
-                       dout("setattr %p result=%d (%s locally, %d remote), retry it!\n",
-                            inode, err, ceph_cap_string(dirtied), mask);
+                       doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote), retry it!\n",
+                             inode, ceph_vinop(inode), err,
+                             ceph_cap_string(dirtied), mask);
                        ceph_mdsc_put_request(req);
                        ceph_free_cap_flush(prealloc_cf);
                        goto retry;
                }
        }
 out:
-       dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
-            ceph_cap_string(dirtied), mask);
+       doutc(cl, "%p %llx.%llx result=%d (%s locally, %d remote)\n", inode,
+             ceph_vinop(inode), err, ceph_cap_string(dirtied), mask);
 
        ceph_mdsc_put_request(req);
        ceph_free_cap_flush(prealloc_cf);
@@ -2746,7 +2796,7 @@ int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                 struct iattr *attr)
 {
        struct inode *inode = d_inode(dentry);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        int err;
 
        if (ceph_snap(inode) != CEPH_NOSNAP)
@@ -2759,7 +2809,7 @@ int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
        if (err)
                return err;
 
-       err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
+       err = setattr_prepare(idmap, dentry, attr);
        if (err != 0)
                return err;
 
@@ -2771,10 +2821,10 @@ int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
            ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
                return -EDQUOT;
 
-       err = __ceph_setattr(inode, attr, NULL);
+       err = __ceph_setattr(idmap, inode, attr, NULL);
 
        if (err >= 0 && (attr->ia_valid & ATTR_MODE))
-               err = posix_acl_chmod(&nop_mnt_idmap, dentry, attr->ia_mode);
+               err = posix_acl_chmod(idmap, dentry, attr->ia_mode);
 
        return err;
 }
@@ -2816,19 +2866,21 @@ int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
                      int mask, bool force)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+       struct ceph_client *cl = fsc->client;
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int mode;
        int err;
 
        if (ceph_snap(inode) == CEPH_SNAPDIR) {
-               dout("do_getattr inode %p SNAPDIR\n", inode);
+               doutc(cl, "inode %p %llx.%llx SNAPDIR\n", inode,
+                     ceph_vinop(inode));
                return 0;
        }
 
-       dout("do_getattr inode %p mask %s mode 0%o\n",
-            inode, ceph_cap_string(mask), inode->i_mode);
+       doutc(cl, "inode %p %llx.%llx mask %s mode 0%o\n", inode,
+             ceph_vinop(inode), ceph_cap_string(mask), inode->i_mode);
        if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
                        return 0;
 
@@ -2855,14 +2907,15 @@ int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
                }
        }
        ceph_mdsc_put_request(req);
-       dout("do_getattr result=%d\n", err);
+       doutc(cl, "result=%d\n", err);
        return err;
 }
 
 int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
                      size_t size)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+       struct ceph_client *cl = fsc->client;
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int mode = USE_AUTH_MDS;
@@ -2892,7 +2945,7 @@ int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
        xattr_value = req->r_reply_info.xattr_info.xattr_value;
        xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
 
-       dout("do_getvxattr xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
+       doutc(cl, "xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
 
        err = (int)xattr_value_len;
        if (size == 0)
@@ -2907,7 +2960,7 @@ int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
 put:
        ceph_mdsc_put_request(req);
 out:
-       dout("do_getvxattr result=%d\n", err);
+       doutc(cl, "result=%d\n", err);
        return err;
 }
 
@@ -2927,7 +2980,7 @@ int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
        err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
 
        if (!err)
-               err = generic_permission(&nop_mnt_idmap, inode, mask);
+               err = generic_permission(idmap, inode, mask);
        return err;
 }
 
@@ -2984,7 +3037,7 @@ int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
                        return err;
        }
 
-       generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
+       generic_fillattr(idmap, request_mask, inode, stat);
        stat->ino = ceph_present_inode(inode);
 
        /*
@@ -3007,7 +3060,7 @@ int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
                stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
 
        if (S_ISDIR(inode->i_mode)) {
-               if (ceph_test_mount_opt(ceph_sb_to_client(sb), RBYTES)) {
+               if (ceph_test_mount_opt(ceph_sb_to_fs_client(sb), RBYTES)) {
                        stat->size = ci->i_rbytes;
                } else if (ceph_snap(inode) == CEPH_SNAPDIR) {
                        struct ceph_inode_info *pci;
index 91a84917d203c54527ba676a32c23bb1a00cf1da..e861de3c79b9e126849d930500dee51aff072c80 100644 (file)
@@ -65,7 +65,7 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
 static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
 {
        struct inode *inode = file_inode(file);
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
        struct ceph_mds_request *req;
        struct ceph_ioctl_layout l;
        struct ceph_inode_info *ci = ceph_inode(file_inode(file));
@@ -140,7 +140,7 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
        struct ceph_mds_request *req;
        struct ceph_ioctl_layout l;
        int err;
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
 
        /* copy and validate */
        if (copy_from_user(&l, arg, sizeof(l)))
@@ -183,7 +183,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_client *osdc =
-               &ceph_sb_to_client(inode->i_sb)->client->osdc;
+               &ceph_sb_to_fs_client(inode->i_sb)->client->osdc;
        struct ceph_object_locator oloc;
        CEPH_DEFINE_OID_ONSTACK(oid);
        u32 xlen;
@@ -244,7 +244,8 @@ static long ceph_ioctl_lazyio(struct file *file)
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
 
        if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
                spin_lock(&ci->i_ceph_lock);
@@ -252,11 +253,13 @@ static long ceph_ioctl_lazyio(struct file *file)
                ci->i_nr_by_mode[ffs(CEPH_FILE_MODE_LAZY)]++;
                __ceph_touch_fmode(ci, mdsc, fi->fmode);
                spin_unlock(&ci->i_ceph_lock);
-               dout("ioctl_layzio: file %p marked lazy\n", file);
+               doutc(cl, "file %p %p %llx.%llx marked lazy\n", file, inode,
+                     ceph_vinop(inode));
 
                ceph_check_caps(ci, 0);
        } else {
-               dout("ioctl_layzio: file %p already lazy\n", file);
+               doutc(cl, "file %p %p %llx.%llx already lazy\n", file, inode,
+                     ceph_vinop(inode));
        }
        return 0;
 }
@@ -355,10 +358,12 @@ static const char *ceph_ioctl_cmd_name(const unsigned int cmd)
 
 long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
+       struct inode *inode = file_inode(file);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        int ret;
 
-       dout("ioctl file %p cmd %s arg %lu\n", file,
-            ceph_ioctl_cmd_name(cmd), arg);
+       doutc(fsc->client, "file %p %p %llx.%llx cmd %s arg %lu\n", file,
+             inode, ceph_vinop(inode), ceph_ioctl_cmd_name(cmd), arg);
        switch (cmd) {
        case CEPH_IOC_GET_LAYOUT:
                return ceph_ioctl_get_layout(file, (void __user *)arg);
index cb51c7e9c8e22cd5b3295ea902bb667905d2d17b..e07ad29ff8b97210ed3d3173412f0b1d1cb7a257 100644 (file)
@@ -77,6 +77,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
                             int cmd, u8 wait, struct file_lock *fl)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        int err;
        u64 length = 0;
@@ -111,10 +112,10 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
 
        owner = secure_addr(fl->fl_owner);
 
-       dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
-            "start: %llu, length: %llu, wait: %d, type: %d\n", (int)lock_type,
-            (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
-            wait, fl->fl_type);
+       doutc(cl, "rule: %d, op: %d, owner: %llx, pid: %llu, "
+                   "start: %llu, length: %llu, wait: %d, type: %d\n",
+                   (int)lock_type, (int)operation, owner, (u64)fl->fl_pid,
+                   fl->fl_start, length, wait, fl->fl_type);
 
        req->r_args.filelock_change.rule = lock_type;
        req->r_args.filelock_change.type = cmd;
@@ -147,16 +148,17 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
 
        }
        ceph_mdsc_put_request(req);
-       dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
-            "length: %llu, wait: %d, type: %d, err code %d\n", (int)lock_type,
-            (int)operation, (u64)fl->fl_pid, fl->fl_start,
-            length, wait, fl->fl_type, err);
+       doutc(cl, "rule: %d, op: %d, pid: %llu, start: %llu, "
+             "length: %llu, wait: %d, type: %d, err code %d\n",
+             (int)lock_type, (int)operation, (u64)fl->fl_pid,
+             fl->fl_start, length, wait, fl->fl_type, err);
        return err;
 }
 
 static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                          struct ceph_mds_request *req)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *intr_req;
        struct inode *inode = req->r_inode;
        int err, lock_type;
@@ -174,8 +176,7 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
        if (!err)
                return 0;
 
-       dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
-            req->r_tid);
+       doutc(cl, "request %llu was interrupted\n", req->r_tid);
 
        mutex_lock(&mdsc->mutex);
        if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
@@ -246,6 +247,7 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
 {
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int err = 0;
        u16 op = CEPH_MDS_OP_SETFILELOCK;
        u8 wait = 0;
@@ -257,7 +259,7 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
        if (ceph_inode_is_shutdown(inode))
                return -ESTALE;
 
-       dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);
+       doutc(cl, "fl_owner: %p\n", fl->fl_owner);
 
        /* set wait bit as appropriate, then make command as Ceph expects it*/
        if (IS_GETLK(cmd))
@@ -292,7 +294,7 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
        err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
        if (!err) {
                if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->fl_type) {
-                       dout("mds locked, locking locally\n");
+                       doutc(cl, "locking locally\n");
                        err = posix_lock_file(file, fl, NULL);
                        if (err) {
                                /* undo! This should only happen if
@@ -300,8 +302,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
                                 * deadlock. */
                                ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
                                                  CEPH_LOCK_UNLOCK, 0, fl);
-                               dout("got %d on posix_lock_file, undid lock\n",
-                                    err);
+                               doutc(cl, "got %d on posix_lock_file, undid lock\n",
+                                     err);
                        }
                }
        }
@@ -312,6 +314,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
 {
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int err = 0;
        u8 wait = 0;
        u8 lock_cmd;
@@ -322,7 +325,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
        if (ceph_inode_is_shutdown(inode))
                return -ESTALE;
 
-       dout("ceph_flock, fl_file: %p\n", fl->fl_file);
+       doutc(cl, "fl_file: %p\n", fl->fl_file);
 
        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
@@ -359,7 +362,8 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
                        ceph_lock_message(CEPH_LOCK_FLOCK,
                                          CEPH_MDS_OP_SETFILELOCK,
                                          inode, CEPH_LOCK_UNLOCK, 0, fl);
-                       dout("got %d on locks_lock_file_wait, undid lock\n", err);
+                       doutc(cl, "got %d on locks_lock_file_wait, undid lock\n",
+                             err);
                }
        }
        return err;
@@ -371,6 +375,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
  */
 void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct file_lock *lock;
        struct file_lock_context *ctx;
 
@@ -386,17 +391,20 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
                        ++(*flock_count);
                spin_unlock(&ctx->flc_lock);
        }
-       dout("counted %d flock locks and %d fcntl locks\n",
-            *flock_count, *fcntl_count);
+       doutc(cl, "counted %d flock locks and %d fcntl locks\n",
+             *flock_count, *fcntl_count);
 }
 
 /*
  * Given a pointer to a lock, convert it to a ceph filelock
  */
-static int lock_to_ceph_filelock(struct file_lock *lock,
+static int lock_to_ceph_filelock(struct inode *inode,
+                                struct file_lock *lock,
                                 struct ceph_filelock *cephlock)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int err = 0;
+
        cephlock->start = cpu_to_le64(lock->fl_start);
        cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
        cephlock->client = cpu_to_le64(0);
@@ -414,7 +422,7 @@ static int lock_to_ceph_filelock(struct file_lock *lock,
                cephlock->type = CEPH_LOCK_UNLOCK;
                break;
        default:
-               dout("Have unknown lock type %d\n", lock->fl_type);
+               doutc(cl, "Have unknown lock type %d\n", lock->fl_type);
                err = -EINVAL;
        }
 
@@ -432,13 +440,14 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
 {
        struct file_lock *lock;
        struct file_lock_context *ctx = locks_inode_context(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        int err = 0;
        int seen_fcntl = 0;
        int seen_flock = 0;
        int l = 0;
 
-       dout("encoding %d flock and %d fcntl locks\n", num_flock_locks,
-            num_fcntl_locks);
+       doutc(cl, "encoding %d flock and %d fcntl locks\n", num_flock_locks,
+             num_fcntl_locks);
 
        if (!ctx)
                return 0;
@@ -450,7 +459,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
                        err = -ENOSPC;
                        goto fail;
                }
-               err = lock_to_ceph_filelock(lock, &flocks[l]);
+               err = lock_to_ceph_filelock(inode, lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
@@ -461,7 +470,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
                        err = -ENOSPC;
                        goto fail;
                }
-               err = lock_to_ceph_filelock(lock, &flocks[l]);
+               err = lock_to_ceph_filelock(inode, lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
index de798444bb974168561a3318dfb54519bc762ae1..d95eb525519a644148af693ba68972c84b2ba941 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/bits.h>
 #include <linux/ktime.h>
 #include <linux/bitmap.h>
+#include <linux/mnt_idmapping.h>
 
 #include "super.h"
 #include "mds_client.h"
@@ -411,6 +412,7 @@ static int parse_reply_info_readdir(void **p, void *end,
                                    u64 features)
 {
        struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
+       struct ceph_client *cl = req->r_mdsc->fsc->client;
        u32 num, i = 0;
        int err;
 
@@ -433,7 +435,7 @@ static int parse_reply_info_readdir(void **p, void *end,
        BUG_ON(!info->dir_entries);
        if ((unsigned long)(info->dir_entries + num) >
            (unsigned long)info->dir_entries + info->dir_buf_size) {
-               pr_err("dir contents are larger than expected\n");
+               pr_err_client(cl, "dir contents are larger than expected\n");
                WARN_ON(1);
                goto bad;
        }
@@ -454,7 +456,7 @@ static int parse_reply_info_readdir(void **p, void *end,
                ceph_decode_need(p, end, _name_len, bad);
                _name = *p;
                *p += _name_len;
-               dout("parsed dir dname '%.*s'\n", _name_len, _name);
+               doutc(cl, "parsed dir dname '%.*s'\n", _name_len, _name);
 
                if (info->hash_order)
                        rde->raw_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
@@ -514,8 +516,8 @@ static int parse_reply_info_readdir(void **p, void *end,
                rde->is_nokey = false;
                err = ceph_fname_to_usr(&fname, &tname, &oname, &rde->is_nokey);
                if (err) {
-                       pr_err("%s unable to decode %.*s, got %d\n", __func__,
-                              _name_len, _name, err);
+                       pr_err_client(cl, "unable to decode %.*s, got %d\n",
+                                     _name_len, _name, err);
                        goto out_bad;
                }
                rde->name = oname.name;
@@ -539,7 +541,7 @@ done:
 bad:
        err = -EIO;
 out_bad:
-       pr_err("problem parsing dir contents %d\n", err);
+       pr_err_client(cl, "problem parsing dir contents %d\n", err);
        return err;
 }
 
@@ -570,10 +572,11 @@ bad:
 static int ceph_parse_deleg_inos(void **p, void *end,
                                 struct ceph_mds_session *s)
 {
+       struct ceph_client *cl = s->s_mdsc->fsc->client;
        u32 sets;
 
        ceph_decode_32_safe(p, end, sets, bad);
-       dout("got %u sets of delegated inodes\n", sets);
+       doutc(cl, "got %u sets of delegated inodes\n", sets);
        while (sets--) {
                u64 start, len;
 
@@ -582,8 +585,9 @@ static int ceph_parse_deleg_inos(void **p, void *end,
 
                /* Don't accept a delegation of system inodes */
                if (start < CEPH_INO_SYSTEM_BASE) {
-                       pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
-                                       start, len);
+                       pr_warn_ratelimited_client(cl,
+                               "ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
+                               start, len);
                        continue;
                }
                while (len--) {
@@ -591,10 +595,10 @@ static int ceph_parse_deleg_inos(void **p, void *end,
                                            DELEGATED_INO_AVAILABLE,
                                            GFP_KERNEL);
                        if (!err) {
-                               dout("added delegated inode 0x%llx\n",
-                                    start - 1);
+                               doutc(cl, "added delegated inode 0x%llx\n", start - 1);
                        } else if (err == -EBUSY) {
-                               pr_warn("MDS delegated inode 0x%llx more than once.\n",
+                               pr_warn_client(cl,
+                                       "MDS delegated inode 0x%llx more than once.\n",
                                        start - 1);
                        } else {
                                return err;
@@ -744,6 +748,7 @@ static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
                            struct ceph_mds_request *req, u64 features)
 {
        struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
+       struct ceph_client *cl = s->s_mdsc->fsc->client;
        void *p, *end;
        u32 len;
        int err;
@@ -783,7 +788,7 @@ static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
 bad:
        err = -EIO;
 out_bad:
-       pr_err("mds parse_reply err %d\n", err);
+       pr_err_client(cl, "mds parse_reply err %d\n", err);
        ceph_msg_dump(msg);
        return err;
 }
@@ -830,7 +835,8 @@ static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
  */
 int ceph_wait_on_conflict_unlink(struct dentry *dentry)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
+       struct ceph_client *cl = fsc->client;
        struct dentry *pdentry = dentry->d_parent;
        struct dentry *udentry, *found = NULL;
        struct ceph_dentry_info *di;
@@ -855,8 +861,8 @@ int ceph_wait_on_conflict_unlink(struct dentry *dentry)
                        goto next;
 
                if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
-                       pr_warn("%s dentry %p:%pd async unlink bit is not set\n",
-                               __func__, dentry, dentry);
+                       pr_warn_client(cl, "dentry %p:%pd async unlink bit is not set\n",
+                                      dentry, dentry);
 
                if (!d_same_name(udentry, pdentry, &dname))
                        goto next;
@@ -872,8 +878,8 @@ next:
        if (likely(!found))
                return 0;
 
-       dout("%s dentry %p:%pd conflict with old %p:%pd\n", __func__,
-            dentry, dentry, found, found);
+       doutc(cl, "dentry %p:%pd conflict with old %p:%pd\n", dentry, dentry,
+             found, found);
 
        err = wait_on_bit(&di->flags, CEPH_DENTRY_ASYNC_UNLINK_BIT,
                          TASK_KILLABLE);
@@ -957,6 +963,7 @@ static int __verify_registered_session(struct ceph_mds_client *mdsc,
 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
                                                 int mds)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_session *s;
 
        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
@@ -973,7 +980,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
                int newmax = 1 << get_count_order(mds + 1);
                struct ceph_mds_session **sa;
 
-               dout("%s: realloc to %d\n", __func__, newmax);
+               doutc(cl, "realloc to %d\n", newmax);
                sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
                if (!sa)
                        goto fail_realloc;
@@ -986,7 +993,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
                mdsc->max_sessions = newmax;
        }
 
-       dout("%s: mds%d\n", __func__, mds);
+       doutc(cl, "mds%d\n", mds);
        s->s_mdsc = mdsc;
        s->s_mds = mds;
        s->s_state = CEPH_MDS_SESSION_NEW;
@@ -1029,7 +1036,7 @@ fail_realloc:
 static void __unregister_session(struct ceph_mds_client *mdsc,
                               struct ceph_mds_session *s)
 {
-       dout("__unregister_session mds%d %p\n", s->s_mds, s);
+       doutc(mdsc->fsc->client, "mds%d %p\n", s->s_mds, s);
        BUG_ON(mdsc->sessions[s->s_mds] != s);
        mdsc->sessions[s->s_mds] = NULL;
        ceph_con_close(&s->s_con);
@@ -1116,6 +1123,8 @@ void ceph_mdsc_release_request(struct kref *kref)
        kfree(req->r_path1);
        kfree(req->r_path2);
        put_cred(req->r_cred);
+       if (req->r_mnt_idmap)
+               mnt_idmap_put(req->r_mnt_idmap);
        if (req->r_pagelist)
                ceph_pagelist_release(req->r_pagelist);
        kfree(req->r_fscrypt_auth);
@@ -1155,6 +1164,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
                               struct ceph_mds_request *req,
                               struct inode *dir)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        int ret = 0;
 
        req->r_tid = ++mdsc->last_tid;
@@ -1162,18 +1172,20 @@ static void __register_request(struct ceph_mds_client *mdsc,
                ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
                                        req->r_num_caps);
                if (ret < 0) {
-                       pr_err("__register_request %p "
-                              "failed to reserve caps: %d\n", req, ret);
+                       pr_err_client(cl, "%p failed to reserve caps: %d\n",
+                                     req, ret);
                        /* set req->r_err to fail early from __do_request */
                        req->r_err = ret;
                        return;
                }
        }
-       dout("__register_request %p tid %lld\n", req, req->r_tid);
+       doutc(cl, "%p tid %lld\n", req, req->r_tid);
        ceph_mdsc_get_request(req);
        insert_request(&mdsc->request_tree, req);
 
        req->r_cred = get_current_cred();
+       if (!req->r_mnt_idmap)
+               req->r_mnt_idmap = &nop_mnt_idmap;
 
        if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
                mdsc->oldest_tid = req->r_tid;
@@ -1192,7 +1204,7 @@ static void __register_request(struct ceph_mds_client *mdsc,
 static void __unregister_request(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
 {
-       dout("__unregister_request %p tid %lld\n", req, req->r_tid);
+       doutc(mdsc->fsc->client, "%p tid %lld\n", req, req->r_tid);
 
        /* Never leave an unregistered request on an unsafe list! */
        list_del_init(&req->r_unsafe_item);
@@ -1278,6 +1290,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
        int mds = -1;
        u32 hash = req->r_direct_hash;
        bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
+       struct ceph_client *cl = mdsc->fsc->client;
 
        if (random)
                *random = false;
@@ -1289,8 +1302,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
        if (req->r_resend_mds >= 0 &&
            (__have_session(mdsc, req->r_resend_mds) ||
             ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
-               dout("%s using resend_mds mds%d\n", __func__,
-                    req->r_resend_mds);
+               doutc(cl, "using resend_mds mds%d\n", req->r_resend_mds);
                return req->r_resend_mds;
        }
 
@@ -1307,7 +1319,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                        rcu_read_lock();
                        inode = get_nonsnap_parent(req->r_dentry);
                        rcu_read_unlock();
-                       dout("%s using snapdir's parent %p\n", __func__, inode);
+                       doutc(cl, "using snapdir's parent %p %llx.%llx\n",
+                             inode, ceph_vinop(inode));
                }
        } else if (req->r_dentry) {
                /* ignore race with rename; old or new d_parent is okay */
@@ -1327,7 +1340,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                        /* direct snapped/virtual snapdir requests
                         * based on parent dir inode */
                        inode = get_nonsnap_parent(parent);
-                       dout("%s using nonsnap parent %p\n", __func__, inode);
+                       doutc(cl, "using nonsnap parent %p %llx.%llx\n",
+                             inode, ceph_vinop(inode));
                } else {
                        /* dentry target */
                        inode = d_inode(req->r_dentry);
@@ -1343,10 +1357,11 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                rcu_read_unlock();
        }
 
-       dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
-            hash, mode);
        if (!inode)
                goto random;
+
+       doutc(cl, "%p %llx.%llx is_hash=%d (0x%x) mode %d\n", inode,
+             ceph_vinop(inode), (int)is_hash, hash, mode);
        ci = ceph_inode(inode);
 
        if (is_hash && S_ISDIR(inode->i_mode)) {
@@ -1362,9 +1377,9 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                                get_random_bytes(&r, 1);
                                r %= frag.ndist;
                                mds = frag.dist[r];
-                               dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
-                                    __func__, inode, ceph_vinop(inode),
-                                    frag.frag, mds, (int)r, frag.ndist);
+                               doutc(cl, "%p %llx.%llx frag %u mds%d (%d/%d)\n",
+                                     inode, ceph_vinop(inode), frag.frag,
+                                     mds, (int)r, frag.ndist);
                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
                                    CEPH_MDS_STATE_ACTIVE &&
                                    !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
@@ -1377,9 +1392,8 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                        if (frag.mds >= 0) {
                                /* choose auth mds */
                                mds = frag.mds;
-                               dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
-                                    __func__, inode, ceph_vinop(inode),
-                                    frag.frag, mds);
+                               doutc(cl, "%p %llx.%llx frag %u mds%d (auth)\n",
+                                     inode, ceph_vinop(inode), frag.frag, mds);
                                if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
                                    CEPH_MDS_STATE_ACTIVE) {
                                        if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
@@ -1403,9 +1417,9 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
                goto random;
        }
        mds = cap->session->s_mds;
-       dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
-            inode, ceph_vinop(inode), mds,
-            cap == ci->i_auth_cap ? "auth " : "", cap);
+       doutc(cl, "%p %llx.%llx mds%d (%scap %p)\n", inode,
+             ceph_vinop(inode), mds,
+             cap == ci->i_auth_cap ? "auth " : "", cap);
        spin_unlock(&ci->i_ceph_lock);
 out:
        iput(inode);
@@ -1416,7 +1430,7 @@ random:
                *random = true;
 
        mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
-       dout("%s chose random mds%d\n", __func__, mds);
+       doutc(cl, "chose random mds%d\n", mds);
        return mds;
 }
 
@@ -1529,6 +1543,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
        int metadata_key_count = 0;
        struct ceph_options *opt = mdsc->fsc->client->options;
        struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
+       struct ceph_client *cl = mdsc->fsc->client;
        size_t size, count;
        void *p, *end;
        int ret;
@@ -1567,7 +1582,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
        msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
                           GFP_NOFS, false);
        if (!msg) {
-               pr_err("ENOMEM creating session open msg\n");
+               pr_err_client(cl, "ENOMEM creating session open msg\n");
                return ERR_PTR(-ENOMEM);
        }
        p = msg->front.iov_base;
@@ -1607,14 +1622,14 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
 
        ret = encode_supported_features(&p, end);
        if (ret) {
-               pr_err("encode_supported_features failed!\n");
+               pr_err_client(cl, "encode_supported_features failed!\n");
                ceph_msg_put(msg);
                return ERR_PTR(ret);
        }
 
        ret = encode_metric_spec(&p, end);
        if (ret) {
-               pr_err("encode_metric_spec failed!\n");
+               pr_err_client(cl, "encode_metric_spec failed!\n");
                ceph_msg_put(msg);
                return ERR_PTR(ret);
        }
@@ -1642,8 +1657,8 @@ static int __open_session(struct ceph_mds_client *mdsc,
 
        /* wait for mds to go active? */
        mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
-       dout("open_session to mds%d (%s)\n", mds,
-            ceph_mds_state_name(mstate));
+       doutc(mdsc->fsc->client, "open_session to mds%d (%s)\n", mds,
+             ceph_mds_state_name(mstate));
        session->s_state = CEPH_MDS_SESSION_OPENING;
        session->s_renew_requested = jiffies;
 
@@ -1686,8 +1701,9 @@ struct ceph_mds_session *
 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
 {
        struct ceph_mds_session *session;
+       struct ceph_client *cl = mdsc->fsc->client;
 
-       dout("open_export_target_session to mds%d\n", target);
+       doutc(cl, "to mds%d\n", target);
 
        mutex_lock(&mdsc->mutex);
        session = __open_export_target_session(mdsc, target);
@@ -1702,13 +1718,14 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
        struct ceph_mds_info *mi;
        struct ceph_mds_session *ts;
        int i, mds = session->s_mds;
+       struct ceph_client *cl = mdsc->fsc->client;
 
        if (mds >= mdsc->mdsmap->possible_max_rank)
                return;
 
        mi = &mdsc->mdsmap->m_info[mds];
-       dout("open_export_target_sessions for mds%d (%d targets)\n",
-            session->s_mds, mi->num_export_targets);
+       doutc(cl, "for mds%d (%d targets)\n", session->s_mds,
+             mi->num_export_targets);
 
        for (i = 0; i < mi->num_export_targets; i++) {
                ts = __open_export_target_session(mdsc, mi->export_targets[i]);
@@ -1731,11 +1748,13 @@ void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
 static void detach_cap_releases(struct ceph_mds_session *session,
                                struct list_head *target)
 {
+       struct ceph_client *cl = session->s_mdsc->fsc->client;
+
        lockdep_assert_held(&session->s_cap_lock);
 
        list_splice_init(&session->s_cap_releases, target);
        session->s_num_cap_releases = 0;
-       dout("dispose_cap_releases mds%d\n", session->s_mds);
+       doutc(cl, "mds%d\n", session->s_mds);
 }
 
 static void dispose_cap_releases(struct ceph_mds_client *mdsc,
@@ -1753,16 +1772,17 @@ static void dispose_cap_releases(struct ceph_mds_client *mdsc,
 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
                                     struct ceph_mds_session *session)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        struct rb_node *p;
 
-       dout("cleanup_session_requests mds%d\n", session->s_mds);
+       doutc(cl, "mds%d\n", session->s_mds);
        mutex_lock(&mdsc->mutex);
        while (!list_empty(&session->s_unsafe)) {
                req = list_first_entry(&session->s_unsafe,
                                       struct ceph_mds_request, r_unsafe_item);
-               pr_warn_ratelimited(" dropping unsafe request %llu\n",
-                                   req->r_tid);
+               pr_warn_ratelimited_client(cl, " dropping unsafe request %llu\n",
+                                          req->r_tid);
                if (req->r_target_inode)
                        mapping_set_error(req->r_target_inode->i_mapping, -EIO);
                if (req->r_unsafe_dir)
@@ -1791,13 +1811,14 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
                              int (*cb)(struct inode *, int mds, void *),
                              void *arg)
 {
+       struct ceph_client *cl = session->s_mdsc->fsc->client;
        struct list_head *p;
        struct ceph_cap *cap;
        struct inode *inode, *last_inode = NULL;
        struct ceph_cap *old_cap = NULL;
        int ret;
 
-       dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
+       doutc(cl, "%p mds%d\n", session, session->s_mds);
        spin_lock(&session->s_cap_lock);
        p = session->s_caps.next;
        while (p != &session->s_caps) {
@@ -1828,8 +1849,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session,
                spin_lock(&session->s_cap_lock);
                p = p->next;
                if (!cap->ci) {
-                       dout("iterate_session_caps  finishing cap %p removal\n",
-                            cap);
+                       doutc(cl, "finishing cap %p removal\n", cap);
                        BUG_ON(cap->session != session);
                        cap->session = NULL;
                        list_del_init(&cap->session_caps);
@@ -1858,6 +1878,7 @@ out:
 static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        bool invalidate = false;
        struct ceph_cap *cap;
        int iputs = 0;
@@ -1865,8 +1886,8 @@ static int remove_session_caps_cb(struct inode *inode, int mds, void *arg)
        spin_lock(&ci->i_ceph_lock);
        cap = __get_cap_for_mds(ci, mds);
        if (cap) {
-               dout(" removing cap %p, ci is %p, inode is %p\n",
-                    cap, ci, &ci->netfs.inode);
+               doutc(cl, " removing cap %p, ci is %p, inode is %p\n",
+                     cap, ci, &ci->netfs.inode);
 
                iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
        }
@@ -1890,7 +1911,7 @@ static void remove_session_caps(struct ceph_mds_session *session)
        struct super_block *sb = fsc->sb;
        LIST_HEAD(dispose);
 
-       dout("remove_session_caps on %p\n", session);
+       doutc(fsc->client, "on %p\n", session);
        ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
 
        wake_up_all(&fsc->mdsc->cap_flushing_wq);
@@ -1971,7 +1992,9 @@ static int wake_up_session_cb(struct inode *inode, int mds, void *arg)
 
 static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
 {
-       dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
+       struct ceph_client *cl = session->s_mdsc->fsc->client;
+
+       doutc(cl, "session %p mds%d\n", session, session->s_mds);
        ceph_iterate_session_caps(session, wake_up_session_cb,
                                  (void *)(unsigned long)ev);
 }
@@ -1985,25 +2008,26 @@ static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
 static int send_renew_caps(struct ceph_mds_client *mdsc,
                           struct ceph_mds_session *session)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_msg *msg;
        int state;
 
        if (time_after_eq(jiffies, session->s_cap_ttl) &&
            time_after_eq(session->s_cap_ttl, session->s_renew_requested))
-               pr_info("mds%d caps stale\n", session->s_mds);
+               pr_info_client(cl, "mds%d caps stale\n", session->s_mds);
        session->s_renew_requested = jiffies;
 
        /* do not try to renew caps until a recovering mds has reconnected
         * with its clients. */
        state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
        if (state < CEPH_MDS_STATE_RECONNECT) {
-               dout("send_renew_caps ignoring mds%d (%s)\n",
-                    session->s_mds, ceph_mds_state_name(state));
+               doutc(cl, "ignoring mds%d (%s)\n", session->s_mds,
+                     ceph_mds_state_name(state));
                return 0;
        }
 
-       dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
-               ceph_mds_state_name(state));
+       doutc(cl, "to mds%d (%s)\n", session->s_mds,
+             ceph_mds_state_name(state));
        msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
                                      ++session->s_renew_seq);
        if (!msg)
@@ -2015,10 +2039,11 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
                             struct ceph_mds_session *session, u64 seq)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_msg *msg;
 
-       dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
-            session->s_mds, ceph_session_state_name(session->s_state), seq);
+       doutc(cl, "to mds%d (%s)s seq %lld\n", session->s_mds,
+             ceph_session_state_name(session->s_state), seq);
        msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
        if (!msg)
                return -ENOMEM;
@@ -2035,6 +2060,7 @@ static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
 static void renewed_caps(struct ceph_mds_client *mdsc,
                         struct ceph_mds_session *session, int is_renew)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        int was_stale;
        int wake = 0;
 
@@ -2046,15 +2072,17 @@ static void renewed_caps(struct ceph_mds_client *mdsc,
 
        if (was_stale) {
                if (time_before(jiffies, session->s_cap_ttl)) {
-                       pr_info("mds%d caps renewed\n", session->s_mds);
+                       pr_info_client(cl, "mds%d caps renewed\n",
+                                      session->s_mds);
                        wake = 1;
                } else {
-                       pr_info("mds%d caps still stale\n", session->s_mds);
+                       pr_info_client(cl, "mds%d caps still stale\n",
+                                      session->s_mds);
                }
        }
-       dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
-            session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
-            time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
+       doutc(cl, "mds%d ttl now %lu, was %s, now %s\n", session->s_mds,
+             session->s_cap_ttl, was_stale ? "stale" : "fresh",
+             time_before(jiffies, session->s_cap_ttl) ? "stale" : "fresh");
        spin_unlock(&session->s_cap_lock);
 
        if (wake)
@@ -2066,11 +2094,11 @@ static void renewed_caps(struct ceph_mds_client *mdsc,
  */
 static int request_close_session(struct ceph_mds_session *session)
 {
+       struct ceph_client *cl = session->s_mdsc->fsc->client;
        struct ceph_msg *msg;
 
-       dout("request_close_session mds%d state %s seq %lld\n",
-            session->s_mds, ceph_session_state_name(session->s_state),
-            session->s_seq);
+       doutc(cl, "mds%d state %s seq %lld\n", session->s_mds,
+             ceph_session_state_name(session->s_state), session->s_seq);
        msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
                                      session->s_seq);
        if (!msg)
@@ -2126,6 +2154,8 @@ out:
  */
 static int trim_caps_cb(struct inode *inode, int mds, void *arg)
 {
+       struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        int *remaining = arg;
        struct ceph_inode_info *ci = ceph_inode(inode);
        int used, wanted, oissued, mine;
@@ -2145,9 +2175,10 @@ static int trim_caps_cb(struct inode *inode, int mds, void *arg)
        wanted = __ceph_caps_file_wanted(ci);
        oissued = __ceph_caps_issued_other(ci, cap);
 
-       dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
-            inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
-            ceph_cap_string(used), ceph_cap_string(wanted));
+       doutc(cl, "%p %llx.%llx cap %p mine %s oissued %s used %s wanted %s\n",
+             inode, ceph_vinop(inode), cap, ceph_cap_string(mine),
+             ceph_cap_string(oissued), ceph_cap_string(used),
+             ceph_cap_string(wanted));
        if (cap == ci->i_auth_cap) {
                if (ci->i_dirty_caps || ci->i_flushing_caps ||
                    !list_empty(&ci->i_cap_snaps))
@@ -2173,7 +2204,7 @@ static int trim_caps_cb(struct inode *inode, int mds, void *arg)
 
        if (oissued) {
                /* we aren't the only cap.. just remove us */
-               ceph_remove_cap(cap, true);
+               ceph_remove_cap(mdsc, cap, true);
                (*remaining)--;
        } else {
                struct dentry *dentry;
@@ -2187,8 +2218,8 @@ static int trim_caps_cb(struct inode *inode, int mds, void *arg)
                        count = atomic_read(&inode->i_count);
                        if (count == 1)
                                (*remaining)--;
-                       dout("trim_caps_cb %p cap %p pruned, count now %d\n",
-                            inode, cap, count);
+                       doutc(cl, "%p %llx.%llx cap %p pruned, count now %d\n",
+                             inode, ceph_vinop(inode), cap, count);
                } else {
                        dput(dentry);
                }
@@ -2207,17 +2238,18 @@ int ceph_trim_caps(struct ceph_mds_client *mdsc,
                   struct ceph_mds_session *session,
                   int max_caps)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        int trim_caps = session->s_nr_caps - max_caps;
 
-       dout("trim_caps mds%d start: %d / %d, trim %d\n",
-            session->s_mds, session->s_nr_caps, max_caps, trim_caps);
+       doutc(cl, "mds%d start: %d / %d, trim %d\n", session->s_mds,
+             session->s_nr_caps, max_caps, trim_caps);
        if (trim_caps > 0) {
                int remaining = trim_caps;
 
                ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
-               dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
-                    session->s_mds, session->s_nr_caps, max_caps,
-                       trim_caps - remaining);
+               doutc(cl, "mds%d done: %d / %d, trimmed %d\n",
+                     session->s_mds, session->s_nr_caps, max_caps,
+                     trim_caps - remaining);
        }
 
        ceph_flush_cap_releases(mdsc, session);
@@ -2227,6 +2259,7 @@ int ceph_trim_caps(struct ceph_mds_client *mdsc,
 static int check_caps_flush(struct ceph_mds_client *mdsc,
                            u64 want_flush_tid)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        int ret = 1;
 
        spin_lock(&mdsc->cap_dirty_lock);
@@ -2235,8 +2268,8 @@ static int check_caps_flush(struct ceph_mds_client *mdsc,
                        list_first_entry(&mdsc->cap_flush_list,
                                         struct ceph_cap_flush, g_list);
                if (cf->tid <= want_flush_tid) {
-                       dout("check_caps_flush still flushing tid "
-                            "%llu <= %llu\n", cf->tid, want_flush_tid);
+                       doutc(cl, "still flushing tid %llu <= %llu\n",
+                             cf->tid, want_flush_tid);
                        ret = 0;
                }
        }
@@ -2252,12 +2285,14 @@ static int check_caps_flush(struct ceph_mds_client *mdsc,
 static void wait_caps_flush(struct ceph_mds_client *mdsc,
                            u64 want_flush_tid)
 {
-       dout("check_caps_flush want %llu\n", want_flush_tid);
+       struct ceph_client *cl = mdsc->fsc->client;
+
+       doutc(cl, "want %llu\n", want_flush_tid);
 
        wait_event(mdsc->cap_flushing_wq,
                   check_caps_flush(mdsc, want_flush_tid));
 
-       dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
+       doutc(cl, "ok, flushed thru %llu\n", want_flush_tid);
 }
 
 /*
@@ -2266,6 +2301,7 @@ static void wait_caps_flush(struct ceph_mds_client *mdsc,
 static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_msg *msg = NULL;
        struct ceph_mds_cap_release *head;
        struct ceph_mds_cap_item *item;
@@ -2324,7 +2360,7 @@ again:
                        msg->front.iov_len += sizeof(*cap_barrier);
 
                        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
-                       dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
+                       doutc(cl, "mds%d %p\n", session->s_mds, msg);
                        ceph_con_send(&session->s_con, msg);
                        msg = NULL;
                }
@@ -2344,13 +2380,13 @@ again:
                msg->front.iov_len += sizeof(*cap_barrier);
 
                msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
-               dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
+               doutc(cl, "mds%d %p\n", session->s_mds, msg);
                ceph_con_send(&session->s_con, msg);
        }
        return;
 out_err:
-       pr_err("send_cap_releases mds%d, failed to allocate message\n",
-               session->s_mds);
+       pr_err_client(cl, "mds%d, failed to allocate message\n",
+                     session->s_mds);
        spin_lock(&session->s_cap_lock);
        list_splice(&tmp_list, &session->s_cap_releases);
        session->s_num_cap_releases += num_cap_releases;
@@ -2373,16 +2409,17 @@ static void ceph_cap_release_work(struct work_struct *work)
 void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
                             struct ceph_mds_session *session)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        if (mdsc->stopping)
                return;
 
        ceph_get_mds_session(session);
        if (queue_work(mdsc->fsc->cap_wq,
                       &session->s_cap_release_work)) {
-               dout("cap release work queued\n");
+               doutc(cl, "cap release work queued\n");
        } else {
                ceph_put_mds_session(session);
-               dout("failed to queue cap release work\n");
+               doutc(cl, "failed to queue cap release work\n");
        }
 }
 
@@ -2410,13 +2447,14 @@ static void ceph_cap_reclaim_work(struct work_struct *work)
 
 void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        if (mdsc->stopping)
                return;
 
         if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
-                dout("caps reclaim work queued\n");
+                doutc(cl, "caps reclaim work queued\n");
         } else {
-                dout("failed to queue caps release work\n");
+                doutc(cl, "failed to queue caps release work\n");
         }
 }
 
@@ -2588,6 +2626,7 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
 
 /**
  * ceph_mdsc_build_path - build a path string to a given dentry
+ * @mdsc: mds client
  * @dentry: dentry to which path should be built
  * @plen: returned length of string
  * @pbase: returned base inode number
@@ -2607,9 +2646,10 @@ static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
  * Encode hidden .snap dirs as a double /, i.e.
  *   foo/.snap/bar -> foo//bar
  */
-char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
-                          int for_wire)
+char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
+                          int *plen, u64 *pbase, int for_wire)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct dentry *cur;
        struct inode *inode;
        char *path;
@@ -2635,8 +2675,7 @@ retry:
                spin_lock(&cur->d_lock);
                inode = d_inode(cur);
                if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
-                       dout("build_path path+%d: %p SNAPDIR\n",
-                            pos, cur);
+                       doutc(cl, "path+%d: %p SNAPDIR\n", pos, cur);
                        spin_unlock(&cur->d_lock);
                        parent = dget_parent(cur);
                } else if (for_wire && inode && dentry != cur &&
@@ -2714,21 +2753,21 @@ retry:
                 * A rename didn't occur, but somehow we didn't end up where
                 * we thought we would. Throw a warning and try again.
                 */
-               pr_warn("build_path did not end path lookup where expected (pos = %d)\n",
-                       pos);
+               pr_warn_client(cl, "did not end path lookup where expected (pos = %d)\n",
+                              pos);
                goto retry;
        }
 
        *pbase = base;
        *plen = PATH_MAX - 1 - pos;
-       dout("build_path on %p %d built %llx '%.*s'\n",
-            dentry, d_count(dentry), base, *plen, path + pos);
+       doutc(cl, "on %p %d built %llx '%.*s'\n", dentry, d_count(dentry),
+             base, *plen, path + pos);
        return path + pos;
 }
 
-static int build_dentry_path(struct dentry *dentry, struct inode *dir,
-                            const char **ppath, int *ppathlen, u64 *pino,
-                            bool *pfreepath, bool parent_locked)
+static int build_dentry_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
+                            struct inode *dir, const char **ppath, int *ppathlen,
+                            u64 *pino, bool *pfreepath, bool parent_locked)
 {
        char *path;
 
@@ -2744,7 +2783,7 @@ static int build_dentry_path(struct dentry *dentry, struct inode *dir,
                return 0;
        }
        rcu_read_unlock();
-       path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
+       path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
        if (IS_ERR(path))
                return PTR_ERR(path);
        *ppath = path;
@@ -2756,6 +2795,7 @@ static int build_inode_path(struct inode *inode,
                            const char **ppath, int *ppathlen, u64 *pino,
                            bool *pfreepath)
 {
+       struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct dentry *dentry;
        char *path;
 
@@ -2765,7 +2805,7 @@ static int build_inode_path(struct inode *inode,
                return 0;
        }
        dentry = d_find_alias(inode);
-       path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
+       path = ceph_mdsc_build_path(mdsc, dentry, ppathlen, pino, 1);
        dput(dentry);
        if (IS_ERR(path))
                return PTR_ERR(path);
@@ -2778,27 +2818,28 @@ static int build_inode_path(struct inode *inode,
  * request arguments may be specified via an inode *, a dentry *, or
  * an explicit ino+path.
  */
-static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
-                                 struct inode *rdiri, const char *rpath,
-                                 u64 rino, const char **ppath, int *pathlen,
-                                 u64 *ino, bool *freepath, bool parent_locked)
+static int set_request_path_attr(struct ceph_mds_client *mdsc, struct inode *rinode,
+                                struct dentry *rdentry, struct inode *rdiri,
+                                const char *rpath, u64 rino, const char **ppath,
+                                int *pathlen, u64 *ino, bool *freepath,
+                                bool parent_locked)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        int r = 0;
 
        if (rinode) {
                r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
-               dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
-                    ceph_snap(rinode));
+               doutc(cl, " inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
+                     ceph_snap(rinode));
        } else if (rdentry) {
-               r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
+               r = build_dentry_path(mdsc, rdentry, rdiri, ppath, pathlen, ino,
                                        freepath, parent_locked);
-               dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
-                    *ppath);
+               doutc(cl, " dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, *ppath);
        } else if (rpath || rino) {
                *ino = rino;
                *ppath = rpath;
                *pathlen = rpath ? strlen(rpath) : 0;
-               dout(" path %.*s\n", *pathlen, rpath);
+               doutc(cl, " path %.*s\n", *pathlen, rpath);
        }
 
        return r;
@@ -2840,6 +2881,17 @@ static void encode_mclientrequest_tail(void **p,
        }
 }
 
+static inline u16 mds_supported_head_version(struct ceph_mds_session *session)
+{
+       if (!test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD, &session->s_features))
+               return 1;
+
+       if (!test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features))
+               return 2;
+
+       return CEPH_MDS_REQUEST_HEAD_VERSION;
+}
+
 static struct ceph_mds_request_head_legacy *
 find_legacy_request_head(void *p, u64 features)
 {
@@ -2861,6 +2913,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
 {
        int mds = session->s_mds;
        struct ceph_mds_client *mdsc = session->s_mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_msg *msg;
        struct ceph_mds_request_head_legacy *lhead;
        const char *path1 = NULL;
@@ -2874,10 +2927,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
        void *p, *end;
        int ret;
        bool legacy = !(session->s_con.peer_features & CEPH_FEATURE_FS_BTIME);
-       bool old_version = !test_bit(CEPHFS_FEATURE_32BITS_RETRY_FWD,
-                                    &session->s_features);
+       u16 request_head_version = mds_supported_head_version(session);
+       kuid_t caller_fsuid = req->r_cred->fsuid;
+       kgid_t caller_fsgid = req->r_cred->fsgid;
 
-       ret = set_request_path_attr(req->r_inode, req->r_dentry,
+       ret = set_request_path_attr(mdsc, req->r_inode, req->r_dentry,
                              req->r_parent, req->r_path1, req->r_ino1.ino,
                              &path1, &pathlen1, &ino1, &freepath1,
                              test_bit(CEPH_MDS_R_PARENT_LOCKED,
@@ -2891,7 +2945,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
        if (req->r_old_dentry &&
            !(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
                old_dentry = req->r_old_dentry;
-       ret = set_request_path_attr(NULL, old_dentry,
+       ret = set_request_path_attr(mdsc, NULL, old_dentry,
                              req->r_old_dentry_dir,
                              req->r_path2, req->r_ino2.ino,
                              &path2, &pathlen2, &ino2, &freepath2, true);
@@ -2916,8 +2970,10 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
         */
        if (legacy)
                len = sizeof(struct ceph_mds_request_head_legacy);
-       else if (old_version)
+       else if (request_head_version == 1)
                len = sizeof(struct ceph_mds_request_head_old);
+       else if (request_head_version == 2)
+               len = offsetofend(struct ceph_mds_request_head, ext_num_fwd);
        else
                len = sizeof(struct ceph_mds_request_head);
 
@@ -2967,6 +3023,30 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
        lhead = find_legacy_request_head(msg->front.iov_base,
                                         session->s_con.peer_features);
 
+       if ((req->r_mnt_idmap != &nop_mnt_idmap) &&
+           !test_bit(CEPHFS_FEATURE_HAS_OWNER_UIDGID, &session->s_features)) {
+               WARN_ON_ONCE(!IS_CEPH_MDS_OP_NEWINODE(req->r_op));
+
+               if (enable_unsafe_idmap) {
+                       pr_warn_once_client(cl,
+                               "idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
+                               " is not supported by MDS. UID/GID-based restrictions may"
+                               " not work properly.\n");
+
+                       caller_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
+                                                  VFSUIDT_INIT(req->r_cred->fsuid));
+                       caller_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
+                                                  VFSGIDT_INIT(req->r_cred->fsgid));
+               } else {
+                       pr_err_ratelimited_client(cl,
+                               "idmapped mount is used and CEPHFS_FEATURE_HAS_OWNER_UIDGID"
+                               " is not supported by MDS. Fail request with -EIO.\n");
+
+                       ret = -EIO;
+                       goto out_err;
+               }
+       }
+
        /*
         * The ceph_mds_request_head_legacy didn't contain a version field, and
         * one was added when we moved the message version from 3->4.
@@ -2974,17 +3054,40 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
        if (legacy) {
                msg->hdr.version = cpu_to_le16(3);
                p = msg->front.iov_base + sizeof(*lhead);
-       } else if (old_version) {
+       } else if (request_head_version == 1) {
                struct ceph_mds_request_head_old *ohead = msg->front.iov_base;
 
                msg->hdr.version = cpu_to_le16(4);
                ohead->version = cpu_to_le16(1);
                p = msg->front.iov_base + sizeof(*ohead);
+       } else if (request_head_version == 2) {
+               struct ceph_mds_request_head *nhead = msg->front.iov_base;
+
+               msg->hdr.version = cpu_to_le16(6);
+               nhead->version = cpu_to_le16(2);
+
+               p = msg->front.iov_base + offsetofend(struct ceph_mds_request_head, ext_num_fwd);
        } else {
                struct ceph_mds_request_head *nhead = msg->front.iov_base;
+               kuid_t owner_fsuid;
+               kgid_t owner_fsgid;
 
                msg->hdr.version = cpu_to_le16(6);
                nhead->version = cpu_to_le16(CEPH_MDS_REQUEST_HEAD_VERSION);
+               nhead->struct_len = cpu_to_le32(sizeof(struct ceph_mds_request_head));
+
+               if (IS_CEPH_MDS_OP_NEWINODE(req->r_op)) {
+                       owner_fsuid = from_vfsuid(req->r_mnt_idmap, &init_user_ns,
+                                               VFSUIDT_INIT(req->r_cred->fsuid));
+                       owner_fsgid = from_vfsgid(req->r_mnt_idmap, &init_user_ns,
+                                               VFSGIDT_INIT(req->r_cred->fsgid));
+                       nhead->owner_uid = cpu_to_le32(from_kuid(&init_user_ns, owner_fsuid));
+                       nhead->owner_gid = cpu_to_le32(from_kgid(&init_user_ns, owner_fsgid));
+               } else {
+                       nhead->owner_uid = cpu_to_le32(-1);
+                       nhead->owner_gid = cpu_to_le32(-1);
+               }
+
                p = msg->front.iov_base + sizeof(*nhead);
        }
 
@@ -2993,9 +3096,9 @@ static struct ceph_msg *create_request_message(struct ceph_mds_session *session,
        lhead->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
        lhead->op = cpu_to_le32(req->r_op);
        lhead->caller_uid = cpu_to_le32(from_kuid(&init_user_ns,
-                                                 req->r_cred->fsuid));
+                                                 caller_fsuid));
        lhead->caller_gid = cpu_to_le32(from_kgid(&init_user_ns,
-                                                 req->r_cred->fsgid));
+                                                 caller_fsgid));
        lhead->ino = cpu_to_le64(req->r_deleg_ino);
        lhead->args = req->r_args;
 
@@ -3099,6 +3202,7 @@ static int __prepare_send_request(struct ceph_mds_session *session,
 {
        int mds = session->s_mds;
        struct ceph_mds_client *mdsc = session->s_mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request_head_legacy *lhead;
        struct ceph_mds_request_head *nhead;
        struct ceph_msg *msg;
@@ -3117,8 +3221,8 @@ static int __prepare_send_request(struct ceph_mds_session *session,
               old_max_retry = 1 << (old_max_retry * BITS_PER_BYTE);
               if ((old_version && req->r_attempts >= old_max_retry) ||
                   ((uint32_t)req->r_attempts >= U32_MAX)) {
-                       pr_warn_ratelimited("%s request tid %llu seq overflow\n",
-                                           __func__, req->r_tid);
+                       pr_warn_ratelimited_client(cl, "request tid %llu seq overflow\n",
+                                                  req->r_tid);
                        return -EMULTIHOP;
               }
        }
@@ -3133,8 +3237,8 @@ static int __prepare_send_request(struct ceph_mds_session *session,
                else
                        req->r_sent_on_mseq = -1;
        }
-       dout("%s %p tid %lld %s (attempt %d)\n", __func__, req,
-            req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
+       doutc(cl, "%p tid %lld %s (attempt %d)\n", req, req->r_tid,
+             ceph_mds_op_name(req->r_op), req->r_attempts);
 
        if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
                void *p;
@@ -3202,7 +3306,7 @@ static int __prepare_send_request(struct ceph_mds_session *session,
                nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
        }
 
-       dout(" r_parent = %p\n", req->r_parent);
+       doutc(cl, " r_parent = %p\n", req->r_parent);
        return 0;
 }
 
@@ -3230,6 +3334,7 @@ static int __send_request(struct ceph_mds_session *session,
 static void __do_request(struct ceph_mds_client *mdsc,
                        struct ceph_mds_request *req)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_session *session = NULL;
        int mds = -1;
        int err = 0;
@@ -3242,29 +3347,29 @@ static void __do_request(struct ceph_mds_client *mdsc,
        }
 
        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
-               dout("do_request metadata corrupted\n");
+               doutc(cl, "metadata corrupted\n");
                err = -EIO;
                goto finish;
        }
        if (req->r_timeout &&
            time_after_eq(jiffies, req->r_started + req->r_timeout)) {
-               dout("do_request timed out\n");
+               doutc(cl, "timed out\n");
                err = -ETIMEDOUT;
                goto finish;
        }
        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
-               dout("do_request forced umount\n");
+               doutc(cl, "forced umount\n");
                err = -EIO;
                goto finish;
        }
        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
                if (mdsc->mdsmap_err) {
                        err = mdsc->mdsmap_err;
-                       dout("do_request mdsmap err %d\n", err);
+                       doutc(cl, "mdsmap err %d\n", err);
                        goto finish;
                }
                if (mdsc->mdsmap->m_epoch == 0) {
-                       dout("do_request no mdsmap, waiting for map\n");
+                       doutc(cl, "no mdsmap, waiting for map\n");
                        list_add(&req->r_wait, &mdsc->waiting_for_map);
                        return;
                }
@@ -3285,7 +3390,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
                        err = -EJUKEBOX;
                        goto finish;
                }
-               dout("do_request no mds or not active, waiting for map\n");
+               doutc(cl, "no mds or not active, waiting for map\n");
                list_add(&req->r_wait, &mdsc->waiting_for_map);
                return;
        }
@@ -3301,8 +3406,8 @@ static void __do_request(struct ceph_mds_client *mdsc,
        }
        req->r_session = ceph_get_mds_session(session);
 
-       dout("do_request mds%d session %p state %s\n", mds, session,
-            ceph_session_state_name(session->s_state));
+       doutc(cl, "mds%d session %p state %s\n", mds, session,
+             ceph_session_state_name(session->s_state));
 
        /*
         * The old ceph will crash the MDSs when see unknown OPs
@@ -3393,8 +3498,8 @@ static void __do_request(struct ceph_mds_client *mdsc,
                spin_lock(&ci->i_ceph_lock);
                cap = ci->i_auth_cap;
                if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE && mds != cap->mds) {
-                       dout("do_request session changed for auth cap %d -> %d\n",
-                            cap->session->s_mds, session->s_mds);
+                       doutc(cl, "session changed for auth cap %d -> %d\n",
+                             cap->session->s_mds, session->s_mds);
 
                        /* Remove the auth cap from old session */
                        spin_lock(&cap->session->s_cap_lock);
@@ -3421,7 +3526,7 @@ out_session:
        ceph_put_mds_session(session);
 finish:
        if (err) {
-               dout("__do_request early error %d\n", err);
+               doutc(cl, "early error %d\n", err);
                req->r_err = err;
                complete_request(mdsc, req);
                __unregister_request(mdsc, req);
@@ -3435,6 +3540,7 @@ finish:
 static void __wake_requests(struct ceph_mds_client *mdsc,
                            struct list_head *head)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        LIST_HEAD(tmp_list);
 
@@ -3444,7 +3550,8 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
                req = list_entry(tmp_list.next,
                                 struct ceph_mds_request, r_wait);
                list_del_init(&req->r_wait);
-               dout(" wake request %p tid %llu\n", req, req->r_tid);
+               doutc(cl, " wake request %p tid %llu\n", req,
+                     req->r_tid);
                __do_request(mdsc, req);
        }
 }
@@ -3455,10 +3562,11 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
  */
 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        struct rb_node *p = rb_first(&mdsc->request_tree);
 
-       dout("kick_requests mds%d\n", mds);
+       doutc(cl, "kick_requests mds%d\n", mds);
        while (p) {
                req = rb_entry(p, struct ceph_mds_request, r_node);
                p = rb_next(p);
@@ -3468,7 +3576,7 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
                        continue; /* only new requests */
                if (req->r_session &&
                    req->r_session->s_mds == mds) {
-                       dout(" kicking tid %llu\n", req->r_tid);
+                       doutc(cl, " kicking tid %llu\n", req->r_tid);
                        list_del_init(&req->r_wait);
                        __do_request(mdsc, req);
                }
@@ -3478,6 +3586,7 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
 int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
                              struct ceph_mds_request *req)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        int err = 0;
 
        /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
@@ -3499,8 +3608,7 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
        if (req->r_inode) {
                err = ceph_wait_on_async_create(req->r_inode);
                if (err) {
-                       dout("%s: wait for async create returned: %d\n",
-                            __func__, err);
+                       doutc(cl, "wait for async create returned: %d\n", err);
                        return err;
                }
        }
@@ -3508,13 +3616,12 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
        if (!err && req->r_old_inode) {
                err = ceph_wait_on_async_create(req->r_old_inode);
                if (err) {
-                       dout("%s: wait for async create returned: %d\n",
-                            __func__, err);
+                       doutc(cl, "wait for async create returned: %d\n", err);
                        return err;
                }
        }
 
-       dout("submit_request on %p for inode %p\n", req, dir);
+       doutc(cl, "submit_request on %p for inode %p\n", req, dir);
        mutex_lock(&mdsc->mutex);
        __register_request(mdsc, req, dir);
        __do_request(mdsc, req);
@@ -3527,10 +3634,11 @@ int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
                           struct ceph_mds_request *req,
                           ceph_mds_request_wait_callback_t wait_func)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        int err;
 
        /* wait */
-       dout("do_request waiting\n");
+       doutc(cl, "do_request waiting\n");
        if (wait_func) {
                err = wait_func(mdsc, req);
        } else {
@@ -3544,14 +3652,14 @@ int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
                else
                        err = timeleft;  /* killed */
        }
-       dout("do_request waited, got %d\n", err);
+       doutc(cl, "do_request waited, got %d\n", err);
        mutex_lock(&mdsc->mutex);
 
        /* only abort if we didn't race with a real reply */
        if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
                err = le32_to_cpu(req->r_reply_info.head->result);
        } else if (err < 0) {
-               dout("aborted request %lld with %d\n", req->r_tid, err);
+               doutc(cl, "aborted request %lld with %d\n", req->r_tid, err);
 
                /*
                 * ensure we aren't running concurrently with
@@ -3582,15 +3690,16 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
                         struct inode *dir,
                         struct ceph_mds_request *req)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        int err;
 
-       dout("do_request on %p\n", req);
+       doutc(cl, "do_request on %p\n", req);
 
        /* issue */
        err = ceph_mdsc_submit_request(mdsc, dir, req);
        if (!err)
                err = ceph_mdsc_wait_request(mdsc, req, NULL);
-       dout("do_request %p done, result %d\n", req, err);
+       doutc(cl, "do_request %p done, result %d\n", req, err);
        return err;
 }
 
@@ -3602,8 +3711,10 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
 {
        struct inode *dir = req->r_parent;
        struct inode *old_dir = req->r_old_dentry_dir;
+       struct ceph_client *cl = req->r_mdsc->fsc->client;
 
-       dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
+       doutc(cl, "invalidate_dir_request %p %p (complete, lease(s))\n",
+             dir, old_dir);
 
        ceph_dir_clear_complete(dir);
        if (old_dir)
@@ -3624,6 +3735,7 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req)
 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 {
        struct ceph_mds_client *mdsc = session->s_mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        struct ceph_mds_reply_head *head = msg->front.iov_base;
        struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
@@ -3634,7 +3746,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        bool close_sessions = false;
 
        if (msg->front.iov_len < sizeof(*head)) {
-               pr_err("mdsc_handle_reply got corrupt (short) reply\n");
+               pr_err_client(cl, "got corrupt (short) reply\n");
                ceph_msg_dump(msg);
                return;
        }
@@ -3644,17 +3756,17 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        mutex_lock(&mdsc->mutex);
        req = lookup_get_request(mdsc, tid);
        if (!req) {
-               dout("handle_reply on unknown tid %llu\n", tid);
+               doutc(cl, "on unknown tid %llu\n", tid);
                mutex_unlock(&mdsc->mutex);
                return;
        }
-       dout("handle_reply %p\n", req);
+       doutc(cl, "handle_reply %p\n", req);
 
        /* correct session? */
        if (req->r_session != session) {
-               pr_err("mdsc_handle_reply got %llu on session mds%d"
-                      " not mds%d\n", tid, session->s_mds,
-                      req->r_session ? req->r_session->s_mds : -1);
+               pr_err_client(cl, "got %llu on session mds%d not mds%d\n",
+                             tid, session->s_mds,
+                             req->r_session ? req->r_session->s_mds : -1);
                mutex_unlock(&mdsc->mutex);
                goto out;
        }
@@ -3662,14 +3774,14 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        /* dup? */
        if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
            (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
-               pr_warn("got a dup %s reply on %llu from mds%d\n",
-                          head->safe ? "safe" : "unsafe", tid, mds);
+               pr_warn_client(cl, "got a dup %s reply on %llu from mds%d\n",
+                              head->safe ? "safe" : "unsafe", tid, mds);
                mutex_unlock(&mdsc->mutex);
                goto out;
        }
        if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
-               pr_warn("got unsafe after safe on %llu from mds%d\n",
-                          tid, mds);
+               pr_warn_client(cl, "got unsafe after safe on %llu from mds%d\n",
+                              tid, mds);
                mutex_unlock(&mdsc->mutex);
                goto out;
        }
@@ -3692,7 +3804,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                         * response.  And even if it did, there is nothing
                         * useful we could do with a revised return value.
                         */
-                       dout("got safe reply %llu, mds%d\n", tid, mds);
+                       doutc(cl, "got safe reply %llu, mds%d\n", tid, mds);
 
                        mutex_unlock(&mdsc->mutex);
                        goto out;
@@ -3702,7 +3814,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
        }
 
-       dout("handle_reply tid %lld result %d\n", tid, result);
+       doutc(cl, "tid %lld result %d\n", tid, result);
        if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
                err = parse_reply_info(session, msg, req, (u64)-1);
        else
@@ -3742,7 +3854,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 
        mutex_lock(&session->s_mutex);
        if (err < 0) {
-               pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
+               pr_err_client(cl, "got corrupt reply mds%d(tid:%lld)\n",
+                             mds, tid);
                ceph_msg_dump(msg);
                goto out_err;
        }
@@ -3806,7 +3919,7 @@ out_err:
                        set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
                }
        } else {
-               dout("reply arrived after request %lld was aborted\n", tid);
+               doutc(cl, "reply arrived after request %lld was aborted\n", tid);
        }
        mutex_unlock(&mdsc->mutex);
 
@@ -3835,6 +3948,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
                           struct ceph_mds_session *session,
                           struct ceph_msg *msg)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req;
        u64 tid = le64_to_cpu(msg->hdr.tid);
        u32 next_mds;
@@ -3852,12 +3966,12 @@ static void handle_forward(struct ceph_mds_client *mdsc,
        req = lookup_get_request(mdsc, tid);
        if (!req) {
                mutex_unlock(&mdsc->mutex);
-               dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
+               doutc(cl, "forward tid %llu to mds%d - req dne\n", tid, next_mds);
                return;  /* dup reply? */
        }
 
        if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
-               dout("forward tid %llu aborted, unregistering\n", tid);
+               doutc(cl, "forward tid %llu aborted, unregistering\n", tid);
                __unregister_request(mdsc, req);
        } else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) {
                /*
@@ -3873,10 +3987,11 @@ static void handle_forward(struct ceph_mds_client *mdsc,
                set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
                mutex_unlock(&req->r_fill_mutex);
                aborted = true;
-               pr_warn_ratelimited("forward tid %llu seq overflow\n", tid);
+               pr_warn_ratelimited_client(cl, "forward tid %llu seq overflow\n",
+                                          tid);
        } else {
                /* resend. forward race not possible; mds would drop */
-               dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
+               doutc(cl, "forward tid %llu to mds%d (we resend)\n", tid, next_mds);
                BUG_ON(req->r_err);
                BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
                req->r_attempts = 0;
@@ -3894,7 +4009,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
        return;
 
 bad:
-       pr_err("mdsc_handle_forward decode error err=%d\n", err);
+       pr_err_client(cl, "decode error err=%d\n", err);
        ceph_msg_dump(msg);
 }
 
@@ -3933,6 +4048,7 @@ static void handle_session(struct ceph_mds_session *session,
                           struct ceph_msg *msg)
 {
        struct ceph_mds_client *mdsc = session->s_mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        int mds = session->s_mds;
        int msg_version = le16_to_cpu(msg->hdr.version);
        void *p = msg->front.iov_base;
@@ -3980,7 +4096,8 @@ static void handle_session(struct ceph_mds_session *session,
                /* version >= 5, flags   */
                ceph_decode_32_safe(&p, end, flags, bad);
                if (flags & CEPH_SESSION_BLOCKLISTED) {
-                       pr_warn("mds%d session blocklisted\n", session->s_mds);
+                       pr_warn_client(cl, "mds%d session blocklisted\n",
+                                      session->s_mds);
                        blocklisted = true;
                }
        }
@@ -3996,22 +4113,24 @@ static void handle_session(struct ceph_mds_session *session,
 
        mutex_lock(&session->s_mutex);
 
-       dout("handle_session mds%d %s %p state %s seq %llu\n",
-            mds, ceph_session_op_name(op), session,
-            ceph_session_state_name(session->s_state), seq);
+       doutc(cl, "mds%d %s %p state %s seq %llu\n", mds,
+             ceph_session_op_name(op), session,
+             ceph_session_state_name(session->s_state), seq);
 
        if (session->s_state == CEPH_MDS_SESSION_HUNG) {
                session->s_state = CEPH_MDS_SESSION_OPEN;
-               pr_info("mds%d came back\n", session->s_mds);
+               pr_info_client(cl, "mds%d came back\n", session->s_mds);
        }
 
        switch (op) {
        case CEPH_SESSION_OPEN:
                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
-                       pr_info("mds%d reconnect success\n", session->s_mds);
+                       pr_info_client(cl, "mds%d reconnect success\n",
+                                      session->s_mds);
 
                if (session->s_state == CEPH_MDS_SESSION_OPEN) {
-                       pr_notice("mds%d is already opened\n", session->s_mds);
+                       pr_notice_client(cl, "mds%d is already opened\n",
+                                        session->s_mds);
                } else {
                        session->s_state = CEPH_MDS_SESSION_OPEN;
                        session->s_features = features;
@@ -4041,7 +4160,8 @@ static void handle_session(struct ceph_mds_session *session,
 
        case CEPH_SESSION_CLOSE:
                if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
-                       pr_info("mds%d reconnect denied\n", session->s_mds);
+                       pr_info_client(cl, "mds%d reconnect denied\n",
+                                      session->s_mds);
                session->s_state = CEPH_MDS_SESSION_CLOSED;
                cleanup_session_requests(mdsc, session);
                remove_session_caps(session);
@@ -4050,8 +4170,8 @@ static void handle_session(struct ceph_mds_session *session,
                break;
 
        case CEPH_SESSION_STALE:
-               pr_info("mds%d caps went stale, renewing\n",
-                       session->s_mds);
+               pr_info_client(cl, "mds%d caps went stale, renewing\n",
+                              session->s_mds);
                atomic_inc(&session->s_cap_gen);
                session->s_cap_ttl = jiffies - 1;
                send_renew_caps(mdsc, session);
@@ -4072,7 +4192,7 @@ static void handle_session(struct ceph_mds_session *session,
                break;
 
        case CEPH_SESSION_FORCE_RO:
-               dout("force_session_readonly %p\n", session);
+               doutc(cl, "force_session_readonly %p\n", session);
                spin_lock(&session->s_cap_lock);
                session->s_readonly = true;
                spin_unlock(&session->s_cap_lock);
@@ -4081,7 +4201,8 @@ static void handle_session(struct ceph_mds_session *session,
 
        case CEPH_SESSION_REJECT:
                WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
-               pr_info("mds%d rejected session\n", session->s_mds);
+               pr_info_client(cl, "mds%d rejected session\n",
+                              session->s_mds);
                session->s_state = CEPH_MDS_SESSION_REJECTED;
                cleanup_session_requests(mdsc, session);
                remove_session_caps(session);
@@ -4091,7 +4212,7 @@ static void handle_session(struct ceph_mds_session *session,
                break;
 
        default:
-               pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
+               pr_err_client(cl, "bad op %d mds%d\n", op, mds);
                WARN_ON(1);
        }
 
@@ -4108,30 +4229,32 @@ static void handle_session(struct ceph_mds_session *session,
        return;
 
 bad:
-       pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
-              (int)msg->front.iov_len);
+       pr_err_client(cl, "corrupt message mds%d len %d\n", mds,
+                     (int)msg->front.iov_len);
        ceph_msg_dump(msg);
        return;
 }
 
 void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
 {
+       struct ceph_client *cl = req->r_mdsc->fsc->client;
        int dcaps;
 
        dcaps = xchg(&req->r_dir_caps, 0);
        if (dcaps) {
-               dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
+               doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
                ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
        }
 }
 
 void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
 {
+       struct ceph_client *cl = req->r_mdsc->fsc->client;
        int dcaps;
 
        dcaps = xchg(&req->r_dir_caps, 0);
        if (dcaps) {
-               dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
+               doutc(cl, "releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
                ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
                                                dcaps);
        }
@@ -4146,7 +4269,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
        struct ceph_mds_request *req, *nreq;
        struct rb_node *p;
 
-       dout("replay_unsafe_requests mds%d\n", session->s_mds);
+       doutc(mdsc->fsc->client, "mds%d\n", session->s_mds);
 
        mutex_lock(&mdsc->mutex);
        list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
@@ -4290,6 +4413,8 @@ out_unlock:
  */
 static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
 {
+       struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        union {
                struct ceph_mds_cap_reconnect v2;
                struct ceph_mds_cap_reconnect_v1 v1;
@@ -4307,7 +4432,7 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
        dentry = d_find_primary(inode);
        if (dentry) {
                /* set pathbase to parent dir when msg_version >= 2 */
-               path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
+               path = ceph_mdsc_build_path(mdsc, dentry, &pathlen, &pathbase,
                                            recon_state->msg_version >= 2);
                dput(dentry);
                if (IS_ERR(path)) {
@@ -4326,9 +4451,9 @@ static int reconnect_caps_cb(struct inode *inode, int mds, void *arg)
                err = 0;
                goto out_err;
        }
-       dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
-            inode, ceph_vinop(inode), cap, cap->cap_id,
-            ceph_cap_string(cap->issued));
+       doutc(cl, " adding %p ino %llx.%llx cap %p %lld %s\n", inode,
+             ceph_vinop(inode), cap, cap->cap_id,
+             ceph_cap_string(cap->issued));
 
        cap->seq = 0;        /* reset cap seq */
        cap->issue_seq = 0;  /* and issue_seq */
@@ -4482,6 +4607,7 @@ static int encode_snap_realms(struct ceph_mds_client *mdsc,
 {
        struct rb_node *p;
        struct ceph_pagelist *pagelist = recon_state->pagelist;
+       struct ceph_client *cl = mdsc->fsc->client;
        int err = 0;
 
        if (recon_state->msg_version >= 4) {
@@ -4520,8 +4646,8 @@ static int encode_snap_realms(struct ceph_mds_client *mdsc,
                        ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
                }
 
-               dout(" adding snap realm %llx seq %lld parent %llx\n",
-                    realm->ino, realm->seq, realm->parent_ino);
+               doutc(cl, " adding snap realm %llx seq %lld parent %llx\n",
+                     realm->ino, realm->seq, realm->parent_ino);
                sr_rec.ino = cpu_to_le64(realm->ino);
                sr_rec.seq = cpu_to_le64(realm->seq);
                sr_rec.parent = cpu_to_le64(realm->parent_ino);
@@ -4550,6 +4676,7 @@ fail:
 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
                               struct ceph_mds_session *session)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_msg *reply;
        int mds = session->s_mds;
        int err = -ENOMEM;
@@ -4558,7 +4685,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        };
        LIST_HEAD(dispose);
 
-       pr_info("mds%d reconnect start\n", mds);
+       pr_info_client(cl, "mds%d reconnect start\n", mds);
 
        recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
        if (!recon_state.pagelist)
@@ -4574,8 +4701,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        session->s_state = CEPH_MDS_SESSION_RECONNECTING;
        session->s_seq = 0;
 
-       dout("session %p state %s\n", session,
-            ceph_session_state_name(session->s_state));
+       doutc(cl, "session %p state %s\n", session,
+             ceph_session_state_name(session->s_state));
 
        atomic_inc(&session->s_cap_gen);
 
@@ -4709,7 +4836,8 @@ fail:
 fail_nomsg:
        ceph_pagelist_release(recon_state.pagelist);
 fail_nopagelist:
-       pr_err("error %d preparing reconnect for mds%d\n", err, mds);
+       pr_err_client(cl, "error %d preparing reconnect for mds%d\n",
+                     err, mds);
        return;
 }
 
@@ -4728,9 +4856,9 @@ static void check_new_map(struct ceph_mds_client *mdsc,
        int oldstate, newstate;
        struct ceph_mds_session *s;
        unsigned long targets[DIV_ROUND_UP(CEPH_MAX_MDS, sizeof(unsigned long))] = {0};
+       struct ceph_client *cl = mdsc->fsc->client;
 
-       dout("check_new_map new %u old %u\n",
-            newmap->m_epoch, oldmap->m_epoch);
+       doutc(cl, "new %u old %u\n", newmap->m_epoch, oldmap->m_epoch);
 
        if (newmap->m_info) {
                for (i = 0; i < newmap->possible_max_rank; i++) {
@@ -4746,12 +4874,12 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                oldstate = ceph_mdsmap_get_state(oldmap, i);
                newstate = ceph_mdsmap_get_state(newmap, i);
 
-               dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
-                    i, ceph_mds_state_name(oldstate),
-                    ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
-                    ceph_mds_state_name(newstate),
-                    ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
-                    ceph_session_state_name(s->s_state));
+               doutc(cl, "mds%d state %s%s -> %s%s (session %s)\n",
+                     i, ceph_mds_state_name(oldstate),
+                     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
+                     ceph_mds_state_name(newstate),
+                     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
+                     ceph_session_state_name(s->s_state));
 
                if (i >= newmap->possible_max_rank) {
                        /* force close session for stopped mds */
@@ -4804,7 +4932,8 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                    newstate >= CEPH_MDS_STATE_ACTIVE) {
                        if (oldstate != CEPH_MDS_STATE_CREATING &&
                            oldstate != CEPH_MDS_STATE_STARTING)
-                               pr_info("mds%d recovery completed\n", s->s_mds);
+                               pr_info_client(cl, "mds%d recovery completed\n",
+                                              s->s_mds);
                        kick_requests(mdsc, i);
                        mutex_unlock(&mdsc->mutex);
                        mutex_lock(&s->s_mutex);
@@ -4848,12 +4977,13 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                        s = __open_export_target_session(mdsc, i);
                        if (IS_ERR(s)) {
                                err = PTR_ERR(s);
-                               pr_err("failed to open export target session, err %d\n",
-                                      err);
+                               pr_err_client(cl,
+                                             "failed to open export target session, err %d\n",
+                                             err);
                                continue;
                        }
                }
-               dout("send reconnect to export target mds.%d\n", i);
+               doutc(cl, "send reconnect to export target mds.%d\n", i);
                mutex_unlock(&mdsc->mutex);
                send_mds_reconnect(mdsc, s);
                ceph_put_mds_session(s);
@@ -4869,8 +4999,7 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                if (s->s_state == CEPH_MDS_SESSION_OPEN ||
                    s->s_state == CEPH_MDS_SESSION_HUNG ||
                    s->s_state == CEPH_MDS_SESSION_CLOSING) {
-                       dout(" connecting to export targets of laggy mds%d\n",
-                            i);
+                       doutc(cl, " connecting to export targets of laggy mds%d\n", i);
                        __open_export_target_sessions(mdsc, s);
                }
        }
@@ -4897,6 +5026,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
                         struct ceph_mds_session *session,
                         struct ceph_msg *msg)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct super_block *sb = mdsc->fsc->sb;
        struct inode *inode;
        struct dentry *parent, *dentry;
@@ -4908,7 +5038,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
        struct qstr dname;
        int release = 0;
 
-       dout("handle_lease from mds%d\n", mds);
+       doutc(cl, "from mds%d\n", mds);
 
        if (!ceph_inc_mds_stopping_blocker(mdsc, session))
                return;
@@ -4926,20 +5056,19 @@ static void handle_lease(struct ceph_mds_client *mdsc,
 
        /* lookup inode */
        inode = ceph_find_inode(sb, vino);
-       dout("handle_lease %s, ino %llx %p %.*s\n",
-            ceph_lease_op_name(h->action), vino.ino, inode,
-            dname.len, dname.name);
+       doutc(cl, "%s, ino %llx %p %.*s\n", ceph_lease_op_name(h->action),
+             vino.ino, inode, dname.len, dname.name);
 
        mutex_lock(&session->s_mutex);
        if (!inode) {
-               dout("handle_lease no inode %llx\n", vino.ino);
+               doutc(cl, "no inode %llx\n", vino.ino);
                goto release;
        }
 
        /* dentry */
        parent = d_find_alias(inode);
        if (!parent) {
-               dout("no parent dentry on inode %p\n", inode);
+               doutc(cl, "no parent dentry on inode %p\n", inode);
                WARN_ON(1);
                goto release;  /* hrm... */
        }
@@ -4999,7 +5128,7 @@ out:
 bad:
        ceph_dec_mds_stopping_blocker(mdsc);
 
-       pr_err("corrupt lease message\n");
+       pr_err_client(cl, "corrupt lease message\n");
        ceph_msg_dump(msg);
 }
 
@@ -5007,13 +5136,14 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
                              struct dentry *dentry, char action,
                              u32 seq)
 {
+       struct ceph_client *cl = session->s_mdsc->fsc->client;
        struct ceph_msg *msg;
        struct ceph_mds_lease *lease;
        struct inode *dir;
        int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
 
-       dout("lease_send_msg identry %p %s to mds%d\n",
-            dentry, ceph_lease_op_name(action), session->s_mds);
+       doutc(cl, "identry %p %s to mds%d\n", dentry, ceph_lease_op_name(action),
+             session->s_mds);
 
        msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
        if (!msg)
@@ -5046,6 +5176,7 @@ static void lock_unlock_session(struct ceph_mds_session *s)
 
 static void maybe_recover_session(struct ceph_mds_client *mdsc)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_fs_client *fsc = mdsc->fsc;
 
        if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
@@ -5057,17 +5188,19 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)
        if (!READ_ONCE(fsc->blocklisted))
                return;
 
-       pr_info("auto reconnect after blocklisted\n");
+       pr_info_client(cl, "auto reconnect after blocklisted\n");
        ceph_force_reconnect(fsc->sb);
 }
 
 bool check_session_state(struct ceph_mds_session *s)
 {
+       struct ceph_client *cl = s->s_mdsc->fsc->client;
+
        switch (s->s_state) {
        case CEPH_MDS_SESSION_OPEN:
                if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
                        s->s_state = CEPH_MDS_SESSION_HUNG;
-                       pr_info("mds%d hung\n", s->s_mds);
+                       pr_info_client(cl, "mds%d hung\n", s->s_mds);
                }
                break;
        case CEPH_MDS_SESSION_CLOSING:
@@ -5087,6 +5220,8 @@ bool check_session_state(struct ceph_mds_session *s)
  */
 void inc_session_sequence(struct ceph_mds_session *s)
 {
+       struct ceph_client *cl = s->s_mdsc->fsc->client;
+
        lockdep_assert_held(&s->s_mutex);
 
        s->s_seq++;
@@ -5094,11 +5229,11 @@ void inc_session_sequence(struct ceph_mds_session *s)
        if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
                int ret;
 
-               dout("resending session close request for mds%d\n", s->s_mds);
+               doutc(cl, "resending session close request for mds%d\n", s->s_mds);
                ret = request_close_session(s);
                if (ret < 0)
-                       pr_err("unable to close session to mds%d: %d\n",
-                              s->s_mds, ret);
+                       pr_err_client(cl, "unable to close session to mds%d: %d\n",
+                                     s->s_mds, ret);
        }
 }
 
@@ -5127,7 +5262,7 @@ static void delayed_work(struct work_struct *work)
        int renew_caps;
        int i;
 
-       dout("mdsc delayed_work\n");
+       doutc(mdsc->fsc->client, "mdsc delayed_work\n");
 
        if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
                return;
@@ -5256,6 +5391,7 @@ err_mdsc:
  */
 static void wait_requests(struct ceph_mds_client *mdsc)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_options *opts = mdsc->fsc->client->options;
        struct ceph_mds_request *req;
 
@@ -5263,25 +5399,25 @@ static void wait_requests(struct ceph_mds_client *mdsc)
        if (__get_oldest_req(mdsc)) {
                mutex_unlock(&mdsc->mutex);
 
-               dout("wait_requests waiting for requests\n");
+               doutc(cl, "waiting for requests\n");
                wait_for_completion_timeout(&mdsc->safe_umount_waiters,
                                    ceph_timeout_jiffies(opts->mount_timeout));
 
                /* tear down remaining requests */
                mutex_lock(&mdsc->mutex);
                while ((req = __get_oldest_req(mdsc))) {
-                       dout("wait_requests timed out on tid %llu\n",
-                            req->r_tid);
+                       doutc(cl, "timed out on tid %llu\n", req->r_tid);
                        list_del_init(&req->r_wait);
                        __unregister_request(mdsc, req);
                }
        }
        mutex_unlock(&mdsc->mutex);
-       dout("wait_requests done\n");
+       doutc(cl, "done\n");
 }
 
 void send_flush_mdlog(struct ceph_mds_session *s)
 {
+       struct ceph_client *cl = s->s_mdsc->fsc->client;
        struct ceph_msg *msg;
 
        /*
@@ -5291,13 +5427,13 @@ void send_flush_mdlog(struct ceph_mds_session *s)
                return;
 
        mutex_lock(&s->s_mutex);
-       dout("request mdlog flush to mds%d (%s)s seq %lld\n", s->s_mds,
-            ceph_session_state_name(s->s_state), s->s_seq);
+       doutc(cl, "request mdlog flush to mds%d (%s)s seq %lld\n",
+             s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
        msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
                                      s->s_seq);
        if (!msg) {
-               pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n",
-                      s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
+               pr_err_client(cl, "failed to request mdlog flush to mds%d (%s) seq %lld\n",
+                             s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
        } else {
                ceph_con_send(&s->s_con, msg);
        }
@@ -5310,7 +5446,7 @@ void send_flush_mdlog(struct ceph_mds_session *s)
  */
 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
 {
-       dout("pre_umount\n");
+       doutc(mdsc->fsc->client, "begin\n");
        mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
 
        ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
@@ -5325,6 +5461,7 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
        ceph_msgr_flush();
 
        ceph_cleanup_quotarealms_inodes(mdsc);
+       doutc(mdsc->fsc->client, "done\n");
 }
 
 /*
@@ -5333,12 +5470,13 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
 static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
                                                 u64 want_tid)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_request *req = NULL, *nextreq;
        struct ceph_mds_session *last_session = NULL;
        struct rb_node *n;
 
        mutex_lock(&mdsc->mutex);
-       dout("%s want %lld\n", __func__, want_tid);
+       doutc(cl, "want %lld\n", want_tid);
 restart:
        req = __get_oldest_req(mdsc);
        while (req && req->r_tid <= want_tid) {
@@ -5372,8 +5510,8 @@ restart:
                        } else {
                                ceph_put_mds_session(s);
                        }
-                       dout("%s wait on %llu (want %llu)\n", __func__,
-                            req->r_tid, want_tid);
+                       doutc(cl, "wait on %llu (want %llu)\n",
+                             req->r_tid, want_tid);
                        wait_for_completion(&req->r_safe_completion);
 
                        mutex_lock(&mdsc->mutex);
@@ -5391,17 +5529,18 @@ restart:
        }
        mutex_unlock(&mdsc->mutex);
        ceph_put_mds_session(last_session);
-       dout("%s done\n", __func__);
+       doutc(cl, "done\n");
 }
 
 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        u64 want_tid, want_flush;
 
        if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
                return;
 
-       dout("sync\n");
+       doutc(cl, "sync\n");
        mutex_lock(&mdsc->mutex);
        want_tid = mdsc->last_tid;
        mutex_unlock(&mdsc->mutex);
@@ -5417,8 +5556,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
        }
        spin_unlock(&mdsc->cap_dirty_lock);
 
-       dout("sync want tid %lld flush_seq %lld\n",
-            want_tid, want_flush);
+       doutc(cl, "sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
 
        flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
        wait_caps_flush(mdsc, want_flush);
@@ -5440,11 +5578,12 @@ static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 {
        struct ceph_options *opts = mdsc->fsc->client->options;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_session *session;
        int i;
        int skipped = 0;
 
-       dout("close_sessions\n");
+       doutc(cl, "begin\n");
 
        /* close sessions */
        mutex_lock(&mdsc->mutex);
@@ -5462,7 +5601,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
        }
        mutex_unlock(&mdsc->mutex);
 
-       dout("waiting for sessions to close\n");
+       doutc(cl, "waiting for sessions to close\n");
        wait_event_timeout(mdsc->session_close_wq,
                           done_closing_sessions(mdsc, skipped),
                           ceph_timeout_jiffies(opts->mount_timeout));
@@ -5490,7 +5629,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
        cancel_work_sync(&mdsc->cap_reclaim_work);
        cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
 
-       dout("stopped\n");
+       doutc(cl, "done\n");
 }
 
 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
@@ -5498,7 +5637,7 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
        struct ceph_mds_session *session;
        int mds;
 
-       dout("force umount\n");
+       doutc(mdsc->fsc->client, "force umount\n");
 
        mutex_lock(&mdsc->mutex);
        for (mds = 0; mds < mdsc->max_sessions; mds++) {
@@ -5529,7 +5668,7 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
 
 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
 {
-       dout("stop\n");
+       doutc(mdsc->fsc->client, "stop\n");
        /*
         * Make sure the delayed work stopped before releasing
         * the resources.
@@ -5550,7 +5689,7 @@ static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
 {
        struct ceph_mds_client *mdsc = fsc->mdsc;
-       dout("mdsc_destroy %p\n", mdsc);
+       doutc(fsc->client, "%p\n", mdsc);
 
        if (!mdsc)
                return;
@@ -5564,12 +5703,13 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
 
        fsc->mdsc = NULL;
        kfree(mdsc);
-       dout("mdsc_destroy %p done\n", mdsc);
+       doutc(fsc->client, "%p done\n", mdsc);
 }
 
 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
 {
        struct ceph_fs_client *fsc = mdsc->fsc;
+       struct ceph_client *cl = fsc->client;
        const char *mds_namespace = fsc->mount_options->mds_namespace;
        void *p = msg->front.iov_base;
        void *end = p + msg->front.iov_len;
@@ -5581,7 +5721,7 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
        ceph_decode_need(&p, end, sizeof(u32), bad);
        epoch = ceph_decode_32(&p);
 
-       dout("handle_fsmap epoch %u\n", epoch);
+       doutc(cl, "epoch %u\n", epoch);
 
        /* struct_v, struct_cv, map_len, epoch, legacy_client_fscid */
        ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 3, bad);
@@ -5626,7 +5766,8 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
        return;
 
 bad:
-       pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
+       pr_err_client(cl, "error decoding fsmap %d. Shutting down mount.\n",
+                     err);
        ceph_umount_begin(mdsc->fsc->sb);
        ceph_msg_dump(msg);
 err_out:
@@ -5641,6 +5782,7 @@ err_out:
  */
 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        u32 epoch;
        u32 maplen;
        void *p = msg->front.iov_base;
@@ -5655,18 +5797,17 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
                return;
        epoch = ceph_decode_32(&p);
        maplen = ceph_decode_32(&p);
-       dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
+       doutc(cl, "epoch %u len %d\n", epoch, (int)maplen);
 
        /* do we need it? */
        mutex_lock(&mdsc->mutex);
        if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
-               dout("handle_map epoch %u <= our %u\n",
-                    epoch, mdsc->mdsmap->m_epoch);
+               doutc(cl, "epoch %u <= our %u\n", epoch, mdsc->mdsmap->m_epoch);
                mutex_unlock(&mdsc->mutex);
                return;
        }
 
-       newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
+       newmap = ceph_mdsmap_decode(mdsc, &p, end, ceph_msgr2(mdsc->fsc->client));
        if (IS_ERR(newmap)) {
                err = PTR_ERR(newmap);
                goto bad_unlock;
@@ -5695,7 +5836,8 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
 bad_unlock:
        mutex_unlock(&mdsc->mutex);
 bad:
-       pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
+       pr_err_client(cl, "error decoding mdsmap %d. Shutting down mount.\n",
+                     err);
        ceph_umount_begin(mdsc->fsc->sb);
        ceph_msg_dump(msg);
        return;
@@ -5726,7 +5868,8 @@ static void mds_peer_reset(struct ceph_connection *con)
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
 
-       pr_warn("mds%d closed our session\n", s->s_mds);
+       pr_warn_client(mdsc->fsc->client, "mds%d closed our session\n",
+                      s->s_mds);
        if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
                send_mds_reconnect(mdsc, s);
 }
@@ -5735,6 +5878,7 @@ static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
 {
        struct ceph_mds_session *s = con->private;
        struct ceph_mds_client *mdsc = s->s_mdsc;
+       struct ceph_client *cl = mdsc->fsc->client;
        int type = le16_to_cpu(msg->hdr.type);
 
        mutex_lock(&mdsc->mutex);
@@ -5774,8 +5918,8 @@ static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
                break;
 
        default:
-               pr_err("received unknown message type %d %s\n", type,
-                      ceph_msg_type_name(type));
+               pr_err_client(cl, "received unknown message type %d %s\n",
+                             type, ceph_msg_type_name(type));
        }
 out:
        ceph_msg_put(msg);
index 5a3714bdd64a8e163077f4baba72b47b6087c564..2e6ddaa13d725016dc9a93c6ad1838806eac547e 100644 (file)
@@ -14,9 +14,9 @@
 
 #include <linux/ceph/types.h>
 #include <linux/ceph/messenger.h>
-#include <linux/ceph/mdsmap.h>
 #include <linux/ceph/auth.h>
 
+#include "mdsmap.h"
 #include "metric.h"
 #include "super.h"
 
@@ -33,8 +33,10 @@ enum ceph_feature_type {
        CEPHFS_FEATURE_NOTIFY_SESSION_STATE,
        CEPHFS_FEATURE_OP_GETVXATTR,
        CEPHFS_FEATURE_32BITS_RETRY_FWD,
+       CEPHFS_FEATURE_NEW_SNAPREALM_INFO,
+       CEPHFS_FEATURE_HAS_OWNER_UIDGID,
 
-       CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_32BITS_RETRY_FWD,
+       CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_HAS_OWNER_UIDGID,
 };
 
 #define CEPHFS_FEATURES_CLIENT_SUPPORTED {     \
@@ -49,6 +51,7 @@ enum ceph_feature_type {
        CEPHFS_FEATURE_NOTIFY_SESSION_STATE,    \
        CEPHFS_FEATURE_OP_GETVXATTR,            \
        CEPHFS_FEATURE_32BITS_RETRY_FWD,        \
+       CEPHFS_FEATURE_HAS_OWNER_UIDGID,        \
 }
 
 /*
@@ -300,6 +303,7 @@ struct ceph_mds_request {
        int r_fmode;        /* file mode, if expecting cap */
        int r_request_release_offset;
        const struct cred *r_cred;
+       struct mnt_idmap *r_mnt_idmap;
        struct timespec64 r_stamp;
 
        /* for choosing which mds to send this request to */
@@ -581,7 +585,8 @@ static inline void ceph_mdsc_free_path(char *path, int len)
                __putname(path - (PATH_MAX - 1 - len));
 }
 
-extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
+extern char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc,
+                                 struct dentry *dentry, int *plen, u64 *base,
                                  int for_wire);
 
 extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
@@ -614,4 +619,6 @@ static inline int ceph_wait_on_async_create(struct inode *inode)
 extern int ceph_wait_on_conflict_unlink(struct dentry *dentry);
 extern u64 ceph_get_deleg_ino(struct ceph_mds_session *session);
 extern int ceph_restore_deleg_ino(struct ceph_mds_session *session, u64 ino);
+
+extern bool enable_unsafe_idmap;
 #endif
index 7dac21ee6ce7682a22b82d36debef62931a5b58b..fae97c25ce58d5b268b7e3d73c5d4c94def4946d 100644 (file)
@@ -7,10 +7,11 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 
-#include <linux/ceph/mdsmap.h>
 #include <linux/ceph/messenger.h>
 #include <linux/ceph/decode.h>
 
+#include "mdsmap.h"
+#include "mds_client.h"
 #include "super.h"
 
 #define CEPH_MDS_IS_READY(i, ignore_laggy) \
@@ -114,8 +115,10 @@ bad:
  * Ignore any fields we don't care about (there are quite a few of
  * them).
  */
-struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
+struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
+                                      void *end, bool msgr2)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mdsmap *m;
        const void *start = *p;
        int i, j, n;
@@ -233,20 +236,18 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
                        *p = info_end;
                }
 
-               dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s%s\n",
-                    i+1, n, global_id, mds, inc,
-                    ceph_pr_addr(&addr),
-                    ceph_mds_state_name(state),
-                    laggy ? "(laggy)" : "");
+               doutc(cl, "%d/%d %lld mds%d.%d %s %s%s\n", i+1, n, global_id,
+                     mds, inc, ceph_pr_addr(&addr),
+                     ceph_mds_state_name(state), laggy ? "(laggy)" : "");
 
                if (mds < 0 || mds >= m->possible_max_rank) {
-                       pr_warn("mdsmap_decode got incorrect mds(%d)\n", mds);
+                       pr_warn_client(cl, "got incorrect mds(%d)\n", mds);
                        continue;
                }
 
                if (state <= 0) {
-                       dout("mdsmap_decode got incorrect state(%s)\n",
-                            ceph_mds_state_name(state));
+                       doutc(cl, "got incorrect state(%s)\n",
+                             ceph_mds_state_name(state));
                        continue;
                }
 
@@ -385,16 +386,16 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
                m->m_max_xattr_size = 0;
        }
 bad_ext:
-       dout("mdsmap_decode m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
-            !!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
+       doutc(cl, "m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
+             !!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
        *p = end;
-       dout("mdsmap_decode success epoch %u\n", m->m_epoch);
+       doutc(cl, "success epoch %u\n", m->m_epoch);
        return m;
 nomem:
        err = -ENOMEM;
        goto out_err;
 corrupt:
-       pr_err("corrupt mdsmap\n");
+       pr_err_client(cl, "corrupt mdsmap\n");
        print_hex_dump(KERN_DEBUG, "mdsmap: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       start, end - start, true);
diff --git a/fs/ceph/mdsmap.h b/fs/ceph/mdsmap.h
new file mode 100644 (file)
index 0000000..89f1931
--- /dev/null
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _FS_CEPH_MDSMAP_H
+#define _FS_CEPH_MDSMAP_H
+
+#include <linux/bug.h>
+#include <linux/ceph/types.h>
+
+struct ceph_mds_client;
+
+/*
+ * mds map - describe servers in the mds cluster.
+ *
+ * we limit fields to those the client actually cares about
+ */
+struct ceph_mds_info {
+       u64 global_id;
+       struct ceph_entity_addr addr;
+       s32 state;
+       int num_export_targets;
+       bool laggy;
+       u32 *export_targets;
+};
+
+struct ceph_mdsmap {
+       u32 m_epoch, m_client_epoch, m_last_failure;
+       u32 m_root;
+       u32 m_session_timeout;          /* seconds */
+       u32 m_session_autoclose;        /* seconds */
+       u64 m_max_file_size;
+       u64 m_max_xattr_size;           /* maximum size for xattrs blob */
+       u32 m_max_mds;                  /* expected up:active mds number */
+       u32 m_num_active_mds;           /* actual up:active mds number */
+       u32 possible_max_rank;          /* possible max rank index */
+       struct ceph_mds_info *m_info;
+
+       /* which object pools file data can be stored in */
+       int m_num_data_pg_pools;
+       u64 *m_data_pg_pools;
+       u64 m_cas_pg_pool;
+
+       bool m_enabled;
+       bool m_damaged;
+       int m_num_laggy;
+};
+
+static inline struct ceph_entity_addr *
+ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
+{
+       if (w >= m->possible_max_rank)
+               return NULL;
+       return &m->m_info[w].addr;
+}
+
+static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
+{
+       BUG_ON(w < 0);
+       if (w >= m->possible_max_rank)
+               return CEPH_MDS_STATE_DNE;
+       return m->m_info[w].state;
+}
+
+static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
+{
+       if (w >= 0 && w < m->possible_max_rank)
+               return m->m_info[w].laggy;
+       return false;
+}
+
+extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
+struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
+                                      void *end, bool msgr2);
+extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
+extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
+
+#endif
index 6d3584f16f9a4eae62371c4c11d2ccbe3a85fddb..871c1090e520112d4a5641ab941bc1848b9a5e3d 100644 (file)
@@ -31,6 +31,7 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
        struct ceph_client_metric *m = &mdsc->metric;
        u64 nr_caps = atomic64_read(&m->total_caps);
        u32 header_len = sizeof(struct ceph_metric_header);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_msg *msg;
        s64 sum;
        s32 items = 0;
@@ -51,8 +52,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
 
        msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
        if (!msg) {
-               pr_err("send metrics to mds%d, failed to allocate message\n",
-                      s->s_mds);
+               pr_err_client(cl, "to mds%d, failed to allocate message\n",
+                             s->s_mds);
                return false;
        }
 
index f7fcf7f08ec64270319d7644d5fbf9a2d7ab7e9f..9d36c3532de14fc41e0517f494fd6cfb7713cc28 100644 (file)
@@ -43,6 +43,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
 {
        struct super_block *sb = mdsc->fsc->sb;
        struct ceph_mds_quota *h = msg->front.iov_base;
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_vino vino;
        struct inode *inode;
        struct ceph_inode_info *ci;
@@ -51,8 +52,8 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
                return;
 
        if (msg->front.iov_len < sizeof(*h)) {
-               pr_err("%s corrupt message mds%d len %d\n", __func__,
-                      session->s_mds, (int)msg->front.iov_len);
+               pr_err_client(cl, "corrupt message mds%d len %d\n",
+                             session->s_mds, (int)msg->front.iov_len);
                ceph_msg_dump(msg);
                goto out;
        }
@@ -62,7 +63,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
        vino.snap = CEPH_NOSNAP;
        inode = ceph_find_inode(sb, vino);
        if (!inode) {
-               pr_warn("Failed to find inode %llu\n", vino.ino);
+               pr_warn_client(cl, "failed to find inode %llx\n", vino.ino);
                goto out;
        }
        ci = ceph_inode(inode);
@@ -85,6 +86,7 @@ find_quotarealm_inode(struct ceph_mds_client *mdsc, u64 ino)
 {
        struct ceph_quotarealm_inode *qri = NULL;
        struct rb_node **node, *parent = NULL;
+       struct ceph_client *cl = mdsc->fsc->client;
 
        mutex_lock(&mdsc->quotarealms_inodes_mutex);
        node = &(mdsc->quotarealms_inodes.rb_node);
@@ -110,7 +112,7 @@ find_quotarealm_inode(struct ceph_mds_client *mdsc, u64 ino)
                        rb_link_node(&qri->node, parent, node);
                        rb_insert_color(&qri->node, &mdsc->quotarealms_inodes);
                } else
-                       pr_warn("Failed to alloc quotarealms_inode\n");
+                       pr_warn_client(cl, "Failed to alloc quotarealms_inode\n");
        }
        mutex_unlock(&mdsc->quotarealms_inodes_mutex);
 
@@ -129,6 +131,7 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
                                             struct super_block *sb,
                                             struct ceph_snap_realm *realm)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_quotarealm_inode *qri;
        struct inode *in;
 
@@ -161,8 +164,8 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
        }
 
        if (IS_ERR(in)) {
-               dout("Can't lookup inode %llx (err: %ld)\n",
-                    realm->ino, PTR_ERR(in));
+               doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino,
+                     PTR_ERR(in));
                qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
        } else {
                qri->timeout = 0;
@@ -213,6 +216,7 @@ static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
                                               enum quota_get_realm which_quota,
                                               bool retry)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_inode_info *ci = NULL;
        struct ceph_snap_realm *realm, *next;
        struct inode *in;
@@ -226,8 +230,9 @@ restart:
        if (realm)
                ceph_get_snap_realm(mdsc, realm);
        else
-               pr_err_ratelimited("get_quota_realm: ino (%llx.%llx) "
-                                  "null i_snap_realm\n", ceph_vinop(inode));
+               pr_err_ratelimited_client(cl,
+                               "%p %llx.%llx null i_snap_realm\n",
+                               inode, ceph_vinop(inode));
        while (realm) {
                bool has_inode;
 
@@ -317,6 +322,7 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
                                 loff_t delta)
 {
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_inode_info *ci;
        struct ceph_snap_realm *realm, *next;
        struct inode *in;
@@ -332,8 +338,9 @@ restart:
        if (realm)
                ceph_get_snap_realm(mdsc, realm);
        else
-               pr_err_ratelimited("check_quota_exceeded: ino (%llx.%llx) "
-                                  "null i_snap_realm\n", ceph_vinop(inode));
+               pr_err_ratelimited_client(cl,
+                               "%p %llx.%llx null i_snap_realm\n",
+                               inode, ceph_vinop(inode));
        while (realm) {
                bool has_inode;
 
@@ -383,7 +390,7 @@ restart:
                        break;
                default:
                        /* Shouldn't happen */
-                       pr_warn("Invalid quota check op (%d)\n", op);
+                       pr_warn_client(cl, "Invalid quota check op (%d)\n", op);
                        exceeded = true; /* Just break the loop */
                }
                iput(in);
index 6732e1ea97d99126f2f18c42cddb82f21ff07272..c65f2b202b2b3e8bf9055f046a5d9a848c13b6b0 100644 (file)
@@ -138,7 +138,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
        __insert_snap_realm(&mdsc->snap_realms, realm);
        mdsc->num_snap_realms++;
 
-       dout("%s %llx %p\n", __func__, realm->ino, realm);
+       doutc(mdsc->fsc->client, "%llx %p\n", realm->ino, realm);
        return realm;
 }
 
@@ -150,6 +150,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
 static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
                                                   u64 ino)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct rb_node *n = mdsc->snap_realms.rb_node;
        struct ceph_snap_realm *r;
 
@@ -162,7 +163,7 @@ static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
                else if (ino > r->ino)
                        n = n->rb_right;
                else {
-                       dout("%s %llx %p\n", __func__, r->ino, r);
+                       doutc(cl, "%llx %p\n", r->ino, r);
                        return r;
                }
        }
@@ -188,9 +189,10 @@ static void __put_snap_realm(struct ceph_mds_client *mdsc,
 static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
                                 struct ceph_snap_realm *realm)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        lockdep_assert_held_write(&mdsc->snap_rwsem);
 
-       dout("%s %p %llx\n", __func__, realm, realm->ino);
+       doutc(cl, "%p %llx\n", realm, realm->ino);
 
        rb_erase(&realm->node, &mdsc->snap_realms);
        mdsc->num_snap_realms--;
@@ -290,6 +292,7 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
                                    struct ceph_snap_realm *realm,
                                    u64 parentino)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_snap_realm *parent;
 
        lockdep_assert_held_write(&mdsc->snap_rwsem);
@@ -303,8 +306,8 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
                if (IS_ERR(parent))
                        return PTR_ERR(parent);
        }
-       dout("%s %llx %p: %llx %p -> %llx %p\n", __func__, realm->ino,
-            realm, realm->parent_ino, realm->parent, parentino, parent);
+       doutc(cl, "%llx %p: %llx %p -> %llx %p\n", realm->ino, realm,
+             realm->parent_ino, realm->parent, parentino, parent);
        if (realm->parent) {
                list_del_init(&realm->child_item);
                ceph_put_snap_realm(mdsc, realm->parent);
@@ -329,10 +332,12 @@ static int cmpu64_rev(const void *a, const void *b)
 /*
  * build the snap context for a given realm.
  */
-static int build_snap_context(struct ceph_snap_realm *realm,
+static int build_snap_context(struct ceph_mds_client *mdsc,
+                             struct ceph_snap_realm *realm,
                              struct list_head *realm_queue,
                              struct list_head *dirty_realms)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_snap_realm *parent = realm->parent;
        struct ceph_snap_context *snapc;
        int err = 0;
@@ -360,10 +365,10 @@ static int build_snap_context(struct ceph_snap_realm *realm,
            realm->cached_context->seq == realm->seq &&
            (!parent ||
             realm->cached_context->seq >= parent->cached_context->seq)) {
-               dout("%s %llx %p: %p seq %lld (%u snaps) (unchanged)\n",
-                    __func__, realm->ino, realm, realm->cached_context,
-                    realm->cached_context->seq,
-                    (unsigned int)realm->cached_context->num_snaps);
+               doutc(cl, "%llx %p: %p seq %lld (%u snaps) (unchanged)\n",
+                     realm->ino, realm, realm->cached_context,
+                     realm->cached_context->seq,
+                     (unsigned int)realm->cached_context->num_snaps);
                return 0;
        }
 
@@ -400,8 +405,8 @@ static int build_snap_context(struct ceph_snap_realm *realm,
 
        sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
        snapc->num_snaps = num;
-       dout("%s %llx %p: %p seq %lld (%u snaps)\n", __func__, realm->ino,
-            realm, snapc, snapc->seq, (unsigned int) snapc->num_snaps);
+       doutc(cl, "%llx %p: %p seq %lld (%u snaps)\n", realm->ino, realm,
+             snapc, snapc->seq, (unsigned int) snapc->num_snaps);
 
        ceph_put_snap_context(realm->cached_context);
        realm->cached_context = snapc;
@@ -418,16 +423,18 @@ fail:
                ceph_put_snap_context(realm->cached_context);
                realm->cached_context = NULL;
        }
-       pr_err("%s %llx %p fail %d\n", __func__, realm->ino, realm, err);
+       pr_err_client(cl, "%llx %p fail %d\n", realm->ino, realm, err);
        return err;
 }
 
 /*
  * rebuild snap context for the given realm and all of its children.
  */
-static void rebuild_snap_realms(struct ceph_snap_realm *realm,
+static void rebuild_snap_realms(struct ceph_mds_client *mdsc,
+                               struct ceph_snap_realm *realm,
                                struct list_head *dirty_realms)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        LIST_HEAD(realm_queue);
        int last = 0;
        bool skip = false;
@@ -451,9 +458,10 @@ static void rebuild_snap_realms(struct ceph_snap_realm *realm,
                        continue;
                }
 
-               last = build_snap_context(_realm, &realm_queue, dirty_realms);
-               dout("%s %llx %p, %s\n", __func__, _realm->ino, _realm,
-                    last > 0 ? "is deferred" : !last ? "succeeded" : "failed");
+               last = build_snap_context(mdsc, _realm, &realm_queue,
+                                         dirty_realms);
+               doutc(cl, "%llx %p, %s\n", realm->ino, realm,
+                     last > 0 ? "is deferred" : !last ? "succeeded" : "failed");
 
                /* is any child in the list ? */
                list_for_each_entry(child, &_realm->children, child_item) {
@@ -523,6 +531,7 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
                                struct ceph_cap_snap **pcapsnap)
 {
        struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_snap_context *old_snapc, *new_snapc;
        struct ceph_cap_snap *capsnap = *pcapsnap;
        struct ceph_buffer *old_blob = NULL;
@@ -548,14 +557,14 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
                   as no new writes are allowed to start when pending, so any
                   writes in progress now were started before the previous
                   cap_snap.  lucky us. */
-               dout("%s %p %llx.%llx already pending\n",
-                    __func__, inode, ceph_vinop(inode));
+               doutc(cl, "%p %llx.%llx already pending\n", inode,
+                     ceph_vinop(inode));
                goto update_snapc;
        }
        if (ci->i_wrbuffer_ref_head == 0 &&
            !(dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))) {
-               dout("%s %p %llx.%llx nothing dirty|writing\n",
-                    __func__, inode, ceph_vinop(inode));
+               doutc(cl, "%p %llx.%llx nothing dirty|writing\n", inode,
+                     ceph_vinop(inode));
                goto update_snapc;
        }
 
@@ -575,15 +584,15 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
        } else {
                if (!(used & CEPH_CAP_FILE_WR) &&
                    ci->i_wrbuffer_ref_head == 0) {
-                       dout("%s %p %llx.%llx no new_snap|dirty_page|writing\n",
-                            __func__, inode, ceph_vinop(inode));
+                       doutc(cl, "%p %llx.%llx no new_snap|dirty_page|writing\n",
+                             inode, ceph_vinop(inode));
                        goto update_snapc;
                }
        }
 
-       dout("%s %p %llx.%llx cap_snap %p queuing under %p %s %s\n",
-            __func__, inode, ceph_vinop(inode), capsnap, old_snapc,
-            ceph_cap_string(dirty), capsnap->need_flush ? "" : "no_flush");
+       doutc(cl, "%p %llx.%llx cap_snap %p queuing under %p %s %s\n",
+             inode, ceph_vinop(inode), capsnap, old_snapc,
+             ceph_cap_string(dirty), capsnap->need_flush ? "" : "no_flush");
        ihold(inode);
 
        capsnap->follows = old_snapc->seq;
@@ -615,9 +624,9 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
        list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
 
        if (used & CEPH_CAP_FILE_WR) {
-               dout("%s %p %llx.%llx cap_snap %p snapc %p seq %llu used WR,"
-                    " now pending\n", __func__, inode, ceph_vinop(inode),
-                    capsnap, old_snapc, old_snapc->seq);
+               doutc(cl, "%p %llx.%llx cap_snap %p snapc %p seq %llu used WR,"
+                     " now pending\n", inode, ceph_vinop(inode), capsnap,
+                     old_snapc, old_snapc->seq);
                capsnap->writing = 1;
        } else {
                /* note mtime, size NOW. */
@@ -634,7 +643,7 @@ update_snapc:
                ci->i_head_snapc = NULL;
        } else {
                ci->i_head_snapc = ceph_get_snap_context(new_snapc);
-               dout(" new snapc is %p\n", new_snapc);
+               doutc(cl, " new snapc is %p\n", new_snapc);
        }
        spin_unlock(&ci->i_ceph_lock);
 
@@ -655,6 +664,7 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
 {
        struct inode *inode = &ci->netfs.inode;
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
+       struct ceph_client *cl = mdsc->fsc->client;
 
        BUG_ON(capsnap->writing);
        capsnap->size = i_size_read(inode);
@@ -667,11 +677,12 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
        capsnap->truncate_size = ci->i_truncate_size;
        capsnap->truncate_seq = ci->i_truncate_seq;
        if (capsnap->dirty_pages) {
-               dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu "
-                    "still has %d dirty pages\n", __func__, inode,
-                    ceph_vinop(inode), capsnap, capsnap->context,
-                    capsnap->context->seq, ceph_cap_string(capsnap->dirty),
-                    capsnap->size, capsnap->dirty_pages);
+               doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s "
+                     "s=%llu still has %d dirty pages\n", inode,
+                     ceph_vinop(inode), capsnap, capsnap->context,
+                     capsnap->context->seq,
+                     ceph_cap_string(capsnap->dirty),
+                     capsnap->size, capsnap->dirty_pages);
                return 0;
        }
 
@@ -680,20 +691,20 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
         * And trigger to flush the buffer immediately.
         */
        if (ci->i_wrbuffer_ref) {
-               dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu "
-                    "used WRBUFFER, delaying\n", __func__, inode,
-                    ceph_vinop(inode), capsnap, capsnap->context,
-                    capsnap->context->seq, ceph_cap_string(capsnap->dirty),
-                    capsnap->size);
+               doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s "
+                     "s=%llu used WRBUFFER, delaying\n", inode,
+                     ceph_vinop(inode), capsnap, capsnap->context,
+                     capsnap->context->seq, ceph_cap_string(capsnap->dirty),
+                     capsnap->size);
                ceph_queue_writeback(inode);
                return 0;
        }
 
        ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
-       dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu\n",
-            __func__, inode, ceph_vinop(inode), capsnap, capsnap->context,
-            capsnap->context->seq, ceph_cap_string(capsnap->dirty),
-            capsnap->size);
+       doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu\n",
+             inode, ceph_vinop(inode), capsnap, capsnap->context,
+             capsnap->context->seq, ceph_cap_string(capsnap->dirty),
+             capsnap->size);
 
        spin_lock(&mdsc->snap_flush_lock);
        if (list_empty(&ci->i_snap_flush_item)) {
@@ -708,13 +719,15 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
  * Queue cap_snaps for snap writeback for this realm and its children.
  * Called under snap_rwsem, so realm topology won't change.
  */
-static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
+static void queue_realm_cap_snaps(struct ceph_mds_client *mdsc,
+                                 struct ceph_snap_realm *realm)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_inode_info *ci;
        struct inode *lastinode = NULL;
        struct ceph_cap_snap *capsnap = NULL;
 
-       dout("%s %p %llx inode\n", __func__, realm, realm->ino);
+       doutc(cl, "%p %llx inode\n", realm, realm->ino);
 
        spin_lock(&realm->inodes_with_caps_lock);
        list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) {
@@ -733,8 +746,9 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
                if (!capsnap) {
                        capsnap = kmem_cache_zalloc(ceph_cap_snap_cachep, GFP_NOFS);
                        if (!capsnap) {
-                               pr_err("ENOMEM allocating ceph_cap_snap on %p\n",
-                                      inode);
+                               pr_err_client(cl,
+                                       "ENOMEM allocating ceph_cap_snap on %p\n",
+                                       inode);
                                return;
                        }
                }
@@ -752,7 +766,7 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
 
        if (capsnap)
                kmem_cache_free(ceph_cap_snap_cachep, capsnap);
-       dout("%s %p %llx done\n", __func__, realm, realm->ino);
+       doutc(cl, "%p %llx done\n", realm, realm->ino);
 }
 
 /*
@@ -766,6 +780,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
                           void *p, void *e, bool deletion,
                           struct ceph_snap_realm **realm_ret)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_mds_snap_realm *ri;    /* encoded */
        __le64 *snaps;                     /* encoded */
        __le64 *prior_parent_snaps;        /* encoded */
@@ -780,7 +795,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
 
        lockdep_assert_held_write(&mdsc->snap_rwsem);
 
-       dout("%s deletion=%d\n", __func__, deletion);
+       doutc(cl, "deletion=%d\n", deletion);
 more:
        realm = NULL;
        rebuild_snapcs = 0;
@@ -810,8 +825,8 @@ more:
        rebuild_snapcs += err;
 
        if (le64_to_cpu(ri->seq) > realm->seq) {
-               dout("%s updating %llx %p %lld -> %lld\n", __func__,
-                    realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
+               doutc(cl, "updating %llx %p %lld -> %lld\n", realm->ino,
+                     realm, realm->seq, le64_to_cpu(ri->seq));
                /* update realm parameters, snap lists */
                realm->seq = le64_to_cpu(ri->seq);
                realm->created = le64_to_cpu(ri->created);
@@ -834,16 +849,16 @@ more:
 
                rebuild_snapcs = 1;
        } else if (!realm->cached_context) {
-               dout("%s %llx %p seq %lld new\n", __func__,
-                    realm->ino, realm, realm->seq);
+               doutc(cl, "%llx %p seq %lld new\n", realm->ino, realm,
+                     realm->seq);
                rebuild_snapcs = 1;
        } else {
-               dout("%s %llx %p seq %lld unchanged\n", __func__,
-                    realm->ino, realm, realm->seq);
+               doutc(cl, "%llx %p seq %lld unchanged\n", realm->ino, realm,
+                     realm->seq);
        }
 
-       dout("done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino,
-            realm, rebuild_snapcs, p, e);
+       doutc(cl, "done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino,
+             realm, rebuild_snapcs, p, e);
 
        /*
         * this will always track the uppest parent realm from which
@@ -855,7 +870,7 @@ more:
 
        /* rebuild_snapcs when we reach the _end_ (root) of the trace */
        if (realm_to_rebuild && p >= e)
-               rebuild_snap_realms(realm_to_rebuild, &dirty_realms);
+               rebuild_snap_realms(mdsc, realm_to_rebuild, &dirty_realms);
 
        if (!first_realm)
                first_realm = realm;
@@ -873,7 +888,7 @@ more:
                realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
                                         dirty_item);
                list_del_init(&realm->dirty_item);
-               queue_realm_cap_snaps(realm);
+               queue_realm_cap_snaps(mdsc, realm);
        }
 
        if (realm_ret)
@@ -891,7 +906,7 @@ fail:
                ceph_put_snap_realm(mdsc, realm);
        if (first_realm)
                ceph_put_snap_realm(mdsc, first_realm);
-       pr_err("%s error %d\n", __func__, err);
+       pr_err_client(cl, "error %d\n", err);
 
        /*
         * When receiving a corrupted snap trace we don't know what
@@ -905,11 +920,12 @@ fail:
        WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO);
        ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr);
        if (ret)
-               pr_err("%s failed to blocklist %s: %d\n", __func__,
-                      ceph_pr_addr(&client->msgr.inst.addr), ret);
+               pr_err_client(cl, "failed to blocklist %s: %d\n",
+                             ceph_pr_addr(&client->msgr.inst.addr), ret);
 
-       WARN(1, "%s: %s%sdo remount to continue%s",
-            __func__, ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
+       WARN(1, "[client.%lld] %s %s%sdo remount to continue%s",
+            client->monc.auth->global_id, __func__,
+            ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
             ret ? "" : " was blocklisted, ",
             err == -EIO ? " after corrupted snaptrace is fixed" : "");
 
@@ -925,11 +941,12 @@ fail:
  */
 static void flush_snaps(struct ceph_mds_client *mdsc)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_inode_info *ci;
        struct inode *inode;
        struct ceph_mds_session *session = NULL;
 
-       dout("%s\n", __func__);
+       doutc(cl, "begin\n");
        spin_lock(&mdsc->snap_flush_lock);
        while (!list_empty(&mdsc->snap_flush_list)) {
                ci = list_first_entry(&mdsc->snap_flush_list,
@@ -944,7 +961,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
        spin_unlock(&mdsc->snap_flush_lock);
 
        ceph_put_mds_session(session);
-       dout("%s done\n", __func__);
+       doutc(cl, "done\n");
 }
 
 /**
@@ -960,7 +977,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
 void ceph_change_snap_realm(struct inode *inode, struct ceph_snap_realm *realm)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
        struct ceph_snap_realm *oldrealm = ci->i_snap_realm;
 
        lockdep_assert_held(&ci->i_ceph_lock);
@@ -1000,6 +1017,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                      struct ceph_mds_session *session,
                      struct ceph_msg *msg)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct super_block *sb = mdsc->fsc->sb;
        int mds = session->s_mds;
        u64 split;
@@ -1030,8 +1048,8 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
        trace_len = le32_to_cpu(h->trace_len);
        p += sizeof(*h);
 
-       dout("%s from mds%d op %s split %llx tracelen %d\n", __func__,
-            mds, ceph_snap_op_name(op), split, trace_len);
+       doutc(cl, "from mds%d op %s split %llx tracelen %d\n", mds,
+             ceph_snap_op_name(op), split, trace_len);
 
        down_write(&mdsc->snap_rwsem);
        locked_rwsem = 1;
@@ -1062,7 +1080,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                                goto out;
                }
 
-               dout("splitting snap_realm %llx %p\n", realm->ino, realm);
+               doutc(cl, "splitting snap_realm %llx %p\n", realm->ino, realm);
                for (i = 0; i < num_split_inos; i++) {
                        struct ceph_vino vino = {
                                .ino = le64_to_cpu(split_inos[i]),
@@ -1087,13 +1105,13 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                         */
                        if (ci->i_snap_realm->created >
                            le64_to_cpu(ri->created)) {
-                               dout(" leaving %p %llx.%llx in newer realm %llx %p\n",
-                                    inode, ceph_vinop(inode), ci->i_snap_realm->ino,
-                                    ci->i_snap_realm);
+                               doutc(cl, " leaving %p %llx.%llx in newer realm %llx %p\n",
+                                     inode, ceph_vinop(inode), ci->i_snap_realm->ino,
+                                     ci->i_snap_realm);
                                goto skip_inode;
                        }
-                       dout(" will move %p %llx.%llx to split realm %llx %p\n",
-                            inode, ceph_vinop(inode), realm->ino, realm);
+                       doutc(cl, " will move %p %llx.%llx to split realm %llx %p\n",
+                             inode, ceph_vinop(inode), realm->ino, realm);
 
                        ceph_get_snap_realm(mdsc, realm);
                        ceph_change_snap_realm(inode, realm);
@@ -1154,7 +1172,7 @@ skip_inode:
        return;
 
 bad:
-       pr_err("%s corrupt snap message from mds%d\n", __func__, mds);
+       pr_err_client(cl, "corrupt snap message from mds%d\n", mds);
        ceph_msg_dump(msg);
 out:
        if (locked_rwsem)
@@ -1170,6 +1188,7 @@ out:
 struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
                                            u64 snap)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_snapid_map *sm, *exist;
        struct rb_node **p, *parent;
        int ret;
@@ -1192,8 +1211,8 @@ struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
        }
        spin_unlock(&mdsc->snapid_map_lock);
        if (exist) {
-               dout("%s found snapid map %llx -> %x\n", __func__,
-                    exist->snap, exist->dev);
+               doutc(cl, "found snapid map %llx -> %x\n", exist->snap,
+                     exist->dev);
                return exist;
        }
 
@@ -1237,13 +1256,12 @@ struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
        if (exist) {
                free_anon_bdev(sm->dev);
                kfree(sm);
-               dout("%s found snapid map %llx -> %x\n", __func__,
-                    exist->snap, exist->dev);
+               doutc(cl, "found snapid map %llx -> %x\n", exist->snap,
+                     exist->dev);
                return exist;
        }
 
-       dout("%s create snapid map %llx -> %x\n", __func__,
-            sm->snap, sm->dev);
+       doutc(cl, "create snapid map %llx -> %x\n", sm->snap, sm->dev);
        return sm;
 }
 
@@ -1268,6 +1286,7 @@ void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
 
 void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_snapid_map *sm;
        unsigned long now;
        LIST_HEAD(to_free);
@@ -1289,7 +1308,7 @@ void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
        while (!list_empty(&to_free)) {
                sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
                list_del(&sm->lru);
-               dout("trim snapid map %llx -> %x\n", sm->snap, sm->dev);
+               doutc(cl, "trim snapid map %llx -> %x\n", sm->snap, sm->dev);
                free_anon_bdev(sm->dev);
                kfree(sm);
        }
@@ -1297,6 +1316,7 @@ void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
 
 void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
 {
+       struct ceph_client *cl = mdsc->fsc->client;
        struct ceph_snapid_map *sm;
        struct rb_node *p;
        LIST_HEAD(to_free);
@@ -1315,8 +1335,8 @@ void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
                list_del(&sm->lru);
                free_anon_bdev(sm->dev);
                if (WARN_ON_ONCE(atomic_read(&sm->ref))) {
-                       pr_err("snapid map %llx -> %x still in use\n",
-                              sm->snap, sm->dev);
+                       pr_err_client(cl, "snapid map %llx -> %x still in use\n",
+                                     sm->snap, sm->dev);
                }
                kfree(sm);
        }
index 2d7f5a8d4a926017524791a69af8b727e1f49731..5ec102f6b1ac514113dadf1b199732a565b9279c 100644 (file)
@@ -44,28 +44,29 @@ static LIST_HEAD(ceph_fsc_list);
  */
 static void ceph_put_super(struct super_block *s)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(s);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
 
-       dout("put_super\n");
+       doutc(fsc->client, "begin\n");
        ceph_fscrypt_free_dummy_policy(fsc);
        ceph_mdsc_close_sessions(fsc->mdsc);
+       doutc(fsc->client, "done\n");
 }
 
 static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
-       struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(d_inode(dentry));
        struct ceph_mon_client *monc = &fsc->client->monc;
        struct ceph_statfs st;
        int i, err;
        u64 data_pool;
 
+       doutc(fsc->client, "begin\n");
        if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
                data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
        } else {
                data_pool = CEPH_NOPOOL;
        }
 
-       dout("statfs\n");
        err = ceph_monc_do_statfs(monc, data_pool, &st);
        if (err < 0)
                return err;
@@ -113,24 +114,26 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
        /* fold the fs_cluster_id into the upper bits */
        buf->f_fsid.val[1] = monc->fs_cluster_id;
 
+       doutc(fsc->client, "done\n");
        return 0;
 }
 
 static int ceph_sync_fs(struct super_block *sb, int wait)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+       struct ceph_client *cl = fsc->client;
 
        if (!wait) {
-               dout("sync_fs (non-blocking)\n");
+               doutc(cl, "(non-blocking)\n");
                ceph_flush_dirty_caps(fsc->mdsc);
-               dout("sync_fs (non-blocking) done\n");
+               doutc(cl, "(non-blocking) done\n");
                return 0;
        }
 
-       dout("sync_fs (blocking)\n");
+       doutc(cl, "(blocking)\n");
        ceph_osdc_sync(&fsc->client->osdc);
        ceph_mdsc_sync(fsc->mdsc);
-       dout("sync_fs (blocking) done\n");
+       doutc(cl, "(blocking) done\n");
        return 0;
 }
 
@@ -341,7 +344,7 @@ static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
        char *dev_name = param->string, *dev_name_end;
        int ret;
 
-       dout("%s '%s'\n", __func__, dev_name);
+       dout("'%s'\n", dev_name);
        if (!dev_name || !*dev_name)
                return invalfc(fc, "Empty source");
 
@@ -413,7 +416,7 @@ static int ceph_parse_mount_param(struct fs_context *fc,
                return ret;
 
        token = fs_parse(fc, ceph_mount_parameters, param, &result);
-       dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
+       dout("%s: fs_parse '%s' token %d\n",__func__, param->key, token);
        if (token < 0)
                return token;
 
@@ -684,7 +687,7 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
  */
 static int ceph_show_options(struct seq_file *m, struct dentry *root)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(root->d_sb);
        struct ceph_mount_options *fsopt = fsc->mount_options;
        size_t pos;
        int ret;
@@ -881,7 +884,7 @@ static void flush_fs_workqueues(struct ceph_fs_client *fsc)
 
 static void destroy_fs_client(struct ceph_fs_client *fsc)
 {
-       dout("destroy_fs_client %p\n", fsc);
+       doutc(fsc->client, "%p\n", fsc);
 
        spin_lock(&ceph_fsc_lock);
        list_del(&fsc->metric_wakeup);
@@ -896,7 +899,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
        ceph_destroy_client(fsc->client);
 
        kfree(fsc);
-       dout("destroy_fs_client %p done\n", fsc);
+       dout("%s: %p done\n", __func__, fsc);
 }
 
 /*
@@ -1015,9 +1018,9 @@ static void __ceph_umount_begin(struct ceph_fs_client *fsc)
  */
 void ceph_umount_begin(struct super_block *sb)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
 
-       dout("ceph_umount_begin - starting forced umount\n");
+       doutc(fsc->client, "starting forced umount\n");
        if (!fsc)
                return;
        fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
@@ -1045,13 +1048,14 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
                                       const char *path,
                                       unsigned long started)
 {
+       struct ceph_client *cl = fsc->client;
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req = NULL;
        int err;
        struct dentry *root;
 
        /* open dir */
-       dout("open_root_inode opening '%s'\n", path);
+       doutc(cl, "opening '%s'\n", path);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
@@ -1071,13 +1075,13 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
        if (err == 0) {
                struct inode *inode = req->r_target_inode;
                req->r_target_inode = NULL;
-               dout("open_root_inode success\n");
+               doutc(cl, "success\n");
                root = d_make_root(inode);
                if (!root) {
                        root = ERR_PTR(-ENOMEM);
                        goto out;
                }
-               dout("open_root_inode success, root dentry is %p\n", root);
+               doutc(cl, "success, root dentry is %p\n", root);
        } else {
                root = ERR_PTR(err);
        }
@@ -1136,11 +1140,12 @@ static int ceph_apply_test_dummy_encryption(struct super_block *sb,
 static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
                                      struct fs_context *fc)
 {
+       struct ceph_client *cl = fsc->client;
        int err;
        unsigned long started = jiffies;  /* note the start time */
        struct dentry *root;
 
-       dout("mount start %p\n", fsc);
+       doutc(cl, "mount start %p\n", fsc);
        mutex_lock(&fsc->client->mount_mutex);
 
        if (!fsc->sb->s_root) {
@@ -1163,7 +1168,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
                if (err)
                        goto out;
 
-               dout("mount opening path '%s'\n", path);
+               doutc(cl, "mount opening path '%s'\n", path);
 
                ceph_fs_debugfs_init(fsc);
 
@@ -1178,7 +1183,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
        }
 
        fsc->mount_state = CEPH_MOUNT_MOUNTED;
-       dout("mount success\n");
+       doutc(cl, "mount success\n");
        mutex_unlock(&fsc->client->mount_mutex);
        return root;
 
@@ -1191,9 +1196,10 @@ out:
 static int ceph_set_super(struct super_block *s, struct fs_context *fc)
 {
        struct ceph_fs_client *fsc = s->s_fs_info;
+       struct ceph_client *cl = fsc->client;
        int ret;
 
-       dout("set_super %p\n", s);
+       doutc(cl, "%p\n", s);
 
        s->s_maxbytes = MAX_LFS_FILESIZE;
 
@@ -1226,31 +1232,32 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
        struct ceph_fs_client *new = fc->s_fs_info;
        struct ceph_mount_options *fsopt = new->mount_options;
        struct ceph_options *opt = new->client->options;
-       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
+       struct ceph_client *cl = fsc->client;
 
-       dout("ceph_compare_super %p\n", sb);
+       doutc(cl, "%p\n", sb);
 
        if (compare_mount_options(fsopt, opt, fsc)) {
-               dout("monitor(s)/mount options don't match\n");
+               doutc(cl, "monitor(s)/mount options don't match\n");
                return 0;
        }
        if ((opt->flags & CEPH_OPT_FSID) &&
            ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
-               dout("fsid doesn't match\n");
+               doutc(cl, "fsid doesn't match\n");
                return 0;
        }
        if (fc->sb_flags != (sb->s_flags & ~SB_BORN)) {
-               dout("flags differ\n");
+               doutc(cl, "flags differ\n");
                return 0;
        }
 
        if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
-               dout("client is blocklisted (and CLEANRECOVER is not set)\n");
+               doutc(cl, "client is blocklisted (and CLEANRECOVER is not set)\n");
                return 0;
        }
 
        if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
-               dout("client has been forcibly unmounted\n");
+               doutc(cl, "client has been forcibly unmounted\n");
                return 0;
        }
 
@@ -1322,9 +1329,9 @@ static int ceph_get_tree(struct fs_context *fc)
                goto out;
        }
 
-       if (ceph_sb_to_client(sb) != fsc) {
+       if (ceph_sb_to_fs_client(sb) != fsc) {
                destroy_fs_client(fsc);
-               fsc = ceph_sb_to_client(sb);
+               fsc = ceph_sb_to_fs_client(sb);
                dout("get_sb got existing client %p\n", fsc);
        } else {
                dout("get_sb using new client %p\n", fsc);
@@ -1338,8 +1345,9 @@ static int ceph_get_tree(struct fs_context *fc)
                err = PTR_ERR(res);
                goto out_splat;
        }
-       dout("root %p inode %p ino %llx.%llx\n", res,
-            d_inode(res), ceph_vinop(d_inode(res)));
+
+       doutc(fsc->client, "root %p inode %p ino %llx.%llx\n", res,
+                   d_inode(res), ceph_vinop(d_inode(res)));
        fc->root = fsc->sb->s_root;
        return 0;
 
@@ -1377,7 +1385,7 @@ static int ceph_reconfigure_fc(struct fs_context *fc)
        struct ceph_parse_opts_ctx *pctx = fc->fs_private;
        struct ceph_mount_options *fsopt = pctx->opts;
        struct super_block *sb = fc->root->d_sb;
-       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
 
        err = ceph_apply_test_dummy_encryption(sb, fc, fsopt);
        if (err)
@@ -1397,7 +1405,8 @@ static int ceph_reconfigure_fc(struct fs_context *fc)
                kfree(fsc->mount_options->mon_addr);
                fsc->mount_options->mon_addr = fsopt->mon_addr;
                fsopt->mon_addr = NULL;
-               pr_notice("ceph: monitor addresses recorded, but not used for reconnection");
+               pr_notice_client(fsc->client,
+                       "monitor addresses recorded, but not used for reconnection");
        }
 
        sync_filesystem(sb);
@@ -1516,11 +1525,12 @@ void ceph_dec_osd_stopping_blocker(struct ceph_mds_client *mdsc)
 
 static void ceph_kill_sb(struct super_block *s)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(s);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
+       struct ceph_client *cl = fsc->client;
        struct ceph_mds_client *mdsc = fsc->mdsc;
        bool wait;
 
-       dout("kill_sb %p\n", s);
+       doutc(cl, "%p\n", s);
 
        ceph_mdsc_pre_umount(mdsc);
        flush_fs_workqueues(fsc);
@@ -1551,9 +1561,9 @@ static void ceph_kill_sb(struct super_block *s)
                                        &mdsc->stopping_waiter,
                                        fsc->client->options->mount_timeout);
                if (!timeleft) /* timed out */
-                       pr_warn("umount timed out, %ld\n", timeleft);
+                       pr_warn_client(cl, "umount timed out, %ld\n", timeleft);
                else if (timeleft < 0) /* killed */
-                       pr_warn("umount was killed, %ld\n", timeleft);
+                       pr_warn_client(cl, "umount was killed, %ld\n", timeleft);
        }
 
        mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
@@ -1572,13 +1582,13 @@ static struct file_system_type ceph_fs_type = {
        .name           = "ceph",
        .init_fs_context = ceph_init_fs_context,
        .kill_sb        = ceph_kill_sb,
-       .fs_flags       = FS_RENAME_DOES_D_MOVE,
+       .fs_flags       = FS_RENAME_DOES_D_MOVE | FS_ALLOW_IDMAP,
 };
 MODULE_ALIAS_FS("ceph");
 
 int ceph_force_reconnect(struct super_block *sb)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
        int err = 0;
 
        fsc->mount_state = CEPH_MOUNT_RECOVER;
@@ -1671,6 +1681,11 @@ static const struct kernel_param_ops param_ops_mount_syntax = {
 module_param_cb(mount_syntax_v1, &param_ops_mount_syntax, &mount_support, 0444);
 module_param_cb(mount_syntax_v2, &param_ops_mount_syntax, &mount_support, 0444);
 
+bool enable_unsafe_idmap = false;
+module_param(enable_unsafe_idmap, bool, 0644);
+MODULE_PARM_DESC(enable_unsafe_idmap,
+                "Allow to use idmapped mounts with MDS without CEPHFS_FEATURE_HAS_OWNER_UIDGID");
+
 module_init(init_ceph);
 module_exit(exit_ceph);
 
index 98844fc8a2f7af21c53923291bf54fc55b96fa00..fe0f64a0acb27058014b188bec906e07310fad1f 100644 (file)
@@ -488,13 +488,13 @@ ceph_inode(const struct inode *inode)
 }
 
 static inline struct ceph_fs_client *
-ceph_inode_to_client(const struct inode *inode)
+ceph_inode_to_fs_client(const struct inode *inode)
 {
        return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
 }
 
 static inline struct ceph_fs_client *
-ceph_sb_to_client(const struct super_block *sb)
+ceph_sb_to_fs_client(const struct super_block *sb)
 {
        return (struct ceph_fs_client *)sb->s_fs_info;
 }
@@ -502,7 +502,13 @@ ceph_sb_to_client(const struct super_block *sb)
 static inline struct ceph_mds_client *
 ceph_sb_to_mdsc(const struct super_block *sb)
 {
-       return (struct ceph_mds_client *)ceph_sb_to_client(sb)->mdsc;
+       return (struct ceph_mds_client *)ceph_sb_to_fs_client(sb)->mdsc;
+}
+
+static inline struct ceph_client *
+ceph_inode_to_client(const struct inode *inode)
+{
+       return (struct ceph_client *)ceph_inode_to_fs_client(inode)->client;
 }
 
 static inline struct ceph_vino
@@ -558,7 +564,7 @@ static inline u64 ceph_snap(struct inode *inode)
  */
 static inline u64 ceph_present_ino(struct super_block *sb, u64 ino)
 {
-       if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)))
+       if (unlikely(ceph_test_mount_opt(ceph_sb_to_fs_client(sb), INO32)))
                return ceph_ino_to_ino32(ino);
        return ino;
 }
@@ -1094,8 +1100,8 @@ struct ceph_iattr {
        struct ceph_fscrypt_auth        *fscrypt_auth;
 };
 
-extern int __ceph_setattr(struct inode *inode, struct iattr *attr,
-                         struct ceph_iattr *cia);
+extern int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
+                         struct iattr *attr, struct ceph_iattr *cia);
 extern int ceph_setattr(struct mnt_idmap *idmap,
                        struct dentry *dentry, struct iattr *attr);
 extern int ceph_getattr(struct mnt_idmap *idmap,
@@ -1106,7 +1112,7 @@ void ceph_inode_shutdown(struct inode *inode);
 static inline bool ceph_inode_is_shutdown(struct inode *inode)
 {
        unsigned long flags = READ_ONCE(ceph_inode(inode)->i_ceph_flags);
-       struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+       struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
        int state = READ_ONCE(fsc->mount_state);
 
        return (flags & CEPH_I_SHUTDOWN) || state >= CEPH_MOUNT_SHUTDOWN;
@@ -1223,7 +1229,8 @@ extern void ceph_add_cap(struct inode *inode,
                         unsigned cap, unsigned seq, u64 realmino, int flags,
                         struct ceph_cap **new_cap);
 extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
-extern void ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
+extern void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
+                           bool queue_release);
 extern void __ceph_remove_caps(struct ceph_inode_info *ci);
 extern void ceph_put_cap(struct ceph_mds_client *mdsc,
                         struct ceph_cap *cap);
index 097ce7f74073d4f4ce01b6e7187bce4121a8f050..e066a556eccbfe370afdbf6f4aade3e26df80bb9 100644 (file)
@@ -57,7 +57,8 @@ static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
 static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
                                    size_t size)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
+       struct ceph_client *cl = fsc->client;
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        struct ceph_string *pool_ns;
        s64 pool = ci->i_layout.pool_id;
@@ -69,7 +70,7 @@ static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
 
        pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
 
-       dout("ceph_vxattrcb_layout %p\n", &ci->netfs.inode);
+       doutc(cl, "%p\n", &ci->netfs.inode);
        down_read(&osdc->lock);
        pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
        if (pool_name) {
@@ -161,7 +162,7 @@ static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
                                         char *val, size_t size)
 {
        ssize_t ret;
-       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        s64 pool = ci->i_layout.pool_id;
        const char *pool_name;
@@ -313,7 +314,7 @@ static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
 static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
                                          char *val, size_t size)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
 
        return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid);
 }
@@ -321,7 +322,7 @@ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
 static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
                                       char *val, size_t size)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
 
        return ceph_fmt_xattr(val, size, "client%lld",
                              ceph_client_gid(fsc->client));
@@ -570,6 +571,8 @@ static int __set_xattr(struct ceph_inode_info *ci,
                           int flags, int update_xattr,
                           struct ceph_inode_xattr **newxattr)
 {
+       struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_xattr *xattr = NULL;
@@ -626,7 +629,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
                xattr->should_free_name = update_xattr;
 
                ci->i_xattrs.count++;
-               dout("%s count=%d\n", __func__, ci->i_xattrs.count);
+               doutc(cl, "count=%d\n", ci->i_xattrs.count);
        } else {
                kfree(*newxattr);
                *newxattr = NULL;
@@ -654,13 +657,13 @@ static int __set_xattr(struct ceph_inode_info *ci,
        if (new) {
                rb_link_node(&xattr->node, parent, p);
                rb_insert_color(&xattr->node, &ci->i_xattrs.index);
-               dout("%s p=%p\n", __func__, p);
+               doutc(cl, "p=%p\n", p);
        }
 
-       dout("%s added %llx.%llx xattr %p %.*s=%.*s%s\n", __func__,
-            ceph_vinop(&ci->netfs.inode), xattr, name_len, name,
-            min(val_len, MAX_XATTR_VAL_PRINT_LEN), val,
-            val_len > MAX_XATTR_VAL_PRINT_LEN ? "..." : "");
+       doutc(cl, "added %p %llx.%llx xattr %p %.*s=%.*s%s\n", inode,
+             ceph_vinop(inode), xattr, name_len, name, min(val_len,
+             MAX_XATTR_VAL_PRINT_LEN), val,
+             val_len > MAX_XATTR_VAL_PRINT_LEN ? "..." : "");
 
        return 0;
 }
@@ -668,6 +671,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
 static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
                           const char *name)
 {
+       struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct ceph_inode_xattr *xattr = NULL;
@@ -688,13 +692,13 @@ static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
                else {
                        int len = min(xattr->val_len, MAX_XATTR_VAL_PRINT_LEN);
 
-                       dout("%s %s: found %.*s%s\n", __func__, name, len,
-                            xattr->val, xattr->val_len > len ? "..." : "");
+                       doutc(cl, "%s found %.*s%s\n", name, len, xattr->val,
+                             xattr->val_len > len ? "..." : "");
                        return xattr;
                }
        }
 
-       dout("%s %s: not found\n", __func__, name);
+       doutc(cl, "%s not found\n", name);
 
        return NULL;
 }
@@ -735,19 +739,20 @@ static int __remove_xattr(struct ceph_inode_info *ci,
 static char *__copy_xattr_names(struct ceph_inode_info *ci,
                                char *dest)
 {
+       struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
        struct rb_node *p;
        struct ceph_inode_xattr *xattr = NULL;
 
        p = rb_first(&ci->i_xattrs.index);
-       dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
+       doutc(cl, "count=%d\n", ci->i_xattrs.count);
 
        while (p) {
                xattr = rb_entry(p, struct ceph_inode_xattr, node);
                memcpy(dest, xattr->name, xattr->name_len);
                dest[xattr->name_len] = '\0';
 
-               dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
-                    xattr->name_len, ci->i_xattrs.names_size);
+               doutc(cl, "dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
+                     xattr->name_len, ci->i_xattrs.names_size);
 
                dest += xattr->name_len + 1;
                p = rb_next(p);
@@ -758,19 +763,19 @@ static char *__copy_xattr_names(struct ceph_inode_info *ci,
 
 void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
 {
+       struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
        struct rb_node *p, *tmp;
        struct ceph_inode_xattr *xattr = NULL;
 
        p = rb_first(&ci->i_xattrs.index);
 
-       dout("__ceph_destroy_xattrs p=%p\n", p);
+       doutc(cl, "p=%p\n", p);
 
        while (p) {
                xattr = rb_entry(p, struct ceph_inode_xattr, node);
                tmp = p;
                p = rb_next(tmp);
-               dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
-                    xattr->name_len, xattr->name);
+               doutc(cl, "next p=%p (%.*s)\n", p, xattr->name_len, xattr->name);
                rb_erase(tmp, &ci->i_xattrs.index);
 
                __free_xattr(xattr);
@@ -787,6 +792,7 @@ static int __build_xattrs(struct inode *inode)
        __releases(ci->i_ceph_lock)
        __acquires(ci->i_ceph_lock)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        u32 namelen;
        u32 numattr = 0;
        void *p, *end;
@@ -798,8 +804,8 @@ static int __build_xattrs(struct inode *inode)
        int err = 0;
        int i;
 
-       dout("__build_xattrs() len=%d\n",
-            ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
+       doutc(cl, "len=%d\n",
+             ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
 
        if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
                return 0; /* already built */
@@ -874,6 +880,8 @@ bad:
 static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
                                    int val_size)
 {
+       struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
+
        /*
         * 4 bytes for the length, and additional 4 bytes per each xattr name,
         * 4 bytes per each value
@@ -881,9 +889,8 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
        int size = 4 + ci->i_xattrs.count*(4 + 4) +
                             ci->i_xattrs.names_size +
                             ci->i_xattrs.vals_size;
-       dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
-            ci->i_xattrs.count, ci->i_xattrs.names_size,
-            ci->i_xattrs.vals_size);
+       doutc(cl, "c=%d names.size=%d vals.size=%d\n", ci->i_xattrs.count,
+             ci->i_xattrs.names_size, ci->i_xattrs.vals_size);
 
        if (name_size)
                size += 4 + 4 + name_size + val_size;
@@ -899,12 +906,14 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
  */
 struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
 {
+       struct inode *inode = &ci->netfs.inode;
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct rb_node *p;
        struct ceph_inode_xattr *xattr = NULL;
        struct ceph_buffer *old_blob = NULL;
        void *dest;
 
-       dout("__build_xattrs_blob %p\n", &ci->netfs.inode);
+       doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
        if (ci->i_xattrs.dirty) {
                int need = __get_required_blob_size(ci, 0, 0);
 
@@ -962,6 +971,7 @@ static inline int __get_request_mask(struct inode *in) {
 ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
                      size_t size)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_inode_xattr *xattr;
        struct ceph_vxattr *vxattr;
@@ -1000,8 +1010,9 @@ handle_non_vxattrs:
        req_mask = __get_request_mask(inode);
 
        spin_lock(&ci->i_ceph_lock);
-       dout("getxattr %p name '%s' ver=%lld index_ver=%lld\n", inode, name,
-            ci->i_xattrs.version, ci->i_xattrs.index_version);
+       doutc(cl, "%p %llx.%llx name '%s' ver=%lld index_ver=%lld\n", inode,
+             ceph_vinop(inode), name, ci->i_xattrs.version,
+             ci->i_xattrs.index_version);
 
        if (ci->i_xattrs.version == 0 ||
            !((req_mask & CEPH_CAP_XATTR_SHARED) ||
@@ -1010,8 +1021,9 @@ handle_non_vxattrs:
 
                /* security module gets xattr while filling trace */
                if (current->journal_info) {
-                       pr_warn_ratelimited("sync getxattr %p "
-                                           "during filling trace\n", inode);
+                       pr_warn_ratelimited_client(cl,
+                               "sync %p %llx.%llx during filling trace\n",
+                               inode, ceph_vinop(inode));
                        return -EBUSY;
                }
 
@@ -1053,14 +1065,16 @@ out:
 ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
 {
        struct inode *inode = d_inode(dentry);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        bool len_only = (size == 0);
        u32 namelen;
        int err;
 
        spin_lock(&ci->i_ceph_lock);
-       dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
-            ci->i_xattrs.version, ci->i_xattrs.index_version);
+       doutc(cl, "%p %llx.%llx ver=%lld index_ver=%lld\n", inode,
+             ceph_vinop(inode), ci->i_xattrs.version,
+             ci->i_xattrs.index_version);
 
        if (ci->i_xattrs.version == 0 ||
            !__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) {
@@ -1094,7 +1108,8 @@ out:
 static int ceph_sync_setxattr(struct inode *inode, const char *name,
                              const char *value, size_t size, int flags)
 {
-       struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
+       struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_mds_request *req;
        struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1119,7 +1134,7 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
                        flags |= CEPH_XATTR_REMOVE;
        }
 
-       dout("setxattr value size: %zu\n", size);
+       doutc(cl, "name %s value size %zu\n", name, size);
 
        /* do request */
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
@@ -1148,10 +1163,10 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
        req->r_num_caps = 1;
        req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
 
-       dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
+       doutc(cl, "xattr.ver (before): %lld\n", ci->i_xattrs.version);
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        ceph_mdsc_put_request(req);
-       dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
+       doutc(cl, "xattr.ver (after): %lld\n", ci->i_xattrs.version);
 
 out:
        if (pagelist)
@@ -1162,9 +1177,10 @@ out:
 int __ceph_setxattr(struct inode *inode, const char *name,
                        const void *value, size_t size, int flags)
 {
+       struct ceph_client *cl = ceph_inode_to_client(inode);
        struct ceph_vxattr *vxattr;
        struct ceph_inode_info *ci = ceph_inode(inode);
-       struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+       struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
        struct ceph_cap_flush *prealloc_cf = NULL;
        struct ceph_buffer *old_blob = NULL;
        int issued;
@@ -1220,9 +1236,9 @@ retry:
        required_blob_size = __get_required_blob_size(ci, name_len, val_len);
        if ((ci->i_xattrs.version == 0) || !(issued & CEPH_CAP_XATTR_EXCL) ||
            (required_blob_size > mdsc->mdsmap->m_max_xattr_size)) {
-               dout("%s do sync setxattr: version: %llu size: %d max: %llu\n",
-                    __func__, ci->i_xattrs.version, required_blob_size,
-                    mdsc->mdsmap->m_max_xattr_size);
+               doutc(cl, "sync version: %llu size: %d max: %llu\n",
+                     ci->i_xattrs.version, required_blob_size,
+                     mdsc->mdsmap->m_max_xattr_size);
                goto do_sync;
        }
 
@@ -1236,8 +1252,8 @@ retry:
                }
        }
 
-       dout("setxattr %p name '%s' issued %s\n", inode, name,
-            ceph_cap_string(issued));
+       doutc(cl, "%p %llx.%llx name '%s' issued %s\n", inode,
+             ceph_vinop(inode), name, ceph_cap_string(issued));
        __build_xattrs(inode);
 
        if (!ci->i_xattrs.prealloc_blob ||
@@ -1246,7 +1262,8 @@ retry:
 
                spin_unlock(&ci->i_ceph_lock);
                ceph_buffer_put(old_blob); /* Shouldn't be required */
-               dout(" pre-allocating new blob size=%d\n", required_blob_size);
+               doutc(cl, " pre-allocating new blob size=%d\n",
+                     required_blob_size);
                blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
                if (!blob)
                        goto do_sync_unlocked;
@@ -1285,8 +1302,9 @@ do_sync_unlocked:
 
        /* security module set xattr while filling trace */
        if (current->journal_info) {
-               pr_warn_ratelimited("sync setxattr %p "
-                                   "during filling trace\n", inode);
+               pr_warn_ratelimited_client(cl,
+                               "sync %p %llx.%llx during filling trace\n",
+                               inode, ceph_vinop(inode));
                err = -EBUSY;
        } else {
                err = ceph_sync_setxattr(inode, name, value, size, flags);
index b784c393a5041e05abcf06440b6fddba013debba..77240953a92e0e1d65a49fc946bd1c830836b909 100644 (file)
@@ -46,6 +46,7 @@ static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf)
                         EFI_VARIABLE_BOOTSERVICE_ACCESS |
                         EFI_VARIABLE_RUNTIME_ACCESS;
        u64 storage_space, remaining_space, max_variable_size;
+       u64 id = huge_encode_dev(dentry->d_sb->s_dev);
        efi_status_t status;
 
        /* Some UEFI firmware does not implement QueryVariableInfo() */
@@ -69,6 +70,7 @@ static int efivarfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_blocks   = storage_space;
        buf->f_bfree    = remaining_space;
        buf->f_type     = dentry->d_sb->s_magic;
+       buf->f_fsid     = u64_to_fsid(id);
 
        /*
         * In f_bavail we declare the free space that the kernel will allow writing
index b287f47c165ba8775d978343164d80e472f4e4d7..f17fdac76b2eea716631f63bce3ca2c66b14a2fc 100644 (file)
@@ -123,6 +123,7 @@ static const struct super_operations efs_superblock_operations = {
 };
 
 static const struct export_operations efs_export_ops = {
+       .encode_fh      = generic_encode_ino32_fh,
        .fh_to_dentry   = efs_fh_to_dentry,
        .fh_to_parent   = efs_fh_to_parent,
        .get_parent     = efs_get_parent,
index 976dc39a88f74e858423eb1ab7bcbfd900f7a326..3789d62245136a488d7b5d1bf30e40bc53314ca5 100644 (file)
@@ -567,6 +567,7 @@ static struct dentry *erofs_get_parent(struct dentry *child)
 }
 
 static const struct export_operations erofs_export_ops = {
+       .encode_fh = generic_encode_ino32_fh,
        .fh_to_dentry = erofs_fh_to_dentry,
        .fh_to_parent = erofs_fh_to_parent,
        .get_parent = erofs_get_parent,
index 02c4e2937879e991b640aa9e39e06aa35ca91515..bfdfafe0099309f59da5c19b27eee2f4ed10db31 100644 (file)
@@ -295,6 +295,7 @@ int exfat_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
        if (attr->ia_valid & ATTR_SIZE)
                inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
 
+       setattr_copy(&nop_mnt_idmap, inode, attr);
        exfat_truncate_inode_atime(inode);
 
        if (attr->ia_valid & ATTR_SIZE) {
index 875234179d1f61e9e49a5e79700334159d5dc189..e7ff58b8e68c78d53c01c8126cd1b6276c632a8c 100644 (file)
@@ -56,18 +56,18 @@ int __exfat_write_inode(struct inode *inode, int sync)
                        &ep->dentry.file.create_time,
                        &ep->dentry.file.create_date,
                        &ep->dentry.file.create_time_cs);
+       ts = inode_get_mtime(inode);
        exfat_set_entry_time(sbi, &ts,
                             &ep->dentry.file.modify_tz,
                             &ep->dentry.file.modify_time,
                             &ep->dentry.file.modify_date,
                             &ep->dentry.file.modify_time_cs);
-       inode_set_mtime_to_ts(inode, ts);
+       ts = inode_get_atime(inode);
        exfat_set_entry_time(sbi, &ts,
                             &ep->dentry.file.access_tz,
                             &ep->dentry.file.access_time,
                             &ep->dentry.file.access_date,
                             NULL);
-       inode_set_atime_to_ts(inode, ts);
 
        /* File size should be zero if there is no cluster allocated */
        on_disk_size = i_size_read(inode);
index c20704aa21b3d0d058dbcc915c59d66b4e337497..3ae0154c5680b2c06771a3819d825a1953aa707f 100644 (file)
@@ -342,43 +342,30 @@ out:
        return error;
 }
 
+#define FILEID_INO64_GEN_LEN 3
+
 /**
- * export_encode_fh - default export_operations->encode_fh function
+ * exportfs_encode_ino64_fid - encode non-decodeable 64bit ino file id
  * @inode:   the object to encode
  * @fid:     where to store the file handle fragment
- * @max_len: maximum length to store there
- * @parent:  parent directory inode, if wanted
+ * @max_len: maximum length to store there (in 4 byte units)
  *
- * This default encode_fh function assumes that the 32 inode number
- * is suitable for locating an inode, and that the generation number
- * can be used to check that it is still valid.  It places them in the
- * filehandle fragment where export_decode_fh expects to find them.
+ * This generic function is used to encode a non-decodeable file id for
+ * fanotify for filesystems that do not support NFS export.
  */
-static int export_encode_fh(struct inode *inode, struct fid *fid,
-               int *max_len, struct inode *parent)
+static int exportfs_encode_ino64_fid(struct inode *inode, struct fid *fid,
+                                    int *max_len)
 {
-       int len = *max_len;
-       int type = FILEID_INO32_GEN;
-
-       if (parent && (len < 4)) {
-               *max_len = 4;
-               return FILEID_INVALID;
-       } else if (len < 2) {
-               *max_len = 2;
+       if (*max_len < FILEID_INO64_GEN_LEN) {
+               *max_len = FILEID_INO64_GEN_LEN;
                return FILEID_INVALID;
        }
 
-       len = 2;
-       fid->i32.ino = inode->i_ino;
-       fid->i32.gen = inode->i_generation;
-       if (parent) {
-               fid->i32.parent_ino = parent->i_ino;
-               fid->i32.parent_gen = parent->i_generation;
-               len = 4;
-               type = FILEID_INO32_GEN_PARENT;
-       }
-       *max_len = len;
-       return type;
+       fid->i64.ino = inode->i_ino;
+       fid->i64.gen = inode->i_generation;
+       *max_len = FILEID_INO64_GEN_LEN;
+
+       return FILEID_INO64_GEN;
 }
 
 /**
@@ -396,17 +383,13 @@ int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
 {
        const struct export_operations *nop = inode->i_sb->s_export_op;
 
-       /*
-        * If a decodeable file handle was requested, we need to make sure that
-        * filesystem can decode file handles.
-        */
-       if (nop && !(flags & EXPORT_FH_FID) && !nop->fh_to_dentry)
+       if (!exportfs_can_encode_fh(nop, flags))
                return -EOPNOTSUPP;
 
-       if (nop && nop->encode_fh)
-               return nop->encode_fh(inode, fid->raw, max_len, parent);
+       if (!nop && (flags & EXPORT_FH_FID))
+               return exportfs_encode_ino64_fid(inode, fid, max_len);
 
-       return export_encode_fh(inode, fid, max_len, parent);
+       return nop->encode_fh(inode, fid->raw, max_len, parent);
 }
 EXPORT_SYMBOL_GPL(exportfs_encode_inode_fh);
 
@@ -456,7 +439,7 @@ exportfs_decode_fh_raw(struct vfsmount *mnt, struct fid *fid, int fh_len,
        /*
         * Try to get any dentry for the given file handle from the filesystem.
         */
-       if (!nop || !nop->fh_to_dentry)
+       if (!exportfs_can_decode_fh(nop))
                return ERR_PTR(-ESTALE);
        result = nop->fh_to_dentry(mnt->mnt_sb, fid, fh_len, fileid_type);
        if (IS_ERR_OR_NULL(result))
index 645ee6142f69e3bfa78b8c2eedd0325e134e68e7..01f9addc8b1f60ab0ae8533e3673baa38fdae934 100644 (file)
@@ -397,6 +397,7 @@ static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
 }
 
 static const struct export_operations ext2_export_ops = {
+       .encode_fh = generic_encode_ino32_fh,
        .fh_to_dentry = ext2_fh_to_dentry,
        .fh_to_parent = ext2_fh_to_parent,
        .get_parent = ext2_get_parent,
index 54a9dde7483a5a505f41f75fde05a763a114c443..c5fcf377ab1faad832c9165fd2ed299d9c3e94f6 100644 (file)
@@ -1654,6 +1654,7 @@ static const struct super_operations ext4_sops = {
 };
 
 static const struct export_operations ext4_export_ops = {
+       .encode_fh = generic_encode_ino32_fh,
        .fh_to_dentry = ext4_fh_to_dentry,
        .fh_to_parent = ext4_fh_to_parent,
        .get_parent = ext4_get_parent,
index d820801f473e56ab7ea8d66f97c97e95293c4463..36e5dab6baaee99141ab0a42741e7fdf9879fe08 100644 (file)
@@ -893,14 +893,15 @@ static bool cluster_has_invalid_data(struct compress_ctx *cc)
 
 bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
 {
+#ifdef CONFIG_F2FS_CHECK_FS
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
        unsigned int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
-       bool compressed = dn->data_blkaddr == COMPRESS_ADDR;
        int cluster_end = 0;
+       unsigned int count;
        int i;
        char *reason = "";
 
-       if (!compressed)
+       if (dn->data_blkaddr != COMPRESS_ADDR)
                return false;
 
        /* [..., COMPR_ADDR, ...] */
@@ -909,7 +910,7 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
                goto out;
        }
 
-       for (i = 1; i < cluster_size; i++) {
+       for (i = 1, count = 1; i < cluster_size; i++, count++) {
                block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
                                                        dn->ofs_in_node + i);
 
@@ -929,19 +930,42 @@ bool f2fs_sanity_check_cluster(struct dnode_of_data *dn)
                        goto out;
                }
        }
+
+       f2fs_bug_on(F2FS_I_SB(dn->inode), count != cluster_size &&
+               !is_inode_flag_set(dn->inode, FI_COMPRESS_RELEASED));
+
        return false;
 out:
        f2fs_warn(sbi, "access invalid cluster, ino:%lu, nid:%u, ofs_in_node:%u, reason:%s",
                        dn->inode->i_ino, dn->nid, dn->ofs_in_node, reason);
        set_sbi_flag(sbi, SBI_NEED_FSCK);
        return true;
+#else
+       return false;
+#endif
+}
+
+static int __f2fs_get_cluster_blocks(struct inode *inode,
+                                       struct dnode_of_data *dn)
+{
+       unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
+       int count, i;
+
+       for (i = 1, count = 1; i < cluster_size; i++) {
+               block_t blkaddr = data_blkaddr(dn->inode, dn->node_page,
+                                                       dn->ofs_in_node + i);
+
+               if (__is_valid_data_blkaddr(blkaddr))
+                       count++;
+       }
+
+       return count;
 }
 
 static int __f2fs_cluster_blocks(struct inode *inode,
-                               unsigned int cluster_idx, bool compr)
+                               unsigned int cluster_idx, bool compr_blks)
 {
        struct dnode_of_data dn;
-       unsigned int cluster_size = F2FS_I(inode)->i_cluster_size;
        unsigned int start_idx = cluster_idx <<
                                F2FS_I(inode)->i_log_cluster_size;
        int ret;
@@ -956,31 +980,14 @@ static int __f2fs_cluster_blocks(struct inode *inode,
 
        if (f2fs_sanity_check_cluster(&dn)) {
                ret = -EFSCORRUPTED;
-               f2fs_handle_error(F2FS_I_SB(inode), ERROR_CORRUPTED_CLUSTER);
                goto fail;
        }
 
        if (dn.data_blkaddr == COMPRESS_ADDR) {
-               int i;
-
-               ret = 1;
-               for (i = 1; i < cluster_size; i++) {
-                       block_t blkaddr;
-
-                       blkaddr = data_blkaddr(dn.inode,
-                                       dn.node_page, dn.ofs_in_node + i);
-                       if (compr) {
-                               if (__is_valid_data_blkaddr(blkaddr))
-                                       ret++;
-                       } else {
-                               if (blkaddr != NULL_ADDR)
-                                       ret++;
-                       }
-               }
-
-               f2fs_bug_on(F2FS_I_SB(inode),
-                       !compr && ret != cluster_size &&
-                       !is_inode_flag_set(inode, FI_COMPRESS_RELEASED));
+               if (compr_blks)
+                       ret = __f2fs_get_cluster_blocks(inode, &dn);
+               else
+                       ret = 1;
        }
 fail:
        f2fs_put_dnode(&dn);
@@ -993,7 +1000,7 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc)
        return __f2fs_cluster_blocks(cc->inode, cc->cluster_idx, true);
 }
 
-/* return # of valid blocks in compressed cluster */
+/* return whether cluster is compressed one or not */
 int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
 {
        return __f2fs_cluster_blocks(inode,
@@ -1976,7 +1983,7 @@ void f2fs_destroy_compress_inode(struct f2fs_sb_info *sbi)
 int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
 {
        dev_t dev = sbi->sb->s_bdev->bd_dev;
-       char slab_name[32];
+       char slab_name[35];
 
        if (!f2fs_sb_has_compression(sbi))
                return 0;
index 916e317ac925fc3e2b3b8ba5679654babfb9b08e..4e42b5f24debec2c1c940b034f622671adb4994c 100644 (file)
@@ -1690,9 +1690,7 @@ next_block:
                        map->m_flags |= F2FS_MAP_NEW;
        } else if (is_hole) {
                if (f2fs_compressed_file(inode) &&
-                   f2fs_sanity_check_cluster(&dn) &&
-                   (flag != F2FS_GET_BLOCK_FIEMAP ||
-                    IS_ENABLED(CONFIG_F2FS_CHECK_FS))) {
+                   f2fs_sanity_check_cluster(&dn)) {
                        err = -EFSCORRUPTED;
                        f2fs_handle_error(sbi,
                                        ERROR_CORRUPTED_CLUSTER);
@@ -2344,8 +2342,10 @@ skip_reading_dnode:
                f2fs_wait_on_block_writeback(inode, blkaddr);
 
                if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
-                       if (atomic_dec_and_test(&dic->remaining_pages))
+                       if (atomic_dec_and_test(&dic->remaining_pages)) {
                                f2fs_decompress_cluster(dic, true);
+                               break;
+                       }
                        continue;
                }
 
@@ -2665,6 +2665,11 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
                return true;
        if (f2fs_is_atomic_file(inode))
                return true;
+       /* rewrite low ratio compress data w/ OPU mode to avoid fragmentation */
+       if (f2fs_compressed_file(inode) &&
+               F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER &&
+               is_inode_flag_set(inode, FI_ENABLE_COMPRESS))
+               return true;
 
        /* swap file is migrating in aligned write mode */
        if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
@@ -3023,7 +3028,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 {
        int ret = 0;
        int done = 0, retry = 0;
-       struct page *pages[F2FS_ONSTACK_PAGES];
+       struct page *pages_local[F2FS_ONSTACK_PAGES];
+       struct page **pages = pages_local;
        struct folio_batch fbatch;
        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        struct bio *bio = NULL;
@@ -3047,6 +3053,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 #endif
        int nr_folios, p, idx;
        int nr_pages;
+       unsigned int max_pages = F2FS_ONSTACK_PAGES;
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
        pgoff_t done_index;
@@ -3056,6 +3063,15 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
        int submitted = 0;
        int i;
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+       if (f2fs_compressed_file(inode) &&
+               1 << cc.log_cluster_size > F2FS_ONSTACK_PAGES) {
+               pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
+                               cc.log_cluster_size, GFP_NOFS | __GFP_NOFAIL);
+               max_pages = 1 << cc.log_cluster_size;
+       }
+#endif
+
        folio_batch_init(&fbatch);
 
        if (get_dirty_pages(mapping->host) <=
@@ -3101,7 +3117,7 @@ again:
 add_more:
                        pages[nr_pages] = folio_page(folio, idx);
                        folio_get(folio);
-                       if (++nr_pages == F2FS_ONSTACK_PAGES) {
+                       if (++nr_pages == max_pages) {
                                index = folio->index + idx + 1;
                                folio_batch_release(&fbatch);
                                goto write;
@@ -3283,6 +3299,11 @@ next:
        if (bio)
                f2fs_submit_merged_ipu_write(sbi, &bio, NULL);
 
+#ifdef CONFIG_F2FS_FS_COMPRESSION
+       if (pages != pages_local)
+               kfree(pages);
+#endif
+
        return ret;
 }
 
@@ -4055,7 +4076,7 @@ next:
        sis->highest_bit = cur_lblock - 1;
 out:
        if (not_aligned)
-               f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%u * N)",
+               f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)",
                          not_aligned, blks_per_sec * F2FS_BLKSIZE);
        return ret;
 }
index 0e2d49140c07f15f35bc81b8ad179d82a74f178a..ad8dfac73bd4461d95d92e965a079d3968d1a229 100644 (file)
@@ -74,40 +74,14 @@ static void __set_extent_info(struct extent_info *ei,
        }
 }
 
-static bool __may_read_extent_tree(struct inode *inode)
-{
-       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-
-       if (!test_opt(sbi, READ_EXTENT_CACHE))
-               return false;
-       if (is_inode_flag_set(inode, FI_NO_EXTENT))
-               return false;
-       if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
-                        !f2fs_sb_has_readonly(sbi))
-               return false;
-       return S_ISREG(inode->i_mode);
-}
-
-static bool __may_age_extent_tree(struct inode *inode)
-{
-       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-
-       if (!test_opt(sbi, AGE_EXTENT_CACHE))
-               return false;
-       if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
-               return false;
-       if (file_is_cold(inode))
-               return false;
-
-       return S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode);
-}
-
 static bool __init_may_extent_tree(struct inode *inode, enum extent_type type)
 {
        if (type == EX_READ)
-               return __may_read_extent_tree(inode);
-       else if (type == EX_BLOCK_AGE)
-               return __may_age_extent_tree(inode);
+               return test_opt(F2FS_I_SB(inode), READ_EXTENT_CACHE) &&
+                       S_ISREG(inode->i_mode);
+       if (type == EX_BLOCK_AGE)
+               return test_opt(F2FS_I_SB(inode), AGE_EXTENT_CACHE) &&
+                       (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode));
        return false;
 }
 
@@ -120,7 +94,22 @@ static bool __may_extent_tree(struct inode *inode, enum extent_type type)
        if (list_empty(&F2FS_I_SB(inode)->s_list))
                return false;
 
-       return __init_may_extent_tree(inode, type);
+       if (!__init_may_extent_tree(inode, type))
+               return false;
+
+       if (type == EX_READ) {
+               if (is_inode_flag_set(inode, FI_NO_EXTENT))
+                       return false;
+               if (is_inode_flag_set(inode, FI_COMPRESSED_FILE) &&
+                                !f2fs_sb_has_readonly(F2FS_I_SB(inode)))
+                       return false;
+       } else if (type == EX_BLOCK_AGE) {
+               if (is_inode_flag_set(inode, FI_COMPRESSED_FILE))
+                       return false;
+               if (file_is_cold(inode))
+                       return false;
+       }
+       return true;
 }
 
 static void __try_update_largest_extent(struct extent_tree *et,
index dd99abbb7186aa340ae1636f52b7416519a92514..e50363583f019a227ce3280ddb3ecd85760f8226 100644 (file)
@@ -3258,11 +3258,12 @@ int f2fs_precache_extents(struct inode *inode)
                return -EOPNOTSUPP;
 
        map.m_lblk = 0;
+       map.m_pblk = 0;
        map.m_next_pgofs = NULL;
        map.m_next_extent = &m_next_extent;
        map.m_seg_type = NO_CHECK_TYPE;
        map.m_may_create = false;
-       end = max_file_blocks(inode);
+       end = F2FS_BLK_ALIGN(i_size_read(inode));
 
        while (map.m_lblk < end) {
                map.m_len = end - map.m_lblk;
@@ -3270,7 +3271,7 @@ int f2fs_precache_extents(struct inode *inode)
                f2fs_down_write(&fi->i_gc_rwsem[WRITE]);
                err = f2fs_map_blocks(inode, &map, F2FS_GET_BLOCK_PRECACHE);
                f2fs_up_write(&fi->i_gc_rwsem[WRITE]);
-               if (err)
+               if (err || !map.m_len)
                        return err;
 
                map.m_lblk = m_next_extent;
@@ -4005,6 +4006,15 @@ static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
        F2FS_I(inode)->i_compress_algorithm = option.algorithm;
        F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
        F2FS_I(inode)->i_cluster_size = BIT(option.log_cluster_size);
+       /* Set default level */
+       if (F2FS_I(inode)->i_compress_algorithm == COMPRESS_ZSTD)
+               F2FS_I(inode)->i_compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
+       else
+               F2FS_I(inode)->i_compress_level = 0;
+       /* Adjust mount option level */
+       if (option.algorithm == F2FS_OPTION(sbi).compress_algorithm &&
+           F2FS_OPTION(sbi).compress_level)
+               F2FS_I(inode)->i_compress_level = F2FS_OPTION(sbi).compress_level;
        f2fs_mark_inode_dirty_sync(inode, true);
 
        if (!f2fs_is_compress_backend_ready(inode))
@@ -4849,6 +4859,9 @@ static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
                filp->f_mode &= ~FMODE_RANDOM;
                spin_unlock(&filp->f_lock);
                return 0;
+       } else if (advice == POSIX_FADV_WILLNEED && offset == 0) {
+               /* Load extent cache at the first readahead. */
+               f2fs_precache_extents(inode);
        }
 
        err = generic_fadvise(filp, offset, len, advice);
index 5779c7edd49b753d3e032d50f00ed0f79ecfa65b..560bfcad1af2356c761446c93c67d90110f041f3 100644 (file)
@@ -315,7 +315,7 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
                        f2fs_has_inline_xattr(inode) &&
                        (!fi->i_inline_xattr_size ||
                        fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
-                       f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
+                       f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %lu",
                                  __func__, inode->i_ino, fi->i_inline_xattr_size,
                                  MAX_INLINE_XATTR_SIZE);
                        return false;
index ee2e1dd64f256f5c8c60a76dd9e46f711e1bdb9b..6c7f6a649d272fbfe7139063976bf96bf043255a 100644 (file)
@@ -633,7 +633,7 @@ static void f2fs_ra_node_pages(struct page *parent, int start, int n)
 
        /* Then, try readahead for siblings of the desired node */
        end = start + n;
-       end = min(end, NIDS_PER_BLOCK);
+       end = min(end, (int)NIDS_PER_BLOCK);
        for (i = start; i < end; i++) {
                nid = get_nid(parent, i, false);
                f2fs_ra_node_page(sbi, nid);
@@ -1467,7 +1467,8 @@ page_hit:
                          ofs_of_node(page), cpver_of_node(page),
                          next_blkaddr_of_node(page));
        set_sbi_flag(sbi, SBI_NEED_FSCK);
-       err = -EINVAL;
+       f2fs_handle_error(sbi, ERROR_INCONSISTENT_FOOTER);
+       err = -EFSCORRUPTED;
 out_err:
        ClearPageUptodate(page);
 out_put_err:
@@ -2389,7 +2390,7 @@ static int scan_nat_page(struct f2fs_sb_info *sbi,
                blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
 
                if (blk_addr == NEW_ADDR)
-                       return -EINVAL;
+                       return -EFSCORRUPTED;
 
                if (blk_addr == NULL_ADDR) {
                        add_free_nid(sbi, start_nid, true, true);
@@ -2504,7 +2505,14 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
 
                        if (ret) {
                                f2fs_up_read(&nm_i->nat_tree_lock);
-                               f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
+
+                               if (ret == -EFSCORRUPTED) {
+                                       f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
+                                       set_sbi_flag(sbi, SBI_NEED_FSCK);
+                                       f2fs_handle_error(sbi,
+                                               ERROR_INCONSISTENT_NAT);
+                               }
+
                                return ret;
                        }
                }
@@ -2743,7 +2751,9 @@ recover_xnid:
        f2fs_update_inode_page(inode);
 
        /* 3: update and set xattr node page dirty */
-       memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
+       if (page)
+               memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
+                               VALID_XATTR_BLOCK_SIZE);
 
        set_page_dirty(xpage);
        f2fs_put_page(xpage, 1);
index d05b41608fc00513a186bdf4339fbc81505d925f..727d016318f985da124966e51fd2d0d14dce5708 100644 (file)
@@ -4910,22 +4910,31 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
        }
 
        /*
-        * The write pointer matches with the valid blocks or
-        * already points to the end of the zone.
+        * When safely unmounted in the previous mount, we can trust write
+        * pointers. Otherwise, finish zones.
         */
-       if ((last_valid_block + 1 == wp_block) ||
-                       (zone->wp == zone->start + zone->len))
-               return 0;
-
-       if (last_valid_block + 1 == zone_block) {
+       if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
                /*
-                * If there is no valid block in the zone and if write pointer
-                * is not at zone start, reset the write pointer.
+                * The write pointer matches with the valid blocks or
+                * already points to the end of the zone.
                 */
-               f2fs_notice(sbi,
-                           "Zone without valid block has non-zero write "
-                           "pointer. Reset the write pointer: wp[0x%x,0x%x]",
-                           wp_segno, wp_blkoff);
+               if ((last_valid_block + 1 == wp_block) ||
+                               (zone->wp == zone->start + zone->len))
+                       return 0;
+       }
+
+       if (last_valid_block + 1 == zone_block) {
+               if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+                       /*
+                        * If there is no valid block in the zone and if write
+                        * pointer is not at zone start, reset the write
+                        * pointer.
+                        */
+                       f2fs_notice(sbi,
+                             "Zone without valid block has non-zero write "
+                             "pointer. Reset the write pointer: wp[0x%x,0x%x]",
+                             wp_segno, wp_blkoff);
+               }
                ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
                                        zone->len >> log_sectors_per_block);
                if (ret)
@@ -4935,18 +4944,20 @@ static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
                return ret;
        }
 
-       /*
-        * If there are valid blocks and the write pointer doesn't
-        * match with them, we need to report the inconsistency and
-        * fill the zone till the end to close the zone. This inconsistency
-        * does not cause write error because the zone will not be selected
-        * for write operation until it get discarded.
-        */
-       f2fs_notice(sbi, "Valid blocks are not aligned with write pointer: "
-                   "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
-                   GET_SEGNO(sbi, last_valid_block),
-                   GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
-                   wp_segno, wp_blkoff);
+       if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+               /*
+                * If there are valid blocks and the write pointer doesn't match
+                * with them, we need to report the inconsistency and fill
+                * the zone till the end to close the zone. This inconsistency
+                * does not cause write error because the zone will not be
+                * selected for write operation until it get discarded.
+                */
+               f2fs_notice(sbi, "Valid blocks are not aligned with write "
+                           "pointer: valid block[0x%x,0x%x] wp[0x%x,0x%x]",
+                           GET_SEGNO(sbi, last_valid_block),
+                           GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
+                           wp_segno, wp_blkoff);
+       }
 
        ret = blkdev_zone_mgmt(fdev->bdev, REQ_OP_ZONE_FINISH,
                                zone->start, zone->len, GFP_NOFS);
@@ -5020,18 +5031,27 @@ static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
        if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
                return 0;
 
-       wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
-       wp_segno = GET_SEGNO(sbi, wp_block);
-       wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
-       wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
-
-       if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
-               wp_sector_off == 0)
-               return 0;
+       /*
+        * When safely unmounted in the previous mount, we could use current
+        * segments. Otherwise, allocate new sections.
+        */
+       if (is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+               wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
+               wp_segno = GET_SEGNO(sbi, wp_block);
+               wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
+               wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
+
+               if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
+                               wp_sector_off == 0)
+                       return 0;
 
-       f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
-                   "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
-                   type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
+               f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
+                           "curseg[0x%x,0x%x] wp[0x%x,0x%x]", type, cs->segno,
+                           cs->next_blkoff, wp_segno, wp_blkoff);
+       } else {
+               f2fs_notice(sbi, "Not successfully unmounted in the previous "
+                           "mount");
+       }
 
        f2fs_notice(sbi, "Assign new section to curseg[%d]: "
                    "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
index 2ca8fb5d0dc4db3f69d09ea6704b54cd5bd51da1..8129be788bd5615b99a666f6a240b16c315a0cc0 100644 (file)
@@ -108,11 +108,11 @@ static inline void sanity_check_seg_type(struct f2fs_sb_info *sbi,
        ((sbi)->segs_per_sec - ((sbi)->unusable_blocks_per_sec >>\
        (sbi)->log_blocks_per_seg))
 #define GET_SEC_FROM_SEG(sbi, segno)                           \
-       (((segno) == -1) ? -1: (segno) / (sbi)->segs_per_sec)
+       (((segno) == -1) ? -1 : (segno) / (sbi)->segs_per_sec)
 #define GET_SEG_FROM_SEC(sbi, secno)                           \
        ((secno) * (sbi)->segs_per_sec)
 #define GET_ZONE_FROM_SEC(sbi, secno)                          \
-       (((secno) == -1) ? -1: (secno) / (sbi)->secs_per_zone)
+       (((secno) == -1) ? -1 : (secno) / (sbi)->secs_per_zone)
 #define GET_ZONE_FROM_SEG(sbi, segno)                          \
        GET_ZONE_FROM_SEC(sbi, GET_SEC_FROM_SEG(sbi, segno))
 
index 05f9f7b6ebf8c63a2f482bd1ba1a50836727cd40..033af907c3b1dc3059874fc689a32c0f0aaa45b2 100644 (file)
@@ -562,6 +562,29 @@ static int f2fs_set_test_dummy_encryption(struct super_block *sb,
 }
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
+static bool is_compress_extension_exist(struct f2fs_sb_info *sbi,
+                                       const char *new_ext, bool is_ext)
+{
+       unsigned char (*ext)[F2FS_EXTENSION_LEN];
+       int ext_cnt;
+       int i;
+
+       if (is_ext) {
+               ext = F2FS_OPTION(sbi).extensions;
+               ext_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
+       } else {
+               ext = F2FS_OPTION(sbi).noextensions;
+               ext_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
+       }
+
+       for (i = 0; i < ext_cnt; i++) {
+               if (!strcasecmp(new_ext, ext[i]))
+                       return true;
+       }
+
+       return false;
+}
+
 /*
  * 1. The same extension name cannot not appear in both compress and non-compress extension
  * at the same time.
@@ -1164,6 +1187,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
                                return -EINVAL;
                        }
 
+                       if (is_compress_extension_exist(sbi, name, true)) {
+                               kfree(name);
+                               break;
+                       }
+
                        strcpy(ext[ext_cnt], name);
                        F2FS_OPTION(sbi).compress_ext_cnt++;
                        kfree(name);
@@ -1188,6 +1216,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
                                return -EINVAL;
                        }
 
+                       if (is_compress_extension_exist(sbi, name, false)) {
+                               kfree(name);
+                               break;
+                       }
+
                        strcpy(noext[noext_cnt], name);
                        F2FS_OPTION(sbi).nocompress_ext_cnt++;
                        kfree(name);
@@ -1644,7 +1677,7 @@ static void f2fs_put_super(struct super_block *sb)
 
        f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
 
-       if (err) {
+       if (err || f2fs_cp_error(sbi)) {
                truncate_inode_pages_final(NODE_MAPPING(sbi));
                truncate_inode_pages_final(META_MAPPING(sbi));
        }
@@ -2301,9 +2334,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
        unsigned long old_sb_flags;
        int err;
        bool need_restart_gc = false, need_stop_gc = false;
-       bool need_restart_ckpt = false, need_stop_ckpt = false;
        bool need_restart_flush = false, need_stop_flush = false;
        bool need_restart_discard = false, need_stop_discard = false;
+       bool need_enable_checkpoint = false, need_disable_checkpoint = false;
        bool no_read_extent_cache = !test_opt(sbi, READ_EXTENT_CACHE);
        bool no_age_extent_cache = !test_opt(sbi, AGE_EXTENT_CACHE);
        bool enable_checkpoint = !test_opt(sbi, DISABLE_CHECKPOINT);
@@ -2467,24 +2500,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
                clear_sbi_flag(sbi, SBI_IS_CLOSE);
        }
 
-       if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
-                       !test_opt(sbi, MERGE_CHECKPOINT)) {
-               f2fs_stop_ckpt_thread(sbi);
-               need_restart_ckpt = true;
-       } else {
-               /* Flush if the prevous checkpoint, if exists. */
-               f2fs_flush_ckpt_thread(sbi);
-
-               err = f2fs_start_ckpt_thread(sbi);
-               if (err) {
-                       f2fs_err(sbi,
-                           "Failed to start F2FS issue_checkpoint_thread (%d)",
-                           err);
-                       goto restore_gc;
-               }
-               need_stop_ckpt = true;
-       }
-
        /*
         * We stop issue flush thread if FS is mounted as RO
         * or if flush_merge is not passed in mount option.
@@ -2496,7 +2511,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
        } else {
                err = f2fs_create_flush_cmd_control(sbi);
                if (err)
-                       goto restore_ckpt;
+                       goto restore_gc;
                need_stop_flush = true;
        }
 
@@ -2518,8 +2533,31 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
                        err = f2fs_disable_checkpoint(sbi);
                        if (err)
                                goto restore_discard;
+                       need_enable_checkpoint = true;
                } else {
                        f2fs_enable_checkpoint(sbi);
+                       need_disable_checkpoint = true;
+               }
+       }
+
+       /*
+        * Place this routine at the end, since a new checkpoint would be
+        * triggered while remount and we need to take care of it before
+        * returning from remount.
+        */
+       if ((*flags & SB_RDONLY) || test_opt(sbi, DISABLE_CHECKPOINT) ||
+                       !test_opt(sbi, MERGE_CHECKPOINT)) {
+               f2fs_stop_ckpt_thread(sbi);
+       } else {
+               /* Flush if the prevous checkpoint, if exists. */
+               f2fs_flush_ckpt_thread(sbi);
+
+               err = f2fs_start_ckpt_thread(sbi);
+               if (err) {
+                       f2fs_err(sbi,
+                           "Failed to start F2FS issue_checkpoint_thread (%d)",
+                           err);
+                       goto restore_checkpoint;
                }
        }
 
@@ -2537,6 +2575,13 @@ skip:
        adjust_unusable_cap_perc(sbi);
        *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
        return 0;
+restore_checkpoint:
+       if (need_enable_checkpoint) {
+               f2fs_enable_checkpoint(sbi);
+       } else if (need_disable_checkpoint) {
+               if (f2fs_disable_checkpoint(sbi))
+                       f2fs_warn(sbi, "checkpoint has not been disabled");
+       }
 restore_discard:
        if (need_restart_discard) {
                if (f2fs_start_discard_thread(sbi))
@@ -2552,13 +2597,6 @@ restore_flush:
                clear_opt(sbi, FLUSH_MERGE);
                f2fs_destroy_flush_cmd_control(sbi, false);
        }
-restore_ckpt:
-       if (need_restart_ckpt) {
-               if (f2fs_start_ckpt_thread(sbi))
-                       f2fs_warn(sbi, "background ckpt thread has stopped");
-       } else if (need_stop_ckpt) {
-               f2fs_stop_ckpt_thread(sbi);
-       }
 restore_gc:
        if (need_restart_gc) {
                if (f2fs_start_gc_thread(sbi))
@@ -3292,6 +3330,7 @@ static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
 }
 
 static const struct export_operations f2fs_export_ops = {
+       .encode_fh = generic_encode_ino32_fh,
        .fh_to_dentry = f2fs_fh_to_dentry,
        .fh_to_parent = f2fs_fh_to_parent,
        .get_parent = f2fs_get_parent,
@@ -3479,7 +3518,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
                return -EFSCORRUPTED;
        }
 
-       /* Currently, support 512/1024/2048/4096 bytes sector size */
+       /* Currently, support 512/1024/2048/4096/16K bytes sector size */
        if (le32_to_cpu(raw_super->log_sectorsize) >
                                F2FS_MAX_LOG_SECTOR_SIZE ||
                le32_to_cpu(raw_super->log_sectorsize) <
@@ -4926,7 +4965,7 @@ static int __init init_f2fs_fs(void)
        int err;
 
        if (PAGE_SIZE != F2FS_BLKSIZE) {
-               printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n",
+               printk("F2FS not supported on PAGE_SIZE(%lu) != BLOCK_SIZE(%lu)\n",
                                PAGE_SIZE, F2FS_BLKSIZE);
                return -EINVAL;
        }
index 4314456854f64cbebc0b1fa6e47504da957e7639..47e88b4d4e7d01aaadc38c61922060e5a8884399 100644 (file)
@@ -364,10 +364,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
 
        *xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
        if (!*xe) {
-               f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+               f2fs_err(F2FS_I_SB(inode), "lookup inode (%lu) has corrupted xattr",
                                                                inode->i_ino);
                set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
-               err = -EFSCORRUPTED;
+               err = -ENODATA;
                f2fs_handle_error(F2FS_I_SB(inode),
                                        ERROR_CORRUPTED_XATTR);
                goto out;
@@ -584,13 +584,12 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
 
                if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
                        (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
-                       f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+                       f2fs_err(F2FS_I_SB(inode), "list inode (%lu) has corrupted xattr",
                                                inode->i_ino);
                        set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
-                       error = -EFSCORRUPTED;
                        f2fs_handle_error(F2FS_I_SB(inode),
                                                ERROR_CORRUPTED_XATTR);
-                       goto cleanup;
+                       break;
                }
 
                if (!prefix)
@@ -650,7 +649,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
 
        if (size > MAX_VALUE_LEN(inode))
                return -E2BIG;
-
+retry:
        error = read_all_xattrs(inode, ipage, &base_addr);
        if (error)
                return error;
@@ -660,7 +659,14 @@ static int __f2fs_setxattr(struct inode *inode, int index,
        /* find entry with wanted name. */
        here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
        if (!here) {
-               f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
+               if (!F2FS_I(inode)->i_xattr_nid) {
+                       f2fs_notice(F2FS_I_SB(inode),
+                               "recover xattr in inode (%lu)", inode->i_ino);
+                       f2fs_recover_xattr_data(inode, NULL);
+                       kfree(base_addr);
+                       goto retry;
+               }
+               f2fs_err(F2FS_I_SB(inode), "set inode (%lu) has corrupted xattr",
                                                                inode->i_ino);
                set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
                error = -EFSCORRUPTED;
index 3626eb585a983e9d1a639df5da5e56bf9f19a118..c52e63e10d35cd3ccacee22341bed2679df1db80 100644 (file)
@@ -279,6 +279,7 @@ static struct dentry *fat_get_parent(struct dentry *child_dir)
 }
 
 const struct export_operations fat_export_ops = {
+       .encode_fh      = generic_encode_ino32_fh,
        .fh_to_dentry   = fat_fh_to_dentry,
        .fh_to_parent   = fat_fh_to_parent,
        .get_parent     = fat_get_parent,
index 6ea8d35a9382ace3fac22b89c818c74f0f123c34..18b3ba8dc8ead7c6016a1f76d96275268b0667f0 100644 (file)
@@ -26,12 +26,8 @@ static long do_sys_name_to_handle(const struct path *path,
        /*
         * We need to make sure whether the file system support decoding of
         * the file handle if decodeable file handle was requested.
-        * Otherwise, even empty export_operations are sufficient to opt-in
-        * to encoding FIDs.
         */
-       if (!path->dentry->d_sb->s_export_op ||
-           (!(fh_flags & EXPORT_FH_FID) &&
-            !path->dentry->d_sb->s_export_op->fh_to_dentry))
+       if (!exportfs_can_encode_fh(path->dentry->d_sb->s_export_op, fh_flags))
                return -EOPNOTSUPP;
 
        if (copy_from_user(&f_handle, ufh, sizeof(struct file_handle)))
index 310d73e254df2cea38dce331715c55606d728735..e6e2a2185e7c5dbe78b51e47d71d24f2b982d701 100644 (file)
@@ -76,6 +76,7 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
 {
        struct vxfs_sb_info             *infp = VXFS_SBI(dentry->d_sb);
        struct vxfs_sb *raw_sb = infp->vsi_raw;
+       u64 id = huge_encode_dev(dentry->d_sb->s_bdev->bd_dev);
 
        bufp->f_type = VXFS_SUPER_MAGIC;
        bufp->f_bsize = dentry->d_sb->s_blocksize;
@@ -84,6 +85,7 @@ vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
        bufp->f_bavail = 0;
        bufp->f_files = 0;
        bufp->f_ffree = fs32_to_cpu(infp, raw_sb->vs_ifree);
+       bufp->f_fsid = u64_to_fsid(id);
        bufp->f_namelen = VXFS_NAMELEN;
 
        return 0;
index caa8121ad99c71fbc07a7a27e27de688c7df3561..74d4f09d5827e8af92aef881eb8060fa69e4afca 100644 (file)
@@ -999,7 +999,7 @@ static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
        }
 
        *max_len = len;
-       return parent ? 0x82 : 0x81;
+       return parent ? FILEID_INO64_GEN_PARENT : FILEID_INO64_GEN;
 }
 
 static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
@@ -1007,7 +1007,8 @@ static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
 {
        struct fuse_inode_handle handle;
 
-       if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)
+       if ((fh_type != FILEID_INO64_GEN &&
+            fh_type != FILEID_INO64_GEN_PARENT) || fh_len < 3)
                return NULL;
 
        handle.nodeid = (u64) fid->raw[0] << 32;
@@ -1021,7 +1022,7 @@ static struct dentry *fuse_fh_to_parent(struct super_block *sb,
 {
        struct fuse_inode_handle parent;
 
-       if (fh_type != 0x82 || fh_len < 6)
+       if (fh_type != FILEID_INO64_GEN_PARENT || fh_len < 6)
                return NULL;
 
        parent.nodeid = (u64) fid->raw[3] << 32;
index d4deb2b1995952dabbd50a892f500f355445d9a4..82f5b09c04e6691c1bbe109a30168f33875d0c6b 100644 (file)
@@ -11,9 +11,9 @@
 
 #define GFS2_ACL_MAX_ENTRIES(sdp) ((300 << (sdp)->sd_sb.sb_bsize_shift) >> 12)
 
-extern struct posix_acl *gfs2_get_acl(struct inode *inode, int type, bool rcu);
-extern int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
-extern int gfs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
-                       struct posix_acl *acl, int type);
+struct posix_acl *gfs2_get_acl(struct inode *inode, int type, bool rcu);
+int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+int gfs2_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
+                struct posix_acl *acl, int type);
 
 #endif /* __ACL_DOT_H__ */
index 6b060fc9e26091fa3633e58d9381fda8ed07820e..9611bfceda4b6f8465a0b15cb640aa2696fbe8fd 100644 (file)
@@ -155,7 +155,7 @@ static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
 
-       if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
+       if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
                goto out;
        if (folio_test_checked(folio) || current->journal_info)
                goto out_ignore;
@@ -214,12 +214,12 @@ static int gfs2_write_jdata_batch(struct address_space *mapping,
        unsigned nrblocks;
        int i;
        int ret;
-       int nr_pages = 0;
+       size_t size = 0;
        int nr_folios = folio_batch_count(fbatch);
 
        for (i = 0; i < nr_folios; i++)
-               nr_pages += folio_nr_pages(fbatch->folios[i]);
-       nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
+               size += folio_size(fbatch->folios[i]);
+       nrblocks = size >> inode->i_blkbits;
 
        ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
        if (ret < 0)
@@ -403,27 +403,27 @@ static int gfs2_jdata_writepages(struct address_space *mapping,
 }
 
 /**
- * stuffed_readpage - Fill in a Linux page with stuffed file data
+ * stuffed_readpage - Fill in a Linux folio with stuffed file data
  * @ip: the inode
- * @page: the page
+ * @folio: the folio
  *
  * Returns: errno
  */
-static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
+static int stuffed_readpage(struct gfs2_inode *ip, struct folio *folio)
 {
        struct buffer_head *dibh;
-       u64 dsize = i_size_read(&ip->i_inode);
-       void *kaddr;
+       size_t i_size = i_size_read(&ip->i_inode);
+       void *data;
        int error;
 
        /*
         * Due to the order of unstuffing files and ->fault(), we can be
-        * asked for a zero page in the case of a stuffed file being extended,
+        * asked for a zero folio in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
-       if (unlikely(page->index)) {
-               zero_user(page, 0, PAGE_SIZE);
-               SetPageUptodate(page);
+       if (unlikely(folio->index)) {
+               folio_zero_range(folio, 0, folio_size(folio));
+               folio_mark_uptodate(folio);
                return 0;
        }
 
@@ -431,13 +431,11 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
        if (error)
                return error;
 
-       kaddr = kmap_local_page(page);
-       memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
-       memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
-       kunmap_local(kaddr);
-       flush_dcache_page(page);
+       data = dibh->b_data + sizeof(struct gfs2_dinode);
+       memcpy_to_folio(folio, 0, data, i_size);
+       folio_zero_range(folio, i_size, folio_size(folio) - i_size);
        brelse(dibh);
-       SetPageUptodate(page);
+       folio_mark_uptodate(folio);
 
        return 0;
 }
@@ -458,7 +456,7 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
            (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
                error = iomap_read_folio(folio, &gfs2_iomap_ops);
        } else if (gfs2_is_stuffed(ip)) {
-               error = stuffed_readpage(ip, &folio->page);
+               error = stuffed_readpage(ip, folio);
                folio_unlock(folio);
        } else {
                error = mpage_read_folio(folio, gfs2_block_map);
@@ -479,31 +477,29 @@ static int gfs2_read_folio(struct file *file, struct folio *folio)
  *
  */
 
-int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
-                       unsigned size)
+ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
+                          size_t size)
 {
        struct address_space *mapping = ip->i_inode.i_mapping;
        unsigned long index = *pos >> PAGE_SHIFT;
-       unsigned offset = *pos & (PAGE_SIZE - 1);
-       unsigned copied = 0;
-       unsigned amt;
-       struct page *page;
+       size_t copied = 0;
 
        do {
-               page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
-               if (IS_ERR(page)) {
-                       if (PTR_ERR(page) == -EINTR)
+               size_t offset, chunk;
+               struct folio *folio;
+
+               folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
+               if (IS_ERR(folio)) {
+                       if (PTR_ERR(folio) == -EINTR)
                                continue;
-                       return PTR_ERR(page);
+                       return PTR_ERR(folio);
                }
-               amt = size - copied;
-               if (offset + size > PAGE_SIZE)
-                       amt = PAGE_SIZE - offset;
-               memcpy_from_page(buf + copied, page, offset, amt);
-               put_page(page);
-               copied += amt;
-               index++;
-               offset = 0;
+               offset = *pos + copied - folio_pos(folio);
+               chunk = min(size - copied, folio_size(folio) - offset);
+               memcpy_from_folio(buf + copied, folio, offset, chunk);
+               index = folio_next_index(folio);
+               folio_put(folio);
+               copied += chunk;
        } while(copied < size);
        (*pos) += size;
        return size;
index f08322ef41cfd3f3d96b28cfae987ec140cd3351..a10c4334d2489316c264f8de0a3a14e7a00e1ff7 100644 (file)
@@ -8,8 +8,8 @@
 
 #include "incore.h"
 
-extern void adjust_fs_space(struct inode *inode);
-extern void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
-                                   size_t from, size_t len);
+void adjust_fs_space(struct inode *inode);
+void gfs2_trans_add_databufs(struct gfs2_inode *ip, struct folio *folio,
+                            size_t from, size_t len);
 
 #endif /* __AOPS_DOT_H__ */
index 6eb6f1bd9e34b59c130cecd02a5a43c03bf9bfaf..d9ccfd27e4f11fe4ecc7ce36981cbef469942847 100644 (file)
@@ -104,7 +104,7 @@ static int __gfs2_unstuff_inode(struct gfs2_inode *ip, struct folio *folio)
                   and write it out to disk */
 
                unsigned int n = 1;
-               error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
+               error = gfs2_alloc_blocks(ip, &block, &n, 0);
                if (error)
                        goto out_brelse;
                if (isdir) {
@@ -315,6 +315,12 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl, __be64 *start, __be64 *end)
        }
 }
 
+static inline struct buffer_head *
+metapath_dibh(struct metapath *mp)
+{
+       return mp->mp_bh[0];
+}
+
 static int __fillup_metapath(struct gfs2_inode *ip, struct metapath *mp,
                             unsigned int x, unsigned int h)
 {
@@ -413,13 +419,12 @@ static void release_metapath(struct metapath *mp)
  * gfs2_extent_length - Returns length of an extent of blocks
  * @bh: The metadata block
  * @ptr: Current position in @bh
- * @limit: Max extent length to return
  * @eob: Set to 1 if we hit "end of block"
  *
  * Returns: The length of the extent (minimum of one block)
  */
 
-static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, size_t limit, int *eob)
+static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *ptr, int *eob)
 {
        const __be64 *end = (__be64 *)(bh->b_data + bh->b_size);
        const __be64 *first = ptr;
@@ -658,7 +663,7 @@ static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
 {
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
-       struct buffer_head *dibh = mp->mp_bh[0];
+       struct buffer_head *dibh = metapath_dibh(mp);
        u64 bn;
        unsigned n, i, blks, alloced = 0, iblks = 0, branch_start = 0;
        size_t dblks = iomap->length >> inode->i_blkbits;
@@ -700,7 +705,7 @@ static int __gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap,
        i = mp->mp_aheight;
        do {
                n = blks - alloced;
-               ret = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
+               ret = gfs2_alloc_blocks(ip, &bn, &n, 0);
                if (ret)
                        goto out;
                alloced += n;
@@ -911,7 +916,7 @@ unstuff:
                goto do_alloc;
 
        bh = mp->mp_bh[ip->i_height - 1];
-       len = gfs2_extent_length(bh, ptr, len, &eob);
+       len = gfs2_extent_length(bh, ptr, &eob);
 
        iomap->addr = be64_to_cpu(*ptr) << inode->i_blkbits;
        iomap->length = len << inode->i_blkbits;
index e5b7d17131ed31f124db164b2a0a1c283478f3bd..4e8b1e8ebdf390d9432694dfbd81d1aad117a00d 100644 (file)
@@ -46,24 +46,24 @@ static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
 extern const struct iomap_ops gfs2_iomap_ops;
 extern const struct iomap_writeback_ops gfs2_writeback_ops;
 
-extern int gfs2_unstuff_dinode(struct gfs2_inode *ip);
-extern int gfs2_block_map(struct inode *inode, sector_t lblock,
-                         struct buffer_head *bh, int create);
-extern int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
-                         struct iomap *iomap);
-extern int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
-                           struct iomap *iomap);
-extern int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
-                          unsigned int *extlen);
-extern int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
-                            unsigned *extlen, bool *new);
-extern int gfs2_setattr_size(struct inode *inode, u64 size);
-extern int gfs2_truncatei_resume(struct gfs2_inode *ip);
-extern int gfs2_file_dealloc(struct gfs2_inode *ip);
-extern int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
-                                    unsigned int len);
-extern int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
-extern void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
-extern int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);
+int gfs2_unstuff_dinode(struct gfs2_inode *ip);
+int gfs2_block_map(struct inode *inode, sector_t lblock,
+                  struct buffer_head *bh, int create);
+int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
+                  struct iomap *iomap);
+int gfs2_iomap_alloc(struct inode *inode, loff_t pos, loff_t length,
+                    struct iomap *iomap);
+int gfs2_get_extent(struct inode *inode, u64 lblock, u64 *dblock,
+                   unsigned int *extlen);
+int gfs2_alloc_extent(struct inode *inode, u64 lblock, u64 *dblock,
+                     unsigned *extlen, bool *new);
+int gfs2_setattr_size(struct inode *inode, u64 size);
+int gfs2_truncatei_resume(struct gfs2_inode *ip);
+int gfs2_file_dealloc(struct gfs2_inode *ip);
+int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
+                             unsigned int len);
+int gfs2_map_journal_extents(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd);
+void gfs2_free_journal_extents(struct gfs2_jdesc *jd);
+int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length);
 
 #endif /* __BMAP_DOT_H__ */
index 61ddd03ea111efe6980a0152982dd20a7d6ff246..560e4624c09f2d5e72834c71cb4956d1db509c9f 100644 (file)
@@ -868,7 +868,7 @@ static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh,
        struct gfs2_dirent *dent;
        struct timespec64 tv = current_time(inode);
 
-       error = gfs2_alloc_blocks(ip, &bn, &n, 0, NULL);
+       error = gfs2_alloc_blocks(ip, &bn, &n, 0);
        if (error)
                return NULL;
        bh = gfs2_meta_new(ip->i_gl, bn);
index 5b76480c17c9e90c8c8699fd6c5ce931ec493418..25a857c78b538b1448ebe6c8f098d0172d21aabc 100644 (file)
@@ -23,32 +23,32 @@ struct gfs2_diradd {
        int save_loc;
 };
 
-extern struct inode *gfs2_dir_search(struct inode *dir,
-                                    const struct qstr *filename,
-                                    bool fail_on_exist);
-extern int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
-                         const struct gfs2_inode *ip);
-extern int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
-                       const struct gfs2_inode *ip, struct gfs2_diradd *da);
+struct inode *gfs2_dir_search(struct inode *dir,
+                             const struct qstr *filename,
+                             bool fail_on_exist);
+int gfs2_dir_check(struct inode *dir, const struct qstr *filename,
+                  const struct gfs2_inode *ip);
+int gfs2_dir_add(struct inode *inode, const struct qstr *filename,
+                const struct gfs2_inode *ip, struct gfs2_diradd *da);
 static inline void gfs2_dir_no_add(struct gfs2_diradd *da)
 {
        brelse(da->bh);
        da->bh = NULL;
 }
-extern int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
-extern int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
-                        struct file_ra_state *f_ra);
-extern int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
-                         const struct gfs2_inode *nip, unsigned int new_type);
+int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry);
+int gfs2_dir_read(struct inode *inode, struct dir_context *ctx,
+                 struct file_ra_state *f_ra);
+int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
+                  const struct gfs2_inode *nip, unsigned int new_type);
 
-extern int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
+int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip);
 
-extern int gfs2_diradd_alloc_required(struct inode *dir,
-                                     const struct qstr *filename,
-                                     struct gfs2_diradd *da);
-extern int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
-                                  struct buffer_head **bhp);
-extern void gfs2_dir_hash_inval(struct gfs2_inode *ip);
+int gfs2_diradd_alloc_required(struct inode *dir,
+                              const struct qstr *filename,
+                              struct gfs2_diradd *da);
+int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
+                           struct buffer_head **bhp);
+void gfs2_dir_hash_inval(struct gfs2_inode *ip);
 
 static inline u32 gfs2_disk_hash(const char *data, int len)
 {
index f2700477a3001bcad1e50b9e1868d48e12cb969b..4b66efc1a82aa1c1a58fe04733f81f02d69dbe44 100644 (file)
@@ -418,7 +418,7 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
        struct inode *inode = file_inode(vmf->vma->vm_file);
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
-       struct gfs2_alloc_parms ap = { .aflags = 0, };
+       struct gfs2_alloc_parms ap = {};
        u64 offset = page_offset(page);
        unsigned int data_blocks, ind_blocks, rblocks;
        vm_fault_t ret = VM_FAULT_LOCKED;
@@ -1120,14 +1120,16 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        if (ret)
                goto out_unlock;
 
-       ret = file_update_time(file);
-       if (ret)
-               goto out_unlock;
-
        if (iocb->ki_flags & IOCB_DIRECT) {
                struct address_space *mapping = file->f_mapping;
                ssize_t buffered, ret2;
 
+               /*
+                * Note that under direct I/O, we don't allow and inode
+                * timestamp updates, so we're not calling file_update_time()
+                * here.
+                */
+
                ret = gfs2_file_direct_write(iocb, from, &gh);
                if (ret < 0 || !iov_iter_count(from))
                        goto out_unlock;
@@ -1154,6 +1156,10 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
                if (!ret || ret2 > 0)
                        ret += ret2;
        } else {
+               ret = file_update_time(file);
+               if (ret)
+                       goto out_unlock;
+
                ret = gfs2_file_buffered_write(iocb, from, &gh);
                if (likely(ret > 0))
                        ret = generic_write_sync(iocb, ret);
@@ -1245,7 +1251,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t
        struct inode *inode = file_inode(file);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_inode *ip = GFS2_I(inode);
-       struct gfs2_alloc_parms ap = { .aflags = 0, };
+       struct gfs2_alloc_parms ap = {};
        unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        loff_t bytes, max_bytes, max_blks;
        int error;
index d5fa75eac0bfee8ea4c677c798313eb79e3175ea..d6bf1f8c25dcd19ecd62d3e819aa608126ae6ac3 100644 (file)
@@ -1524,7 +1524,6 @@ fail:
                return;
        }
        list_add_tail(&gh->gh_list, insert_pt);
-       gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
        spin_unlock(&gl->gl_lockref.lock);
        if (sdp->sd_lockstruct.ls_ops->lm_cancel)
                sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
index c8685ca7d2a26a751553a25d95fd3b6e66765cb9..61197598abfd339c4e896e9909752a2abd9b31fc 100644 (file)
@@ -156,21 +156,6 @@ out:
        return gh;
 }
 
-static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
-{
-       return gl->gl_state == LM_ST_EXCLUSIVE;
-}
-
-static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
-{
-       return gl->gl_state == LM_ST_DEFERRED;
-}
-
-static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
-{
-       return gl->gl_state == LM_ST_SHARED;
-}
-
 static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
 {
        if (gl->gl_ops->go_flags & GLOF_ASPACE) {
@@ -181,40 +166,40 @@ static inline struct address_space *gfs2_glock2aspace(struct gfs2_glock *gl)
        return NULL;
 }
 
-extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
-                         const struct gfs2_glock_operations *glops,
-                         int create, struct gfs2_glock **glp);
-extern struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
-extern void gfs2_glock_put(struct gfs2_glock *gl);
-extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
+int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
+                  const struct gfs2_glock_operations *glops,
+                  int create, struct gfs2_glock **glp);
+struct gfs2_glock *gfs2_glock_hold(struct gfs2_glock *gl);
+void gfs2_glock_put(struct gfs2_glock *gl);
+void gfs2_glock_queue_put(struct gfs2_glock *gl);
 
-extern void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
-                              u16 flags, struct gfs2_holder *gh,
-                              unsigned long ip);
+void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
+                       u16 flags, struct gfs2_holder *gh,
+                       unsigned long ip);
 static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
                                    u16 flags, struct gfs2_holder *gh) {
        __gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
 }
 
-extern void gfs2_holder_reinit(unsigned int state, u16 flags,
-                              struct gfs2_holder *gh);
-extern void gfs2_holder_uninit(struct gfs2_holder *gh);
-extern int gfs2_glock_nq(struct gfs2_holder *gh);
-extern int gfs2_glock_poll(struct gfs2_holder *gh);
-extern int gfs2_instantiate(struct gfs2_holder *gh);
-extern int gfs2_glock_holder_ready(struct gfs2_holder *gh);
-extern int gfs2_glock_wait(struct gfs2_holder *gh);
-extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
-extern void gfs2_glock_dq(struct gfs2_holder *gh);
-extern void gfs2_glock_dq_wait(struct gfs2_holder *gh);
-extern void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
-extern int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
-                            const struct gfs2_glock_operations *glops,
-                            unsigned int state, u16 flags,
-                            struct gfs2_holder *gh);
-extern int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-extern void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
-extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
+void gfs2_holder_reinit(unsigned int state, u16 flags,
+                       struct gfs2_holder *gh);
+void gfs2_holder_uninit(struct gfs2_holder *gh);
+int gfs2_glock_nq(struct gfs2_holder *gh);
+int gfs2_glock_poll(struct gfs2_holder *gh);
+int gfs2_instantiate(struct gfs2_holder *gh);
+int gfs2_glock_holder_ready(struct gfs2_holder *gh);
+int gfs2_glock_wait(struct gfs2_holder *gh);
+int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_glock_dq(struct gfs2_holder *gh);
+void gfs2_glock_dq_wait(struct gfs2_holder *gh);
+void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
+int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
+                     const struct gfs2_glock_operations *glops,
+                     unsigned int state, u16 flags,
+                     struct gfs2_holder *gh);
+int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
                            bool fsid);
 #define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) {             \
                        gfs2_dump_glock(NULL, gl, true);        \
@@ -228,7 +213,7 @@ extern void gfs2_dump_glock(struct seq_file *seq, struct gfs2_glock *gl,
                        gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
        while (0)
 
-extern __printf(2, 3)
+__printf(2, 3)
 void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
 
 /**
@@ -256,27 +241,27 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
        return error;
 }
 
-extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
-extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
-extern bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
-extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
-extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
-extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
-extern void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
-extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
-extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
-extern void gfs2_glock_free(struct gfs2_glock *gl);
+void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
+void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+bool gfs2_queue_try_to_evict(struct gfs2_glock *gl);
+void gfs2_cancel_delete_work(struct gfs2_glock *gl);
+void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
+void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
+void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
+void gfs2_glock_thaw(struct gfs2_sbd *sdp);
+void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
+void gfs2_glock_free(struct gfs2_glock *gl);
 
-extern int __init gfs2_glock_init(void);
-extern void gfs2_glock_exit(void);
+int __init gfs2_glock_init(void);
+void gfs2_glock_exit(void);
 
-extern void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
-extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
-extern void gfs2_register_debugfs(void);
-extern void gfs2_unregister_debugfs(void);
+void gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
+void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
+void gfs2_register_debugfs(void);
+void gfs2_unregister_debugfs(void);
 
-extern void glock_set_object(struct gfs2_glock *gl, void *object);
-extern void glock_clear_object(struct gfs2_glock *gl, void *object);
+void glock_set_object(struct gfs2_glock *gl, void *object);
+void glock_clear_object(struct gfs2_glock *gl, void *object);
 
 extern const struct lm_lockops gfs2_dlm_ops;
 
@@ -295,7 +280,7 @@ static inline bool gfs2_holder_queued(struct gfs2_holder *gh)
        return !list_empty(&gh->gh_list);
 }
 
-extern void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
-extern bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
+void gfs2_inode_remember_delete(struct gfs2_glock *gl, u64 generation);
+bool gfs2_inode_already_deleted(struct gfs2_glock *gl, u64 generation);
 
 #endif /* __GLOCK_DOT_H__ */
index e7d334c277a1477f182459a1ccce0486192e258f..b41c78bd2cc053838db577e1cfa943b6b96c5b97 100644 (file)
@@ -614,18 +614,6 @@ static int freeze_go_xmote_bh(struct gfs2_glock *gl)
        return 0;
 }
 
-/**
- * freeze_go_demote_ok
- * @gl: the glock
- *
- * Always returns 0
- */
-
-static int freeze_go_demote_ok(const struct gfs2_glock *gl)
-{
-       return 0;
-}
-
 /**
  * iopen_go_callback - schedule the dcache entry for the inode to be deleted
  * @gl: the glock
@@ -745,7 +733,6 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
 
 const struct gfs2_glock_operations gfs2_freeze_glops = {
        .go_xmote_bh = freeze_go_xmote_bh,
-       .go_demote_ok = freeze_go_demote_ok,
        .go_callback = freeze_go_callback,
        .go_type = LM_TYPE_NONDISK,
        .go_flags = GLOF_NONDISK,
index 695898afcaf1fbf6c9d3be048a1cfa90ce11e225..9341423798df8cb68f82b193baa3001b51370270 100644 (file)
@@ -22,7 +22,7 @@ extern const struct gfs2_glock_operations gfs2_quota_glops;
 extern const struct gfs2_glock_operations gfs2_journal_glops;
 extern const struct gfs2_glock_operations *gfs2_glops_list[];
 
-extern int gfs2_inode_metasync(struct gfs2_glock *gl);
-extern void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
+int gfs2_inode_metasync(struct gfs2_glock *gl);
+void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
 
 #endif /* __GLOPS_DOT_H__ */
index a8c95c5293c6cf854ca6c0124964a9e49b2f9bb0..95a334d64da2a362983a59c06a328e3fe0b75065 100644 (file)
@@ -863,7 +863,7 @@ static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
        preempt_enable();
 }
 
-extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
 
 static inline unsigned gfs2_max_stuffed_size(const struct gfs2_inode *ip)
 {
index 7fe77bc771e5ac659a8792d5c12c2d54e6498277..1b95db2c3aac3c9a9d5d881985e70622342b52ab 100644 (file)
@@ -266,17 +266,18 @@ fail_iput:
 }
 
 
-struct inode *gfs2_lookup_simple(struct inode *dip, const char *name)
+/**
+ * gfs2_lookup_meta - Look up an inode in a metadata directory
+ * @dip: The directory
+ * @name: The name of the inode
+ */
+struct inode *gfs2_lookup_meta(struct inode *dip, const char *name)
 {
        struct qstr qstr;
        struct inode *inode;
+
        gfs2_str2qstr(&qstr, name);
        inode = gfs2_lookupi(dip, &qstr, 1);
-       /* gfs2_lookupi has inconsistent callers: vfs
-        * related routines expect NULL for no entry found,
-        * gfs2_lookup_simple callers expect ENOENT
-        * and do not check for NULL.
-        */
        if (IS_ERR_OR_NULL(inode))
                return inode ? inode : ERR_PTR(-ENOENT);
 
@@ -418,7 +419,7 @@ static int alloc_dinode(struct gfs2_inode *ip, u32 flags, unsigned *dblocks)
        if (error)
                goto out_ipreserv;
 
-       error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1, &ip->i_generation);
+       error = gfs2_alloc_blocks(ip, &ip->i_no_addr, dblocks, 1);
        if (error)
                goto out_trans_end;
 
@@ -1867,16 +1868,24 @@ out:
 int gfs2_permission(struct mnt_idmap *idmap, struct inode *inode,
                    int mask)
 {
+       int may_not_block = mask & MAY_NOT_BLOCK;
        struct gfs2_inode *ip;
        struct gfs2_holder i_gh;
+       struct gfs2_glock *gl;
        int error;
 
        gfs2_holder_mark_uninitialized(&i_gh);
        ip = GFS2_I(inode);
-       if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
-               if (mask & MAY_NOT_BLOCK)
+       gl = rcu_dereference_check(ip->i_gl, !may_not_block);
+       if (unlikely(!gl)) {
+               /* inode is getting torn down, must be RCU mode */
+               WARN_ON_ONCE(!may_not_block);
+               return -ECHILD;
+        }
+       if (gfs2_glock_is_locked_by_me(gl) == NULL) {
+               if (may_not_block)
                        return -ECHILD;
-               error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
+               error = gfs2_glock_nq_init(gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
                if (error)
                        return error;
        }
@@ -1921,7 +1930,7 @@ static int setattr_chown(struct inode *inode, struct iattr *attr)
        kuid_t ouid, nuid;
        kgid_t ogid, ngid;
        int error;
-       struct gfs2_alloc_parms ap;
+       struct gfs2_alloc_parms ap = {};
 
        ouid = inode->i_uid;
        ogid = inode->i_gid;
@@ -2154,7 +2163,7 @@ static int gfs2_update_time(struct inode *inode, int flags)
        int error;
 
        gh = gfs2_glock_is_locked_by_me(gl);
-       if (gh && !gfs2_glock_is_held_excl(gl)) {
+       if (gh && gl->gl_state != LM_ST_EXCLUSIVE) {
                gfs2_glock_dq(gh);
                gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, gh);
                error = gfs2_glock_nq(gh);
index c8c5814e7295d9dd83b1c6c6693a6660a478e14b..fd15d1c6b6fb1eac71d185855f0d3713155409d5 100644 (file)
@@ -13,9 +13,9 @@
 #include "util.h"
 
 bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask);
-extern int gfs2_internal_read(struct gfs2_inode *ip,
-                             char *buf, loff_t *pos, unsigned size);
-extern void gfs2_set_aops(struct inode *inode);
+ssize_t gfs2_internal_read(struct gfs2_inode *ip,
+                          char *buf, loff_t *pos, size_t size);
+void gfs2_set_aops(struct inode *inode);
 
 static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
 {
@@ -44,19 +44,17 @@ static inline int gfs2_is_dir(const struct gfs2_inode *ip)
 
 static inline void gfs2_set_inode_blocks(struct inode *inode, u64 blocks)
 {
-       inode->i_blocks = blocks <<
-               (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
+       inode->i_blocks = blocks << (inode->i_blkbits - 9);
 }
 
 static inline u64 gfs2_get_inode_blocks(const struct inode *inode)
 {
-       return inode->i_blocks >>
-               (GFS2_SB(inode)->sd_sb.sb_bsize_shift - GFS2_BASIC_BLOCK_SHIFT);
+       return inode->i_blocks >> (inode->i_blkbits - 9);
 }
 
 static inline void gfs2_add_inode_blocks(struct inode *inode, s64 change)
 {
-       change <<= inode->i_blkbits - GFS2_BASIC_BLOCK_SHIFT;
+       change <<= inode->i_blkbits - 9;
        gfs2_assert(GFS2_SB(inode), (change >= 0 || inode->i_blocks >= -change));
        inode->i_blocks += change;
 }
@@ -88,33 +86,33 @@ err:
        return -EIO;
 }
 
-extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type, 
-                                      u64 no_addr, u64 no_formal_ino,
-                                      unsigned int blktype);
-extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
-                                        u64 no_formal_ino,
-                                        unsigned int blktype);
-
-extern int gfs2_inode_refresh(struct gfs2_inode *ip);
-
-extern struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
-                                 int is_root);
-extern int gfs2_permission(struct mnt_idmap *idmap,
-                          struct inode *inode, int mask);
-extern struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
-extern void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
-extern int gfs2_open_common(struct inode *inode, struct file *file);
-extern loff_t gfs2_seek_data(struct file *file, loff_t offset);
-extern loff_t gfs2_seek_hole(struct file *file, loff_t offset);
+struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
+                               u64 no_addr, u64 no_formal_ino,
+                               unsigned int blktype);
+struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
+                                 u64 no_formal_ino,
+                                 unsigned int blktype);
+
+int gfs2_inode_refresh(struct gfs2_inode *ip);
+
+struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
+                          int is_root);
+int gfs2_permission(struct mnt_idmap *idmap,
+                   struct inode *inode, int mask);
+struct inode *gfs2_lookup_meta(struct inode *dip, const char *name);
+void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
+int gfs2_open_common(struct inode *inode, struct file *file);
+loff_t gfs2_seek_data(struct file *file, loff_t offset);
+loff_t gfs2_seek_hole(struct file *file, loff_t offset);
 
 extern const struct file_operations gfs2_file_fops_nolock;
 extern const struct file_operations gfs2_dir_fops_nolock;
 
-extern int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa);
-extern int gfs2_fileattr_set(struct mnt_idmap *idmap,
-                            struct dentry *dentry, struct fileattr *fa);
-extern void gfs2_set_inode_flags(struct inode *inode);
+int gfs2_fileattr_get(struct dentry *dentry, struct fileattr *fa);
+int gfs2_fileattr_set(struct mnt_idmap *idmap,
+                     struct dentry *dentry, struct fileattr *fa);
+void gfs2_set_inode_flags(struct inode *inode);
+
 #ifdef CONFIG_GFS2_FS_LOCKING_DLM
 extern const struct file_operations gfs2_file_fops;
 extern const struct file_operations gfs2_dir_fops;
index 653cffcbf86945f3013256dd252caa7fb679b425..c27b05099c1e403ed18f9049fb13a9501a58d320 100644 (file)
@@ -70,29 +70,29 @@ static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
        }
 }
 
-extern void gfs2_ordered_del_inode(struct gfs2_inode *ip);
-extern unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
-extern void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
-extern bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
-extern void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
-extern void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
-extern bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
-                                unsigned int *extra_revokes);
-extern void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
-                            unsigned int *extra_revokes);
-extern void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
-                                 u64 seq, u32 tail, u32 lblock, u32 flags,
-                                 blk_opf_t op_flags);
-extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
-                          u32 type);
-extern void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
-extern void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
-extern void log_flush_wait(struct gfs2_sbd *sdp);
+void gfs2_ordered_del_inode(struct gfs2_inode *ip);
+unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct);
+void gfs2_remove_from_ail(struct gfs2_bufdata *bd);
+bool gfs2_log_is_empty(struct gfs2_sbd *sdp);
+void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes);
+void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks);
+bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+                         unsigned int *extra_revokes);
+void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
+                     unsigned int *extra_revokes);
+void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+                          u64 seq, u32 tail, u32 lblock, u32 flags,
+                          blk_opf_t op_flags);
+void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
+                   u32 type);
+void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *trans);
+void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc);
+void log_flush_wait(struct gfs2_sbd *sdp);
 
-extern int gfs2_logd(void *data);
-extern void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
-extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
-extern void gfs2_flush_revokes(struct gfs2_sbd *sdp);
-extern void gfs2_ail_drain(struct gfs2_sbd *sdp);
+int gfs2_logd(void *data);
+void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
+void gfs2_flush_revokes(struct gfs2_sbd *sdp);
+void gfs2_ail_drain(struct gfs2_sbd *sdp);
 
 #endif /* __LOG_DOT_H__ */
index 1412ffba1d4446b918f7779a762b19564ea6ce95..07890c7b145d8b9263bf2531392d2772948e2fc4 100644 (file)
 #include "incore.h"
 
 extern const struct gfs2_log_operations *gfs2_log_ops[];
-extern void gfs2_log_incr_head(struct gfs2_sbd *sdp);
-extern u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
-extern void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
-                          struct page *page, unsigned size, unsigned offset,
-                          u64 blkno);
-extern void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);
-extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
-extern int gfs2_find_jhead(struct gfs2_jdesc *jd,
-                          struct gfs2_log_header_host *head, bool keep_cache);
-extern void gfs2_drain_revokes(struct gfs2_sbd *sdp);
+
+void gfs2_log_incr_head(struct gfs2_sbd *sdp);
+u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lbn);
+void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+                   struct page *page, unsigned size, unsigned offset,
+                   u64 blkno);
+void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf);
+void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
+int gfs2_find_jhead(struct gfs2_jdesc *jd,
+                   struct gfs2_log_header_host *head, bool keep_cache);
+void gfs2_drain_revokes(struct gfs2_sbd *sdp);
+
 static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
 {
        return sdp->sd_ldptrs;
index d0a58cdd433a90c1024cac530368e1168572cd21..831d988c2ceb74f31f33beb05b792f3df8bec47a 100644 (file)
@@ -50,21 +50,21 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
                return inode->i_sb->s_fs_info;
 }
 
-extern struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
-extern int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
-                         int rahead, struct buffer_head **bhp);
-extern int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
-extern struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
-                                      int create);
+struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
+int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
+                  int rahead, struct buffer_head **bhp);
+int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
+                               int create);
 enum {
        REMOVE_JDATA = 0,
        REMOVE_META = 1,
 };
 
-extern void gfs2_remove_from_journal(struct buffer_head *bh, int meta);
-extern void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
-extern int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
-                           struct buffer_head **bhp);
+void gfs2_remove_from_journal(struct buffer_head *bh, int meta);
+void gfs2_journal_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen);
+int gfs2_meta_buffer(struct gfs2_inode *ip, u32 mtype, u64 num,
+                    struct buffer_head **bhp);
 
 static inline int gfs2_meta_inode_buffer(struct gfs2_inode *ip,
                                         struct buffer_head **bhp)
index ecf789b7168c9d7d644346c49be480d98afa1f37..b108c5d26839e1dc234c294df77fb557b4861a06 100644 (file)
@@ -292,8 +292,7 @@ static int gfs2_read_sb(struct gfs2_sbd *sdp, int silent)
                return error;
        }
 
-       sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
-                              GFS2_BASIC_BLOCK_SHIFT;
+       sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9;
        sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
        sdp->sd_diptrs = (sdp->sd_sb.sb_bsize -
                          sizeof(struct gfs2_dinode)) / sizeof(u64);
@@ -648,7 +647,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
        struct gfs2_jdesc *jd;
        struct gfs2_inode *ip;
 
-       sdp->sd_statfs_inode = gfs2_lookup_simple(master, "statfs");
+       sdp->sd_statfs_inode = gfs2_lookup_meta(master, "statfs");
        if (IS_ERR(sdp->sd_statfs_inode)) {
                error = PTR_ERR(sdp->sd_statfs_inode);
                fs_err(sdp, "can't read in statfs inode: %d\n", error);
@@ -657,7 +656,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
        if (sdp->sd_args.ar_spectator)
                goto out;
 
-       pn = gfs2_lookup_simple(master, "per_node");
+       pn = gfs2_lookup_meta(master, "per_node");
        if (IS_ERR(pn)) {
                error = PTR_ERR(pn);
                fs_err(sdp, "can't find per_node directory: %d\n", error);
@@ -674,7 +673,7 @@ static int init_statfs(struct gfs2_sbd *sdp)
                        goto free_local;
                }
                sprintf(buf, "statfs_change%u", jd->jd_jid);
-               lsi->si_sc_inode = gfs2_lookup_simple(pn, buf);
+               lsi->si_sc_inode = gfs2_lookup_meta(pn, buf);
                if (IS_ERR(lsi->si_sc_inode)) {
                        error = PTR_ERR(lsi->si_sc_inode);
                        fs_err(sdp, "can't find local \"sc\" file#%u: %d\n",
@@ -739,7 +738,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
        if (undo)
                goto fail_statfs;
 
-       sdp->sd_jindex = gfs2_lookup_simple(master, "jindex");
+       sdp->sd_jindex = gfs2_lookup_meta(master, "jindex");
        if (IS_ERR(sdp->sd_jindex)) {
                fs_err(sdp, "can't lookup journal index: %d\n", error);
                return PTR_ERR(sdp->sd_jindex);
@@ -888,7 +887,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
                goto fail;
 
        /* Read in the resource index inode */
-       sdp->sd_rindex = gfs2_lookup_simple(master, "rindex");
+       sdp->sd_rindex = gfs2_lookup_meta(master, "rindex");
        if (IS_ERR(sdp->sd_rindex)) {
                error = PTR_ERR(sdp->sd_rindex);
                fs_err(sdp, "can't get resource index inode: %d\n", error);
@@ -897,7 +896,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
        sdp->sd_rindex_uptodate = 0;
 
        /* Read in the quota inode */
-       sdp->sd_quota_inode = gfs2_lookup_simple(master, "quota");
+       sdp->sd_quota_inode = gfs2_lookup_meta(master, "quota");
        if (IS_ERR(sdp->sd_quota_inode)) {
                error = PTR_ERR(sdp->sd_quota_inode);
                fs_err(sdp, "can't get quota file inode: %d\n", error);
@@ -941,7 +940,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
        if (undo)
                goto fail_qc_gh;
 
-       pn = gfs2_lookup_simple(master, "per_node");
+       pn = gfs2_lookup_meta(master, "per_node");
        if (IS_ERR(pn)) {
                error = PTR_ERR(pn);
                fs_err(sdp, "can't find per_node directory: %d\n", error);
@@ -949,7 +948,7 @@ static int init_per_node(struct gfs2_sbd *sdp, int undo)
        }
 
        sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
-       sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
+       sdp->sd_qc_inode = gfs2_lookup_meta(pn, buf);
        if (IS_ERR(sdp->sd_qc_inode)) {
                error = PTR_ERR(sdp->sd_qc_inode);
                fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
@@ -1187,10 +1186,9 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 
        /* Set up the buffer cache and fill in some fake block size values
           to allow us to read-in the on-disk superblock. */
-       sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
+       sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, 512);
        sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
-       sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
-                               GFS2_BASIC_BLOCK_SHIFT;
+       sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift - 9;
        sdp->sd_fsb2bb = BIT(sdp->sd_fsb2bb_shift);
 
        sdp->sd_tune.gt_logd_secs = sdp->sd_args.ar_commit;
@@ -1278,10 +1276,8 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 
        if (!sb_rdonly(sb)) {
                error = init_threads(sdp);
-               if (error) {
-                       gfs2_withdraw_delayed(sdp);
+               if (error)
                        goto fail_per_node;
-               }
        }
 
        error = gfs2_freeze_lock_shared(sdp);
index 5cbbc1a46a92bb46e77bc9aa690f5a035acd357c..95dae7838b4e56b24c76c272c9cac407916e828f 100644 (file)
@@ -470,6 +470,17 @@ static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
            (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
                return 0;
 
+       /*
+        * If qd_change is 0 it means a pending quota change was negated.
+        * We should not sync it, but we still have a qd reference and slot
+        * reference taken by gfs2_quota_change -> do_qc that need to be put.
+        */
+       if (!qd->qd_change && test_and_clear_bit(QDF_CHANGE, &qd->qd_flags)) {
+               slot_put(qd);
+               qd_put(qd);
+               return 0;
+       }
+
        if (!lockref_get_not_dead(&qd->qd_lockref))
                return 0;
 
@@ -912,7 +923,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
 {
        struct gfs2_sbd *sdp = (*qda)->qd_sbd;
        struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
-       struct gfs2_alloc_parms ap = { .aflags = 0, };
+       struct gfs2_alloc_parms ap = {};
        unsigned int data_blocks, ind_blocks;
        struct gfs2_holder *ghs, i_gh;
        unsigned int qx, x;
@@ -1086,8 +1097,7 @@ int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
        u32 x;
        int error;
 
-       if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
-           sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
+       if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
                return 0;
 
        error = gfs2_quota_hold(ip, uid, gid);
@@ -1194,17 +1204,16 @@ void gfs2_quota_unlock(struct gfs2_inode *ip)
 
 #define MAX_LINE 256
 
-static int print_message(struct gfs2_quota_data *qd, char *type)
+static void print_message(struct gfs2_quota_data *qd, char *type)
 {
        struct gfs2_sbd *sdp = qd->qd_sbd;
 
-       if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
+       if (sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) {
                fs_info(sdp, "quota %s for %s %u\n",
                        type,
                        (qd->qd_id.type == USRQUOTA) ? "user" : "group",
                        from_kqid(&init_user_ns, qd->qd_id));
-
-       return 0;
+       }
 }
 
 /**
@@ -1274,7 +1283,8 @@ int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
                                         * HZ)) {
                        quota_send_warning(qd->qd_id,
                                           sdp->sd_vfs->s_dev, QUOTA_NL_BSOFTWARN);
-                       error = print_message(qd, "warning");
+                       print_message(qd, "warning");
+                       error = 0;
                        qd->qd_last_warn = jiffies;
                }
        }
@@ -1288,8 +1298,7 @@ void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
        u32 x;
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
 
-       if ((sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
-           sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET) ||
+       if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF ||
            gfs2_assert_warn(sdp, change))
                return;
        if (ip->i_diskflags & GFS2_DIF_SYSTEM)
@@ -1746,7 +1755,7 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
        if (gfs2_is_stuffed(ip))
                alloc_required = 1;
        if (alloc_required) {
-               struct gfs2_alloc_parms ap = { .aflags = 0, };
+               struct gfs2_alloc_parms ap = {};
                gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
                                       &data_blocks, &ind_blocks);
                blocks = 1 + data_blocks + ind_blocks;
index 36f54b426b0c3a5e22aa207bf9f5dbd34a823d65..f462d9cb308762e6df7a406a9cdb14ede81c6a2e 100644 (file)
@@ -15,27 +15,27 @@ struct gfs2_sbd;
 #define NO_UID_QUOTA_CHANGE INVALID_UID
 #define NO_GID_QUOTA_CHANGE INVALID_GID
 
-extern int gfs2_qa_get(struct gfs2_inode *ip);
-extern void gfs2_qa_put(struct gfs2_inode *ip);
-extern int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
-extern void gfs2_quota_unhold(struct gfs2_inode *ip);
+int gfs2_qa_get(struct gfs2_inode *ip);
+void gfs2_qa_put(struct gfs2_inode *ip);
+int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
+void gfs2_quota_unhold(struct gfs2_inode *ip);
 
-extern int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
-extern void gfs2_quota_unlock(struct gfs2_inode *ip);
+int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid);
+void gfs2_quota_unlock(struct gfs2_inode *ip);
 
-extern int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
-                           struct gfs2_alloc_parms *ap);
-extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
-                             kuid_t uid, kgid_t gid);
+int gfs2_quota_check(struct gfs2_inode *ip, kuid_t uid, kgid_t gid,
+                    struct gfs2_alloc_parms *ap);
+void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
+                      kuid_t uid, kgid_t gid);
 
-extern int gfs2_quota_sync(struct super_block *sb, int type);
-extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
+int gfs2_quota_sync(struct super_block *sb, int type);
+int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid);
 
-extern int gfs2_quota_init(struct gfs2_sbd *sdp);
-extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
-extern int gfs2_quotad(void *data);
+int gfs2_quota_init(struct gfs2_sbd *sdp);
+void gfs2_quota_cleanup(struct gfs2_sbd *sdp);
+int gfs2_quotad(void *data);
 
-extern void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
+void gfs2_wake_up_statfs(struct gfs2_sbd *sdp);
 
 static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
                                        struct gfs2_alloc_parms *ap)
@@ -50,8 +50,7 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip,
        ret = gfs2_quota_lock(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
        if (ret)
                return ret;
-       if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON &&
-           sdp->sd_args.ar_quota != GFS2_QUOTA_QUIET)
+       if (sdp->sd_args.ar_quota == GFS2_QUOTA_ACCOUNT)
                return 0;
        ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid, ap);
        if (ret)
@@ -63,6 +62,7 @@ extern const struct quotactl_ops gfs2_quotactl_ops;
 int __init gfs2_qd_shrinker_init(void);
 void gfs2_qd_shrinker_exit(void);
 extern struct list_lru gfs2_qd_lru;
-extern void __init gfs2_quota_hash_init(void);
+
+void __init gfs2_quota_hash_init(void);
 
 #endif /* __QUOTA_DOT_H__ */
index 7a0c9d0b7503f0bd14f7bce068103d6a1063c209..6a0fd42e1120fc1b57e107c18977c21e54b6ec4a 100644 (file)
@@ -17,18 +17,18 @@ static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, u32 *blk)
                *blk = 0;
 }
 
-extern int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
+int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk,
                           struct buffer_head **bh);
 
-extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
-extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
-extern void gfs2_revoke_clean(struct gfs2_jdesc *jd);
+int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
+int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where);
+void gfs2_revoke_clean(struct gfs2_jdesc *jd);
 
-extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
-extern void gfs2_recover_func(struct work_struct *work);
-extern int __get_log_header(struct gfs2_sbd *sdp,
-                           const struct gfs2_log_header *lh, unsigned int blkno,
-                           struct gfs2_log_header_host *head);
+int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait);
+void gfs2_recover_func(struct work_struct *work);
+int __get_log_header(struct gfs2_sbd *sdp,
+                    const struct gfs2_log_header *lh, unsigned int blkno,
+                    struct gfs2_log_header_host *head);
 
 #endif /* __RECOVERY_DOT_H__ */
 
index 9308190895c8903b6412f0360b6c0060116a3077..c2060203b98af857d510bc6618e2046bf00dc192 100644 (file)
@@ -2411,13 +2411,12 @@ static void gfs2_set_alloc_start(struct gfs2_rbm *rbm,
  * @bn: Used to return the starting block number
  * @nblocks: requested number of blocks/extent length (value/result)
  * @dinode: 1 if we're allocating a dinode block, else 0
- * @generation: the generation number of the inode
  *
  * Returns: 0 or error
  */
 
 int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
-                     bool dinode, u64 *generation)
+                     bool dinode)
 {
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *dibh;
@@ -2477,10 +2476,13 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
        rbm.rgd->rd_free -= *nblocks;
        spin_unlock(&rbm.rgd->rd_rsspin);
        if (dinode) {
+               u64 generation;
+
                rbm.rgd->rd_dinodes++;
-               *generation = rbm.rgd->rd_igeneration++;
-               if (*generation == 0)
-                       *generation = rbm.rgd->rd_igeneration++;
+               generation = rbm.rgd->rd_igeneration++;
+               if (generation == 0)
+                       generation = rbm.rgd->rd_igeneration++;
+               ip->i_generation = generation;
        }
 
        gfs2_trans_add_meta(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh);
index 00b30cf893af238ac4be1b970b234c8b27773e03..8d20e99385db47e2de97c9049895c43486141d59 100644 (file)
@@ -22,38 +22,38 @@ struct gfs2_rgrpd;
 struct gfs2_sbd;
 struct gfs2_holder;
 
-extern void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);
+void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd);
 
-extern struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact);
-extern struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
-extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
+struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact);
+struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp);
+struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
 
-extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
-extern int gfs2_rindex_update(struct gfs2_sbd *sdp);
-extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
-extern int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl);
-extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
+void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
+int gfs2_rindex_update(struct gfs2_sbd *sdp);
+void gfs2_free_clones(struct gfs2_rgrpd *rgd);
+int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl);
+void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
 
-extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
+struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
 
 #define GFS2_AF_ORLOV 1
-extern int gfs2_inplace_reserve(struct gfs2_inode *ip,
-                               struct gfs2_alloc_parms *ap);
-extern void gfs2_inplace_release(struct gfs2_inode *ip);
-
-extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
-                            bool dinode, u64 *generation);
-
-extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
-extern void gfs2_rs_delete(struct gfs2_inode *ip);
-extern void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
-                              u64 bstart, u32 blen, int meta);
-extern void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
-                          u64 bstart, u32 blen);
-extern void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
-extern void gfs2_unlink_di(struct inode *inode);
-extern int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
-                              unsigned int type);
+int gfs2_inplace_reserve(struct gfs2_inode *ip,
+                        struct gfs2_alloc_parms *ap);
+void gfs2_inplace_release(struct gfs2_inode *ip);
+
+int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n,
+                     bool dinode);
+
+void gfs2_rs_deltree(struct gfs2_blkreserv *rs);
+void gfs2_rs_delete(struct gfs2_inode *ip);
+void __gfs2_free_blocks(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+                       u64 bstart, u32 blen, int meta);
+void gfs2_free_meta(struct gfs2_inode *ip, struct gfs2_rgrpd *rgd,
+                   u64 bstart, u32 blen);
+void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip);
+void gfs2_unlink_di(struct inode *inode);
+int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr,
+                       unsigned int type);
 
 struct gfs2_rgrp_list {
        unsigned int rl_rgrps;
@@ -62,18 +62,19 @@ struct gfs2_rgrp_list {
        struct gfs2_holder *rl_ghs;
 };
 
-extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
-                          u64 block);
-extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
-                            unsigned int state, u16 flags);
-extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
-extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
-extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
-                          const char *fs_id_buf);
-extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
-                                  struct buffer_head *bh,
-                                  const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed);
-extern int gfs2_fitrim(struct file *filp, void __user *argp);
+void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
+                   u64 block);
+void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
+                     unsigned int state, u16 flags);
+void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
+u64 gfs2_ri_total(struct gfs2_sbd *sdp);
+void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
+                   const char *fs_id_buf);
+int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
+                           struct buffer_head *bh,
+                           const struct gfs2_bitmap *bi, unsigned minlen,
+                           u64 *ptrimmed);
+int gfs2_fitrim(struct file *filp, void __user *argp);
 
 /* This is how to tell if a reservation is in the rgrp tree: */
 static inline bool gfs2_rs_active(const struct gfs2_blkreserv *rs)
@@ -88,9 +89,9 @@ static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
        return first <= block && block < last;
 }
 
-extern void check_and_update_goal(struct gfs2_inode *ip);
+void check_and_update_goal(struct gfs2_inode *ip);
 
-extern void rgrp_lock_local(struct gfs2_rgrpd *rgd);
-extern void rgrp_unlock_local(struct gfs2_rgrpd *rgd);
+void rgrp_lock_local(struct gfs2_rgrpd *rgd);
+void rgrp_unlock_local(struct gfs2_rgrpd *rgd);
 
 #endif /* __RGRP_DOT_H__ */
index 52a878fa7139d8f107371fbb4f4f0a2503c919c1..d21c04a22d7393bf65aafa02110f55a2e4aa666d 100644 (file)
@@ -602,13 +602,15 @@ restart:
        }
        spin_unlock(&sdp->sd_jindex_spin);
 
-       if (!sb_rdonly(sb)) {
+       if (!sb_rdonly(sb))
                gfs2_make_fs_ro(sdp);
-       }
-       if (gfs2_withdrawn(sdp)) {
-               gfs2_destroy_threads(sdp);
+       else {
+               if (gfs2_withdrawn(sdp))
+                       gfs2_destroy_threads(sdp);
+
                gfs2_quota_cleanup(sdp);
        }
+
        WARN_ON(gfs2_withdrawing(sdp));
 
        /*  At this point, we're through modifying the disk  */
@@ -1006,6 +1008,7 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_files = sc.sc_dinodes + sc.sc_free;
        buf->f_ffree = sc.sc_free;
        buf->f_namelen = GFS2_FNAMESIZE;
+       buf->f_fsid = uuid_to_fsid(sb->s_uuid.b);
 
        return 0;
 }
@@ -1299,18 +1302,8 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode)
         * As a last resort, if another node keeps holding the iopen glock
         * without showing any activity on the inode glock, we will eventually
         * time out and fail the iopen glock upgrade.
-        *
-        * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
-        * locking request as an optimization to notify lock holders as soon as
-        * possible.  Without that flag, they'd be notified implicitly by the
-        * second locking request.
         */
 
-       gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
-       error = gfs2_glock_nq(gh);
-       if (error != GLR_TRYFAILED)
-               return !error;
-
        gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
        error = gfs2_glock_nq(gh);
        if (error)
@@ -1550,7 +1543,7 @@ out:
                wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
                gfs2_glock_add_to_lru(ip->i_gl);
                gfs2_glock_put_eventually(ip->i_gl);
-               ip->i_gl = NULL;
+               rcu_assign_pointer(ip->i_gl, NULL);
        }
 }
 
@@ -1576,7 +1569,7 @@ static void gfs2_free_inode(struct inode *inode)
        kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
 }
 
-extern void free_local_statfs_inodes(struct gfs2_sbd *sdp)
+void free_local_statfs_inodes(struct gfs2_sbd *sdp)
 {
        struct local_statfs_inode *lsi, *safe;
 
@@ -1591,8 +1584,8 @@ extern void free_local_statfs_inodes(struct gfs2_sbd *sdp)
        }
 }
 
-extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
-                                            unsigned int index)
+struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
+                                     unsigned int index)
 {
        struct local_statfs_inode *lsi;
 
index b4ddf624458656dc078cc1aa52090cb60882dd5c..b27a774d95808e318f8f772387b833d547364805 100644 (file)
@@ -15,7 +15,7 @@
 #define GFS2_FS_FORMAT_MIN (1801)
 #define GFS2_FS_FORMAT_MAX (1802)
 
-extern void gfs2_lm_unmount(struct gfs2_sbd *sdp);
+void gfs2_lm_unmount(struct gfs2_sbd *sdp);
 
 static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
 {
@@ -26,33 +26,33 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
        return x;
 }
 
-extern void gfs2_jindex_free(struct gfs2_sbd *sdp);
+void gfs2_jindex_free(struct gfs2_sbd *sdp);
 
-extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
-extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
-extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
-                                    struct gfs2_inode **ipp);
+struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
+int gfs2_jdesc_check(struct gfs2_jdesc *jd);
+int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
+                             struct gfs2_inode **ipp);
 
-extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
-extern void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
-extern void gfs2_online_uevent(struct gfs2_sbd *sdp);
-extern void gfs2_destroy_threads(struct gfs2_sbd *sdp);
-extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
-extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
-                              s64 dinodes);
-extern void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
-                                 const void *buf);
-extern void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
-                                  void *buf);
-extern void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
-extern int gfs2_statfs_sync(struct super_block *sb, int type);
-extern void gfs2_freeze_func(struct work_struct *work);
-extern void gfs2_thaw_freeze_initiator(struct super_block *sb);
+int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
+void gfs2_make_fs_ro(struct gfs2_sbd *sdp);
+void gfs2_online_uevent(struct gfs2_sbd *sdp);
+void gfs2_destroy_threads(struct gfs2_sbd *sdp);
+int gfs2_statfs_init(struct gfs2_sbd *sdp);
+void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
+                       s64 dinodes);
+void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc,
+                          const void *buf);
+void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc,
+                           void *buf);
+void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh);
+int gfs2_statfs_sync(struct super_block *sb, int type);
+void gfs2_freeze_func(struct work_struct *work);
+void gfs2_thaw_freeze_initiator(struct super_block *sb);
 
-extern void free_local_statfs_inodes(struct gfs2_sbd *sdp);
-extern struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
-                                            unsigned int index);
-extern void free_sbd(struct gfs2_sbd *sdp);
+void free_local_statfs_inodes(struct gfs2_sbd *sdp);
+struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
+                                     unsigned int index);
+void free_sbd(struct gfs2_sbd *sdp);
 
 extern struct file_system_type gfs2_fs_type;
 extern struct file_system_type gfs2meta_fs_type;
index c76ad9a4c75a986185a33daa8b19ec412f2eaea7..f8ce5302280d31a673394c909b5cd0a5676de19c 100644 (file)
@@ -34,17 +34,17 @@ static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned
        return rgd->rd_length;
 }
 
-extern int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
-                             unsigned int blocks, unsigned int revokes,
-                             unsigned long ip);
-extern int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
-                           unsigned int revokes);
-
-extern void gfs2_trans_end(struct gfs2_sbd *sdp);
-extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
-extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
-extern void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
-extern void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
-extern void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr);
+int __gfs2_trans_begin(struct gfs2_trans *tr, struct gfs2_sbd *sdp,
+                      unsigned int blocks, unsigned int revokes,
+                      unsigned long ip);
+int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
+                    unsigned int revokes);
+
+void gfs2_trans_end(struct gfs2_sbd *sdp);
+void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
+void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
+void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd);
+void gfs2_trans_remove_revoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len);
+void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr);
 
 #endif /* __TRANS_DOT_H__ */
index cdb839529175d8d4abaaf0795579bada3f50479b..11c9d59b6889622912995be96bbe21b88e83bdea 100644 (file)
@@ -147,10 +147,10 @@ static inline void gfs2_metatype_set(struct buffer_head *bh, u16 type,
 int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
                    char *file, unsigned int line);
 
-extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
-                              bool verbose);
-extern int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp);
-extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
+int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+                       bool verbose);
+int gfs2_freeze_lock_shared(struct gfs2_sbd *sdp);
+void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
 
 #define gfs2_io_error(sdp) \
 gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
index 79d5c55595121d1e08bfe81263f79cb7f982c34c..8c96ba6230d1b9ace38bc09010fd637a89c76238 100644 (file)
@@ -639,7 +639,7 @@ static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
        u64 block;
        int error;
 
-       error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
+       error = gfs2_alloc_blocks(ip, &block, &n, 0);
        if (error)
                return error;
        gfs2_trans_remove_revoke(sdp, block, 1);
@@ -701,7 +701,7 @@ static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                        int mh_size = sizeof(struct gfs2_meta_header);
                        unsigned int n = 1;
 
-                       error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
+                       error = gfs2_alloc_blocks(ip, &block, &n, 0);
                        if (error)
                                return error;
                        gfs2_trans_remove_revoke(sdp, block, 1);
@@ -1002,7 +1002,7 @@ static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
        } else {
                u64 blk;
                unsigned int n = 1;
-               error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
+               error = gfs2_alloc_blocks(ip, &blk, &n, 0);
                if (error)
                        return error;
                gfs2_trans_remove_revoke(sdp, blk, 1);
index 2aed9d7d483d5b181491137d7c18b912548d5224..eb12eb7e37c194c8857bcd4de03737ef6b7cfe2a 100644 (file)
@@ -50,14 +50,14 @@ struct gfs2_ea_location {
        struct gfs2_ea_header *el_prev;
 };
 
-extern int __gfs2_xattr_set(struct inode *inode, const char *name,
-                           const void *value, size_t size,
-                           int flags, int type);
-extern ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size);
-extern int gfs2_ea_dealloc(struct gfs2_inode *ip);
+int __gfs2_xattr_set(struct inode *inode, const char *name,
+                    const void *value, size_t size,
+                    int flags, int type);
+ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size);
+int gfs2_ea_dealloc(struct gfs2_inode *ip);
 
 /* Exported to acl.c */
 
-extern int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
+int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **data);
 
 #endif /* __EATTR_DOT_H__ */
index 54b3d489b6a7a52f7f876632dff55da6c5fc89c8..f757d4f7ad98a48542cb197fe8292ff91c868cd3 100644 (file)
@@ -1179,7 +1179,9 @@ static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(d_inode(dentry));
+       u64 id = huge_encode_dev(dentry->d_sb->s_dev);
 
+       buf->f_fsid = u64_to_fsid(id);
        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
index 7ea37f49f1e18e0c7737ac4132c8c4930b4d474f..f99591a634b4dd5b8450ff54bc70de2c66d4677d 100644 (file)
@@ -150,6 +150,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child)
 }
 
 static const struct export_operations jffs2_export_ops = {
+       .encode_fh = generic_encode_ino32_fh,
        .get_parent = jffs2_get_parent,
        .fh_to_dentry = jffs2_fh_to_dentry,
        .fh_to_parent = jffs2_fh_to_parent,
index 966826c394ee4be419baccb4871a770e2d9e2248..8d8e556bd6104eca1ec55d7ea4da3bcfec0c967f 100644 (file)
@@ -896,6 +896,7 @@ static const struct super_operations jfs_super_operations = {
 };
 
 static const struct export_operations jfs_export_operations = {
+       .encode_fh      = generic_encode_ino32_fh,
        .fh_to_dentry   = jfs_fh_to_dentry,
        .fh_to_parent   = jfs_fh_to_parent,
        .get_parent     = jfs_get_parent,
index abe2b5a40ba1b69ab4a8ad65b76c907a28fdb693..e9440d55073c50962486f3f4f047a7872544fcff 100644 (file)
@@ -41,6 +41,9 @@ EXPORT_SYMBOL(simple_getattr);
 
 int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
+       u64 id = huge_encode_dev(dentry->d_sb->s_dev);
+
+       buf->f_fsid = u64_to_fsid(id);
        buf->f_type = dentry->d_sb->s_magic;
        buf->f_bsize = PAGE_SIZE;
        buf->f_namelen = NAME_MAX;
@@ -1309,6 +1312,47 @@ ssize_t simple_attr_write_signed(struct file *file, const char __user *buf,
 }
 EXPORT_SYMBOL_GPL(simple_attr_write_signed);
 
+/**
+ * generic_encode_ino32_fh - generic export_operations->encode_fh function
+ * @inode:   the object to encode
+ * @fh:      where to store the file handle fragment
+ * @max_len: maximum length to store there (in 4 byte units)
+ * @parent:  parent directory inode, if wanted
+ *
+ * This generic encode_fh function assumes that the 32-bit inode number
+ * is suitable for locating an inode, and that the generation number
+ * can be used to check that it is still valid.  It places them in the
+ * filehandle fragment where export_decode_fh expects to find them.
+ */
+int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len,
+                           struct inode *parent)
+{
+       struct fid *fid = (void *)fh;
+       int len = *max_len;
+       int type = FILEID_INO32_GEN;
+
+       if (parent && (len < 4)) {
+               *max_len = 4;
+               return FILEID_INVALID;
+       } else if (len < 2) {
+               *max_len = 2;
+               return FILEID_INVALID;
+       }
+
+       len = 2;
+       fid->i32.ino = inode->i_ino;
+       fid->i32.gen = inode->i_generation;
+       if (parent) {
+               fid->i32.parent_ino = parent->i_ino;
+               fid->i32.parent_gen = parent->i_generation;
+               len = 4;
+               type = FILEID_INO32_GEN_PARENT;
+       }
+       *max_len = len;
+       return type;
+}
+EXPORT_SYMBOL_GPL(generic_encode_ino32_fh);
+
 /**
  * generic_fh_to_dentry - generic helper for the fh_to_dentry export operation
  * @sb:                filesystem to do the file handle conversion on
index 4905665c47d0dcc1244790d4a9e89be48091d455..57d1dedf3f8fa72fe7cb32316ee72961559a82c6 100644 (file)
@@ -256,6 +256,7 @@ struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap)
 
        return idmap;
 }
+EXPORT_SYMBOL_GPL(mnt_idmap_get);
 
 /**
  * mnt_idmap_put - put a reference to an idmapping
@@ -271,3 +272,4 @@ void mnt_idmap_put(struct mnt_idmap *idmap)
                kfree(idmap);
        }
 }
+EXPORT_SYMBOL_GPL(mnt_idmap_put);
index 7df2503cef6c30060f66d17330c3270514b9c78d..01ac733a63203a459a994a0ec9df8d6006fcb875 100644 (file)
@@ -125,7 +125,7 @@ config PNFS_BLOCK
 
 config PNFS_FLEXFILE_LAYOUT
        tristate
-       depends on NFS_V4_1 && NFS_V3
+       depends on NFS_V4_1
        default NFS_V4
 
 config NFS_V4_1_IMPLEMENTATION_ID_DOMAIN
index cf7365581031b5c4442956a6643572580abc9abc..fa1a14def45cea2fc485b598831becc9e7497992 100644 (file)
@@ -448,6 +448,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
        delegation->cred = get_cred(cred);
        delegation->inode = inode;
        delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
+       delegation->test_gen = 0;
        spin_lock_init(&delegation->lock);
 
        spin_lock(&clp->cl_lock);
@@ -1294,6 +1295,8 @@ static int nfs_server_reap_expired_delegations(struct nfs_server *server,
        struct inode *inode;
        const struct cred *cred;
        nfs4_stateid stateid;
+       unsigned long gen = ++server->delegation_gen;
+
 restart:
        rcu_read_lock();
 restart_locked:
@@ -1303,7 +1306,8 @@ restart_locked:
                    test_bit(NFS_DELEGATION_RETURNING,
                                        &delegation->flags) ||
                    test_bit(NFS_DELEGATION_TEST_EXPIRED,
-                                       &delegation->flags) == 0)
+                                       &delegation->flags) == 0 ||
+                       delegation->test_gen == gen)
                        continue;
                inode = nfs_delegation_grab_inode(delegation);
                if (inode == NULL)
@@ -1312,6 +1316,7 @@ restart_locked:
                cred = get_cred_rcu(delegation->cred);
                nfs4_stateid_copy(&stateid, &delegation->stateid);
                spin_unlock(&delegation->lock);
+               delegation->test_gen = gen;
                clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
                rcu_read_unlock();
                nfs_delegation_test_free_expired(inode, &stateid, cred);
index 1c378992b7c0fce3be940398f5ced5ac3e36c7c4..a6f495d012cf1156700813baec8b1c7f995c5723 100644 (file)
@@ -21,6 +21,7 @@ struct nfs_delegation {
        fmode_t type;
        unsigned long pagemod_limit;
        __u64 change_attr;
+       unsigned long test_gen;
        unsigned long flags;
        refcount_t refcount;
        spinlock_t lock;
index e6a51fd94fea876630c2ea8349256ba09ebadb4d..13dffe4201e6e98fd1d6f3bb649cc6baaa89e17a 100644 (file)
@@ -2532,7 +2532,7 @@ EXPORT_SYMBOL_GPL(nfs_unlink);
 int nfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
                struct dentry *dentry, const char *symname)
 {
-       struct page *page;
+       struct folio *folio;
        char *kaddr;
        struct iattr attr;
        unsigned int pathlen = strlen(symname);
@@ -2547,24 +2547,24 @@ int nfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
        attr.ia_mode = S_IFLNK | S_IRWXUGO;
        attr.ia_valid = ATTR_MODE;
 
-       page = alloc_page(GFP_USER);
-       if (!page)
+       folio = folio_alloc(GFP_USER, 0);
+       if (!folio)
                return -ENOMEM;
 
-       kaddr = page_address(page);
+       kaddr = folio_address(folio);
        memcpy(kaddr, symname, pathlen);
        if (pathlen < PAGE_SIZE)
                memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);
 
        trace_nfs_symlink_enter(dir, dentry);
-       error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
+       error = NFS_PROTO(dir)->symlink(dir, dentry, folio, pathlen, &attr);
        trace_nfs_symlink_exit(dir, dentry, error);
        if (error != 0) {
                dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s) error %d\n",
                        dir->i_sb->s_id, dir->i_ino,
                        dentry, symname, error);
                d_drop(dentry);
-               __free_page(page);
+               folio_put(folio);
                return error;
        }
 
@@ -2574,18 +2574,13 @@ int nfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
         * No big deal if we can't add this page to the page cache here.
         * READLINK will get the missing page from the server if needed.
         */
-       if (!add_to_page_cache_lru(page, d_inode(dentry)->i_mapping, 0,
-                                                       GFP_KERNEL)) {
-               SetPageUptodate(page);
-               unlock_page(page);
-               /*
-                * add_to_page_cache_lru() grabs an extra page refcount.
-                * Drop it here to avoid leaking this page later.
-                */
-               put_page(page);
-       } else
-               __free_page(page);
+       if (filemap_add_folio(d_inode(dentry)->i_mapping, folio, 0,
+                                                       GFP_KERNEL) == 0) {
+               folio_mark_uptodate(folio);
+               folio_unlock(folio);
+       }
 
+       folio_put(folio);
        return 0;
 }
 EXPORT_SYMBOL_GPL(nfs_symlink);
index 4bf208a0a8e9948de4131e8e9e9fb6d21db3a5fc..2de66e4e8280a801b647dfe10c577e69192e236c 100644 (file)
@@ -543,9 +543,10 @@ out:
 }
 
 static int
-nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
+nfs3_proc_symlink(struct inode *dir, struct dentry *dentry, struct folio *folio,
                  unsigned int len, struct iattr *sattr)
 {
+       struct page *page = &folio->page;
        struct nfs3_createdata *data;
        struct dentry *d_alias;
        int status = -ENOMEM;
index 827d00e2f094dc7cf0e959ac75091b8012e324b0..581698f1b7b2441025d5421b3b00b4fba42b2ab8 100644 (file)
@@ -209,6 +209,7 @@ struct nfs4_exception {
        struct inode *inode;
        nfs4_stateid *stateid;
        long timeout;
+       unsigned short retrans;
        unsigned char task_is_privileged : 1;
        unsigned char delay : 1,
                      recovering : 1,
@@ -546,6 +547,7 @@ extern unsigned short max_session_slots;
 extern unsigned short max_session_cb_slots;
 extern unsigned short send_implementation_id;
 extern bool recover_lost_locks;
+extern short nfs_delay_retrans;
 
 #define NFS4_CLIENT_ID_UNIQ_LEN                (64)
 extern char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN];
index a654d7234f51d77c0466b65033b5223b712d6abe..8a943fffaad5619d02941f08d09ee9aca86e1cf2 100644 (file)
@@ -585,6 +585,21 @@ wait_on_recovery:
        return 0;
 }
 
+/*
+ * Track the number of NFS4ERR_DELAY related retransmissions and return
+ * EAGAIN if the 'softerr' mount option is set, and we've exceeded the limit
+ * set by 'nfs_delay_retrans'.
+ */
+static int nfs4_exception_should_retrans(const struct nfs_server *server,
+                                        struct nfs4_exception *exception)
+{
+       if (server->flags & NFS_MOUNT_SOFTERR && nfs_delay_retrans >= 0) {
+               if (exception->retrans++ >= (unsigned short)nfs_delay_retrans)
+                       return -EAGAIN;
+       }
+       return 0;
+}
+
 /* This is the error handling routine for processes that are allowed
  * to sleep.
  */
@@ -595,6 +610,11 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_
 
        ret = nfs4_do_handle_exception(server, errorcode, exception);
        if (exception->delay) {
+               int ret2 = nfs4_exception_should_retrans(server, exception);
+               if (ret2 < 0) {
+                       exception->retry = 0;
+                       return ret2;
+               }
                ret = nfs4_delay(&exception->timeout,
                                exception->interruptible);
                goto out_retry;
@@ -623,6 +643,11 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
 
        ret = nfs4_do_handle_exception(server, errorcode, exception);
        if (exception->delay) {
+               int ret2 = nfs4_exception_should_retrans(server, exception);
+               if (ret2 < 0) {
+                       exception->retry = 0;
+                       return ret2;
+               }
                rpc_delay(task, nfs4_update_delay(&exception->timeout));
                goto out_retry;
        }
@@ -5011,9 +5036,10 @@ static void nfs4_free_createdata(struct nfs4_createdata *data)
 }
 
 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
-               struct page *page, unsigned int len, struct iattr *sattr,
+               struct folio *folio, unsigned int len, struct iattr *sattr,
                struct nfs4_label *label)
 {
+       struct page *page = &folio->page;
        struct nfs4_createdata *data;
        int status = -ENAMETOOLONG;
 
@@ -5038,7 +5064,7 @@ out:
 }
 
 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
-               struct page *page, unsigned int len, struct iattr *sattr)
+               struct folio *folio, unsigned int len, struct iattr *sattr)
 {
        struct nfs4_exception exception = {
                .interruptible = true,
@@ -5049,7 +5075,7 @@ static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
        label = nfs4_label_init_security(dir, dentry, sattr, &l);
 
        do {
-               err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
+               err = _nfs4_proc_symlink(dir, dentry, folio, len, sattr, label);
                trace_nfs4_symlink(dir, &dentry->d_name, err);
                err = nfs4_handle_exception(NFS_SERVER(dir), err,
                                &exception);
@@ -5622,7 +5648,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
 
        msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
        nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
-       nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
+       nfs4_state_protect_write(hdr->ds_clp ? hdr->ds_clp : server->nfs_client, clnt, msg, hdr);
 }
 
 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
@@ -5663,7 +5689,8 @@ static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_mess
        data->res.server = server;
        msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
        nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1, 0);
-       nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
+       nfs4_state_protect(data->ds_clp ? data->ds_clp : server->nfs_client,
+                       NFS_SP4_MACH_CRED_COMMIT, clnt, msg);
 }
 
 static int _nfs4_proc_commit(struct file *dst, struct nfs_commitargs *args,
@@ -8934,6 +8961,7 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
 
        sp4_how = (adata->clp->cl_sp4_flags == 0 ? SP4_NONE : SP4_MACH_CRED);
 
+try_again:
        /* Test connection for session trunking. Async exchange_id call */
        task = nfs4_run_exchange_id(adata->clp, adata->cred, sp4_how, xprt);
        if (IS_ERR(task))
@@ -8946,11 +8974,15 @@ void nfs4_test_session_trunk(struct rpc_clnt *clnt, struct rpc_xprt *xprt,
 
        if (status == 0)
                rpc_clnt_xprt_switch_add_xprt(clnt, xprt);
-       else if (rpc_clnt_xprt_switch_has_addr(clnt,
+       else if (status != -NFS4ERR_DELAY && rpc_clnt_xprt_switch_has_addr(clnt,
                                (struct sockaddr *)&xprt->addr))
                rpc_clnt_xprt_switch_remove_xprt(clnt, xprt);
 
        rpc_put_task(task);
+       if (status == -NFS4ERR_DELAY) {
+               ssleep(1);
+               goto try_again;
+       }
 }
 EXPORT_SYMBOL_GPL(nfs4_test_session_trunk);
 
@@ -9621,6 +9653,9 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
 
        nfs4_sequence_free_slot(&lgp->res.seq_res);
 
+       exception->state = NULL;
+       exception->stateid = NULL;
+
        switch (nfs4err) {
        case 0:
                goto out;
@@ -9716,7 +9751,8 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
 };
 
 struct pnfs_layout_segment *
-nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
+nfs4_proc_layoutget(struct nfs4_layoutget *lgp,
+                   struct nfs4_exception *exception)
 {
        struct inode *inode = lgp->args.inode;
        struct nfs_server *server = NFS_SERVER(inode);
@@ -9736,13 +9772,10 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
                         RPC_TASK_MOVEABLE,
        };
        struct pnfs_layout_segment *lseg = NULL;
-       struct nfs4_exception exception = {
-               .inode = inode,
-               .timeout = *timeout,
-       };
        int status = 0;
 
        nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0, 0);
+       exception->retry = 0;
 
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
@@ -9753,11 +9786,12 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
                goto out;
 
        if (task->tk_status < 0) {
-               status = nfs4_layoutget_handle_exception(task, lgp, &exception);
-               *timeout = exception.timeout;
+               exception->retry = 1;
+               status = nfs4_layoutget_handle_exception(task, lgp, exception);
        } else if (lgp->res.layoutp->len == 0) {
+               exception->retry = 1;
                status = -EAGAIN;
-               *timeout = nfs4_update_delay(&exception.timeout);
+               nfs4_update_delay(&exception->timeout);
        } else
                lseg = pnfs_layout_process(lgp);
 out:
index 84343aefbbd64cdf241fcf4e677ba50e991d9109..21a365357629c7531aeb8ec128cba116d41dc7dd 100644 (file)
@@ -1980,7 +1980,9 @@ pnfs_update_layout(struct inode *ino,
        struct pnfs_layout_segment *lseg = NULL;
        struct nfs4_layoutget *lgp;
        nfs4_stateid stateid;
-       long timeout = 0;
+       struct nfs4_exception exception = {
+               .inode = ino,
+       };
        unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
        bool first;
 
@@ -2144,7 +2146,7 @@ lookup_again:
        lgp->lo = lo;
        pnfs_get_layout_hdr(lo);
 
-       lseg = nfs4_proc_layoutget(lgp, &timeout);
+       lseg = nfs4_proc_layoutget(lgp, &exception);
        trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
                                 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
        nfs_layoutget_end(lo);
@@ -2171,6 +2173,8 @@ lookup_again:
                        goto out_put_layout_hdr;
                }
                if (lseg) {
+                       if (!exception.retry)
+                               goto out_put_layout_hdr;
                        if (first)
                                pnfs_clear_first_layoutget(lo);
                        trace_pnfs_update_layout(ino, pos, count,
index d886c8226d8fecac631cf85dcfad283a4c5681e3..db57a85500ee70f07347f8de647ddca341f4b1ac 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/nfs_page.h>
 #include <linux/workqueue.h>
 
+struct nfs4_exception;
 struct nfs4_opendata;
 
 enum {
@@ -245,7 +246,9 @@ extern size_t max_response_pages(struct nfs_server *server);
 extern int nfs4_proc_getdeviceinfo(struct nfs_server *server,
                                   struct pnfs_device *dev,
                                   const struct cred *cred);
-extern struct pnfs_layout_segment* nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout);
+extern struct pnfs_layout_segment *
+nfs4_proc_layoutget(struct nfs4_layoutget *lgp,
+                   struct nfs4_exception *exception);
 extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync);
 
 /* pnfs.c */
index e3570c656b0f9b27cd4caa08c4ab67350ae13cd3..ad3a321ae997f593184f7d970f95778e83a636e3 100644 (file)
@@ -396,9 +396,10 @@ nfs_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
 }
 
 static int
-nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct page *page,
+nfs_proc_symlink(struct inode *dir, struct dentry *dentry, struct folio *folio,
                 unsigned int len, struct iattr *sattr)
 {
+       struct page *page = &folio->page;
        struct nfs_fh *fh;
        struct nfs_fattr *fattr;
        struct nfs_symlinkargs  arg = {
index 2667ab753d42747e0b20cd2cdfcbe0710884afc0..075b31c93f87d0c8a4aa9699187e94bcdba0aeab 100644 (file)
@@ -1371,6 +1371,7 @@ unsigned short max_session_cb_slots = NFS4_DEF_CB_SLOT_TABLE_SIZE;
 unsigned short send_implementation_id = 1;
 char nfs4_client_id_uniquifier[NFS4_CLIENT_ID_UNIQ_LEN] = "";
 bool recover_lost_locks = false;
+short nfs_delay_retrans = -1;
 
 EXPORT_SYMBOL_GPL(nfs_callback_nr_threads);
 EXPORT_SYMBOL_GPL(nfs_callback_set_tcpport);
@@ -1381,6 +1382,7 @@ EXPORT_SYMBOL_GPL(max_session_cb_slots);
 EXPORT_SYMBOL_GPL(send_implementation_id);
 EXPORT_SYMBOL_GPL(nfs4_client_id_uniquifier);
 EXPORT_SYMBOL_GPL(recover_lost_locks);
+EXPORT_SYMBOL_GPL(nfs_delay_retrans);
 
 #define NFS_CALLBACK_MAXPORTNR (65535U)
 
@@ -1429,5 +1431,9 @@ MODULE_PARM_DESC(recover_lost_locks,
                 "If the server reports that a lock might be lost, "
                 "try to recover it risking data corruption.");
 
-
+module_param_named(delay_retrans, nfs_delay_retrans, short, 0644);
+MODULE_PARM_DESC(delay_retrans,
+                "Unless negative, specifies the number of times the NFSv4 "
+                "client retries a request before returning an EAGAIN error, "
+                "after a reply of NFS4ERR_DELAY from the server.");
 #endif /* CONFIG_NFS_V4 */
index 9d82d50ce0b12dc7063f021d4380582633ed419f..b664caea8b4e6704bc0b46e8388487999697b80b 100644 (file)
@@ -739,6 +739,8 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
                                        &pgio);
                pgio.pg_error = 0;
                nfs_pageio_complete(&pgio);
+               if (err == -EAGAIN && mntflags & NFS_MOUNT_SOFTERR)
+                       break;
        } while (err < 0 && !nfs_error_is_fatal(err));
        nfs_io_completion_put(ioc);
 
index b7da17e530077e7c4cd0f530d17db53046ceff0a..7b641095a665fde11850eb670831ce89ceead18c 100644 (file)
@@ -426,8 +426,7 @@ static int check_export(struct path *path, int *flags, unsigned char *uuid)
                return -EINVAL;
        }
 
-       if (!inode->i_sb->s_export_op ||
-           !inode->i_sb->s_export_op->fh_to_dentry) {
+       if (!exportfs_can_decode_fh(inode->i_sb->s_export_op)) {
                dprintk("exp_export: export of invalid fs type.\n");
                return -EINVAL;
        }
index 45aecdc302f4d9a41fee3cc364a553f2aeb59a3f..4d765c72496f0a59bbf06dd44e455ce84433907d 100644 (file)
@@ -1595,7 +1595,7 @@ static int fanotify_test_fid(struct dentry *dentry, unsigned int flags)
         * file handles so user can use name_to_handle_at() to compare fids
         * reported with events to the file handle of watched objects.
         */
-       if (!nop)
+       if (!exportfs_can_encode_fid(nop))
                return -EOPNOTSUPP;
 
        /*
@@ -1603,7 +1603,7 @@ static int fanotify_test_fid(struct dentry *dentry, unsigned int flags)
         * supports decoding file handles, so user has a way to map back the
         * reported fids to filesystem objects.
         */
-       if (mark_type != FAN_MARK_INODE && !nop->fh_to_dentry)
+       if (mark_type != FAN_MARK_INODE && !exportfs_can_decode_fh(nop))
                return -EOPNOTSUPP;
 
        return 0;
index ab44f2db533be099d0a13485c8c2adeedae9f82c..d7498ddc4a7214c60a1526380c17035694979006 100644 (file)
@@ -384,6 +384,7 @@ static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid,
  * and due to using iget() whereas NTFS needs ntfs_iget().
  */
 const struct export_operations ntfs_export_ops = {
+       .encode_fh      = generic_encode_ino32_fh,
        .get_parent     = ntfs_get_parent,      /* Find the parent of a given
                                                   directory. */
        .fh_to_dentry   = ntfs_fh_to_dentry,
index f763e3256ccc1b6bf8b49acd783030ec4f59eef6..9153dffde950c2a396291bea88e3e6d31169f568 100644 (file)
@@ -811,6 +811,7 @@ static int ntfs_nfs_commit_metadata(struct inode *inode)
 }
 
 static const struct export_operations ntfs_export_ops = {
+       .encode_fh = generic_encode_ino32_fh,
        .fh_to_dentry = ntfs_fh_to_dentry,
        .fh_to_parent = ntfs_fh_to_parent,
        .get_parent = ntfs3_get_parent,
index 4e173d56b11fae4d1b6859a3ac6d74c980bae10f..5648954f8588b83e47b3ed8fa4ea2d990e663a9e 100644 (file)
@@ -6,4 +6,4 @@
 obj-$(CONFIG_OVERLAY_FS) += overlay.o
 
 overlay-objs := super.o namei.o util.o inode.o file.o dir.o readdir.o \
-               copy_up.o export.o params.o
+               copy_up.o export.o params.o xattrs.o
index ada3fcc9c6d5015ac65929ecf6738720b51615d6..4382881b070948cc21425c330153db7eebd065a0 100644 (file)
@@ -252,7 +252,9 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
                return PTR_ERR(old_file);
 
        /* Try to use clone_file_range to clone up within the same fs */
+       ovl_start_write(dentry);
        cloned = do_clone_file_range(old_file, 0, new_file, 0, len, 0);
+       ovl_end_write(dentry);
        if (cloned == len)
                goto out_fput;
        /* Couldn't clone, so now we try to copy the data */
@@ -287,8 +289,12 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
                 * it may not recognize all kind of holes and sometimes
                 * only skips partial of hole area. However, it will be
                 * enough for most of the use cases.
+                *
+                * We do not hold upper sb_writers throughout the loop to avert
+                * lockdep warning with llseek of lower file in nested overlay:
+                * - upper sb_writers
+                * -- lower ovl_inode_lock (ovl_llseek)
                 */
-
                if (skip_hole && data_pos < old_pos) {
                        data_pos = vfs_llseek(old_file, old_pos, SEEK_DATA);
                        if (data_pos > old_pos) {
@@ -303,9 +309,11 @@ static int ovl_copy_up_file(struct ovl_fs *ofs, struct dentry *dentry,
                        }
                }
 
+               ovl_start_write(dentry);
                bytes = do_splice_direct(old_file, &old_pos,
                                         new_file, &new_pos,
                                         this_len, SPLICE_F_MOVE);
+               ovl_end_write(dentry);
                if (bytes <= 0) {
                        error = bytes;
                        break;
@@ -426,29 +434,29 @@ out_err:
        return ERR_PTR(err);
 }
 
-int ovl_set_origin(struct ovl_fs *ofs, struct dentry *lower,
-                  struct dentry *upper)
+struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin)
 {
-       const struct ovl_fh *fh = NULL;
-       int err;
-
        /*
         * When lower layer doesn't support export operations store a 'null' fh,
         * so we can use the overlay.origin xattr to distignuish between a copy
         * up and a pure upper inode.
         */
-       if (ovl_can_decode_fh(lower->d_sb)) {
-               fh = ovl_encode_real_fh(ofs, lower, false);
-               if (IS_ERR(fh))
-                       return PTR_ERR(fh);
-       }
+       if (!ovl_can_decode_fh(origin->d_sb))
+               return NULL;
+
+       return ovl_encode_real_fh(ofs, origin, false);
+}
+
+int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
+                     struct dentry *upper)
+{
+       int err;
 
        /*
         * Do not fail when upper doesn't support xattrs.
         */
        err = ovl_check_setxattr(ofs, upper, OVL_XATTR_ORIGIN, fh->buf,
                                 fh ? fh->fb.len : 0, 0);
-       kfree(fh);
 
        /* Ignore -EPERM from setting "user.*" on symlink/special */
        return err == -EPERM ? 0 : err;
@@ -476,7 +484,7 @@ static int ovl_set_upper_fh(struct ovl_fs *ofs, struct dentry *upper,
  *
  * Caller must hold i_mutex on indexdir.
  */
-static int ovl_create_index(struct dentry *dentry, struct dentry *origin,
+static int ovl_create_index(struct dentry *dentry, const struct ovl_fh *fh,
                            struct dentry *upper)
 {
        struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
@@ -502,7 +510,7 @@ static int ovl_create_index(struct dentry *dentry, struct dentry *origin,
        if (WARN_ON(ovl_test_flag(OVL_INDEX, d_inode(dentry))))
                return -EIO;
 
-       err = ovl_get_index_name(ofs, origin, &name);
+       err = ovl_get_index_name_fh(fh, &name);
        if (err)
                return err;
 
@@ -541,6 +549,7 @@ struct ovl_copy_up_ctx {
        struct dentry *destdir;
        struct qstr destname;
        struct dentry *workdir;
+       const struct ovl_fh *origin_fh;
        bool origin;
        bool indexed;
        bool metacopy;
@@ -555,14 +564,16 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
        struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
        struct inode *udir = d_inode(upperdir);
 
+       ovl_start_write(c->dentry);
+
        /* Mark parent "impure" because it may now contain non-pure upper */
        err = ovl_set_impure(c->parent, upperdir);
        if (err)
-               return err;
+               goto out;
 
        err = ovl_set_nlink_lower(c->dentry);
        if (err)
-               return err;
+               goto out;
 
        inode_lock_nested(udir, I_MUTEX_PARENT);
        upper = ovl_lookup_upper(ofs, c->dentry->d_name.name, upperdir,
@@ -581,10 +592,12 @@ static int ovl_link_up(struct ovl_copy_up_ctx *c)
        }
        inode_unlock(udir);
        if (err)
-               return err;
+               goto out;
 
        err = ovl_set_nlink_upper(c->dentry);
 
+out:
+       ovl_end_write(c->dentry);
        return err;
 }
 
@@ -637,7 +650,7 @@ static int ovl_copy_up_metadata(struct ovl_copy_up_ctx *c, struct dentry *temp)
         * hard link.
         */
        if (c->origin) {
-               err = ovl_set_origin(ofs, c->lowerpath.dentry, temp);
+               err = ovl_set_origin_fh(ofs, c->origin_fh, temp);
                if (err)
                        return err;
        }
@@ -719,21 +732,19 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
                .link = c->link
        };
 
-       /* workdir and destdir could be the same when copying up to indexdir */
-       err = -EIO;
-       if (lock_rename(c->workdir, c->destdir) != NULL)
-               goto unlock;
-
        err = ovl_prep_cu_creds(c->dentry, &cc);
        if (err)
-               goto unlock;
+               return err;
 
+       ovl_start_write(c->dentry);
+       inode_lock(wdir);
        temp = ovl_create_temp(ofs, c->workdir, &cattr);
+       inode_unlock(wdir);
+       ovl_end_write(c->dentry);
        ovl_revert_cu_creds(&cc);
 
-       err = PTR_ERR(temp);
        if (IS_ERR(temp))
-               goto unlock;
+               return PTR_ERR(temp);
 
        /*
         * Copy up data first and then xattrs. Writing data after
@@ -741,15 +752,28 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
         */
        path.dentry = temp;
        err = ovl_copy_up_data(c, &path);
-       if (err)
+       /*
+        * We cannot hold lock_rename() throughout this helper, because or
+        * lock ordering with sb_writers, which shouldn't be held when calling
+        * ovl_copy_up_data(), so lock workdir and destdir and make sure that
+        * temp wasn't moved before copy up completion or cleanup.
+        * If temp was moved, abort without the cleanup.
+        */
+       ovl_start_write(c->dentry);
+       if (lock_rename(c->workdir, c->destdir) != NULL ||
+           temp->d_parent != c->workdir) {
+               err = -EIO;
+               goto unlock;
+       } else if (err) {
                goto cleanup;
+       }
 
        err = ovl_copy_up_metadata(c, temp);
        if (err)
                goto cleanup;
 
        if (S_ISDIR(c->stat.mode) && c->indexed) {
-               err = ovl_create_index(c->dentry, c->lowerpath.dentry, temp);
+               err = ovl_create_index(c->dentry, c->origin_fh, temp);
                if (err)
                        goto cleanup;
        }
@@ -779,6 +803,7 @@ static int ovl_copy_up_workdir(struct ovl_copy_up_ctx *c)
                ovl_set_flag(OVL_WHITEOUTS, inode);
 unlock:
        unlock_rename(c->workdir, c->destdir);
+       ovl_end_write(c->dentry);
 
        return err;
 
@@ -802,9 +827,10 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
        if (err)
                return err;
 
+       ovl_start_write(c->dentry);
        tmpfile = ovl_do_tmpfile(ofs, c->workdir, c->stat.mode);
+       ovl_end_write(c->dentry);
        ovl_revert_cu_creds(&cc);
-
        if (IS_ERR(tmpfile))
                return PTR_ERR(tmpfile);
 
@@ -815,9 +841,11 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
                        goto out_fput;
        }
 
+       ovl_start_write(c->dentry);
+
        err = ovl_copy_up_metadata(c, temp);
        if (err)
-               goto out_fput;
+               goto out;
 
        inode_lock_nested(udir, I_MUTEX_PARENT);
 
@@ -831,7 +859,7 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
        inode_unlock(udir);
 
        if (err)
-               goto out_fput;
+               goto out;
 
        if (c->metacopy_digest)
                ovl_set_flag(OVL_HAS_DIGEST, d_inode(c->dentry));
@@ -843,6 +871,8 @@ static int ovl_copy_up_tmpfile(struct ovl_copy_up_ctx *c)
                ovl_set_upperdata(d_inode(c->dentry));
        ovl_inode_update(d_inode(c->dentry), dget(temp));
 
+out:
+       ovl_end_write(c->dentry);
 out_fput:
        fput(tmpfile);
        return err;
@@ -861,6 +891,8 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
 {
        int err;
        struct ovl_fs *ofs = OVL_FS(c->dentry->d_sb);
+       struct dentry *origin = c->lowerpath.dentry;
+       struct ovl_fh *fh = NULL;
        bool to_index = false;
 
        /*
@@ -877,25 +909,35 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
                        to_index = true;
        }
 
-       if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index)
+       if (S_ISDIR(c->stat.mode) || c->stat.nlink == 1 || to_index) {
+               fh = ovl_get_origin_fh(ofs, origin);
+               if (IS_ERR(fh))
+                       return PTR_ERR(fh);
+
+               /* origin_fh may be NULL */
+               c->origin_fh = fh;
                c->origin = true;
+       }
 
        if (to_index) {
                c->destdir = ovl_indexdir(c->dentry->d_sb);
-               err = ovl_get_index_name(ofs, c->lowerpath.dentry, &c->destname);
+               err = ovl_get_index_name(ofs, origin, &c->destname);
                if (err)
-                       return err;
+                       goto out_free_fh;
        } else if (WARN_ON(!c->parent)) {
                /* Disconnected dentry must be copied up to index dir */
-               return -EIO;
+               err = -EIO;
+               goto out_free_fh;
        } else {
                /*
                 * Mark parent "impure" because it may now contain non-pure
                 * upper
                 */
+               ovl_start_write(c->dentry);
                err = ovl_set_impure(c->parent, c->destdir);
+               ovl_end_write(c->dentry);
                if (err)
-                       return err;
+                       goto out_free_fh;
        }
 
        /* Should we copyup with O_TMPFILE or with workdir? */
@@ -909,6 +951,7 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
        if (c->indexed)
                ovl_set_flag(OVL_INDEX, d_inode(c->dentry));
 
+       ovl_start_write(c->dentry);
        if (to_index) {
                /* Initialize nlink for copy up of disconnected dentry */
                err = ovl_set_nlink_upper(c->dentry);
@@ -923,10 +966,13 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c)
                ovl_dentry_set_upper_alias(c->dentry);
                ovl_dentry_update_reval(c->dentry, ovl_dentry_upper(c->dentry));
        }
+       ovl_end_write(c->dentry);
 
 out:
        if (to_index)
                kfree(c->destname.name);
+out_free_fh:
+       kfree(fh);
        return err;
 }
 
@@ -1011,15 +1057,16 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c)
         * Writing to upper file will clear security.capability xattr. We
         * don't want that to happen for normal copy-up operation.
         */
+       ovl_start_write(c->dentry);
        if (capability) {
                err = ovl_do_setxattr(ofs, upperpath.dentry, XATTR_NAME_CAPS,
                                      capability, cap_size, 0);
-               if (err)
-                       goto out_free;
        }
-
-
-       err = ovl_removexattr(ofs, upperpath.dentry, OVL_XATTR_METACOPY);
+       if (!err) {
+               err = ovl_removexattr(ofs, upperpath.dentry,
+                                     OVL_XATTR_METACOPY);
+       }
+       ovl_end_write(c->dentry);
        if (err)
                goto out_free;
 
@@ -1170,17 +1217,10 @@ static bool ovl_open_need_copy_up(struct dentry *dentry, int flags)
 
 int ovl_maybe_copy_up(struct dentry *dentry, int flags)
 {
-       int err = 0;
-
-       if (ovl_open_need_copy_up(dentry, flags)) {
-               err = ovl_want_write(dentry);
-               if (!err) {
-                       err = ovl_copy_up_flags(dentry, flags);
-                       ovl_drop_write(dentry);
-               }
-       }
+       if (!ovl_open_need_copy_up(dentry, flags))
+               return 0;
 
-       return err;
+       return ovl_copy_up_flags(dentry, flags);
 }
 
 int ovl_copy_up_with_data(struct dentry *dentry)
index 033fc0458a3d824b3ba6bb6079ad103c45fa4689..aab3f5d93556f90575aec60bf99fae042e92eea1 100644 (file)
@@ -477,7 +477,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
                goto out_unlock;
 
        err = -ESTALE;
-       if (d_is_negative(upper) || !IS_WHITEOUT(d_inode(upper)))
+       if (d_is_negative(upper) || !ovl_upper_is_whiteout(ofs, upper))
                goto out_dput;
 
        newdentry = ovl_create_temp(ofs, workdir, cattr);
@@ -559,10 +559,6 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
        struct cred *override_cred;
        struct dentry *parent = dentry->d_parent;
 
-       err = ovl_copy_up(parent);
-       if (err)
-               return err;
-
        old_cred = ovl_override_creds(dentry->d_sb);
 
        /*
@@ -626,6 +622,10 @@ static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
                .link = link,
        };
 
+       err = ovl_copy_up(dentry->d_parent);
+       if (err)
+               return err;
+
        err = ovl_want_write(dentry);
        if (err)
                goto out;
@@ -700,28 +700,24 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
        int err;
        struct inode *inode;
 
-       err = ovl_want_write(old);
+       err = ovl_copy_up(old);
        if (err)
                goto out;
 
-       err = ovl_copy_up(old);
+       err = ovl_copy_up(new->d_parent);
        if (err)
-               goto out_drop_write;
+               goto out;
 
-       err = ovl_copy_up(new->d_parent);
+       err = ovl_nlink_start(old);
        if (err)
-               goto out_drop_write;
+               goto out;
 
        if (ovl_is_metacopy_dentry(old)) {
                err = ovl_set_link_redirect(old);
                if (err)
-                       goto out_drop_write;
+                       goto out_nlink_end;
        }
 
-       err = ovl_nlink_start(old);
-       if (err)
-               goto out_drop_write;
-
        inode = d_inode(old);
        ihold(inode);
 
@@ -731,9 +727,8 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
        if (err)
                iput(inode);
 
+out_nlink_end:
        ovl_nlink_end(old);
-out_drop_write:
-       ovl_drop_write(old);
 out:
        return err;
 }
@@ -891,17 +886,13 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
                        goto out;
        }
 
-       err = ovl_want_write(dentry);
-       if (err)
-               goto out;
-
        err = ovl_copy_up(dentry->d_parent);
        if (err)
-               goto out_drop_write;
+               goto out;
 
        err = ovl_nlink_start(dentry);
        if (err)
-               goto out_drop_write;
+               goto out;
 
        old_cred = ovl_override_creds(dentry->d_sb);
        if (!lower_positive)
@@ -926,8 +917,6 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
        if (ovl_dentry_upper(dentry))
                ovl_copyattr(d_inode(dentry));
 
-out_drop_write:
-       ovl_drop_write(dentry);
 out:
        ovl_cache_free(&list);
        return err;
@@ -1131,29 +1120,32 @@ static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir,
                }
        }
 
-       err = ovl_want_write(old);
-       if (err)
-               goto out;
-
        err = ovl_copy_up(old);
        if (err)
-               goto out_drop_write;
+               goto out;
 
        err = ovl_copy_up(new->d_parent);
        if (err)
-               goto out_drop_write;
+               goto out;
        if (!overwrite) {
                err = ovl_copy_up(new);
                if (err)
-                       goto out_drop_write;
+                       goto out;
        } else if (d_inode(new)) {
                err = ovl_nlink_start(new);
                if (err)
-                       goto out_drop_write;
+                       goto out;
 
                update_nlink = true;
        }
 
+       if (!update_nlink) {
+               /* ovl_nlink_start() took ovl_want_write() */
+               err = ovl_want_write(old);
+               if (err)
+                       goto out;
+       }
+
        old_cred = ovl_override_creds(old->d_sb);
 
        if (!list_empty(&list)) {
@@ -1219,7 +1211,7 @@ static int ovl_rename(struct mnt_idmap *idmap, struct inode *olddir,
                }
        } else {
                if (!d_is_negative(newdentry)) {
-                       if (!new_opaque || !ovl_is_whiteout(newdentry))
+                       if (!new_opaque || !ovl_upper_is_whiteout(ofs, newdentry))
                                goto out_dput;
                } else {
                        if (flags & RENAME_EXCHANGE)
@@ -1286,8 +1278,8 @@ out_revert_creds:
        revert_creds(old_cred);
        if (update_nlink)
                ovl_nlink_end(new);
-out_drop_write:
-       ovl_drop_write(old);
+       else
+               ovl_drop_write(old);
 out:
        dput(opaquedir);
        ovl_cache_free(&list);
index 26b782c53910b538cb26d770fdb6b9416d0f19dd..7e16bbcad95e631921b5fe5fe97d220dc10fa08a 100644 (file)
@@ -23,12 +23,7 @@ static int ovl_encode_maybe_copy_up(struct dentry *dentry)
        if (ovl_dentry_upper(dentry))
                return 0;
 
-       err = ovl_want_write(dentry);
-       if (!err) {
-               err = ovl_copy_up(dentry);
-               ovl_drop_write(dentry);
-       }
-
+       err = ovl_copy_up(dentry);
        if (err) {
                pr_warn_ratelimited("failed to copy up on encode (%pd2, err=%i)\n",
                                    dentry, err);
index ec3671ca140c3f5af926fd74268d2a3a653f7574..131621daeb134ac3f9b78ea592b4639d5f29a3f3 100644 (file)
 #include <linux/fs.h>
 #include "overlayfs.h"
 
+#include "../internal.h"       /* for sb_init_dio_done_wq */
+
 struct ovl_aio_req {
        struct kiocb iocb;
        refcount_t ref;
        struct kiocb *orig_iocb;
+       /* used for aio completion */
+       struct work_struct work;
+       long res;
 };
 
 static struct kmem_cache *ovl_aio_request_cachep;
@@ -235,6 +240,12 @@ static loff_t ovl_llseek(struct file *file, loff_t offset, int whence)
        return ret;
 }
 
+static void ovl_file_modified(struct file *file)
+{
+       /* Update size/mtime */
+       ovl_copyattr(file_inode(file));
+}
+
 static void ovl_file_accessed(struct file *file)
 {
        struct inode *inode, *upperinode;
@@ -263,20 +274,12 @@ static void ovl_file_accessed(struct file *file)
        touch_atime(&file->f_path);
 }
 
-static rwf_t ovl_iocb_to_rwf(int ifl)
+#define OVL_IOCB_MASK \
+       (IOCB_NOWAIT | IOCB_HIPRI | IOCB_DSYNC | IOCB_SYNC | IOCB_APPEND)
+
+static rwf_t iocb_to_rw_flags(int flags)
 {
-       rwf_t flags = 0;
-
-       if (ifl & IOCB_NOWAIT)
-               flags |= RWF_NOWAIT;
-       if (ifl & IOCB_HIPRI)
-               flags |= RWF_HIPRI;
-       if (ifl & IOCB_DSYNC)
-               flags |= RWF_DSYNC;
-       if (ifl & IOCB_SYNC)
-               flags |= RWF_SYNC;
-
-       return flags;
+       return (__force rwf_t)(flags & OVL_IOCB_MASK);
 }
 
 static inline void ovl_aio_put(struct ovl_aio_req *aio_req)
@@ -293,10 +296,8 @@ static void ovl_aio_cleanup_handler(struct ovl_aio_req *aio_req)
        struct kiocb *orig_iocb = aio_req->orig_iocb;
 
        if (iocb->ki_flags & IOCB_WRITE) {
-               struct inode *inode = file_inode(orig_iocb->ki_filp);
-
                kiocb_end_write(iocb);
-               ovl_copyattr(inode);
+               ovl_file_modified(orig_iocb->ki_filp);
        }
 
        orig_iocb->ki_pos = iocb->ki_pos;
@@ -313,6 +314,37 @@ static void ovl_aio_rw_complete(struct kiocb *iocb, long res)
        orig_iocb->ki_complete(orig_iocb, res);
 }
 
+static void ovl_aio_complete_work(struct work_struct *work)
+{
+       struct ovl_aio_req *aio_req = container_of(work,
+                                                  struct ovl_aio_req, work);
+
+       ovl_aio_rw_complete(&aio_req->iocb, aio_req->res);
+}
+
+static void ovl_aio_queue_completion(struct kiocb *iocb, long res)
+{
+       struct ovl_aio_req *aio_req = container_of(iocb,
+                                                  struct ovl_aio_req, iocb);
+       struct kiocb *orig_iocb = aio_req->orig_iocb;
+
+       /*
+        * Punt to a work queue to serialize updates of mtime/size.
+        */
+       aio_req->res = res;
+       INIT_WORK(&aio_req->work, ovl_aio_complete_work);
+       queue_work(file_inode(orig_iocb->ki_filp)->i_sb->s_dio_done_wq,
+                  &aio_req->work);
+}
+
+static int ovl_init_aio_done_wq(struct super_block *sb)
+{
+       if (sb->s_dio_done_wq)
+               return 0;
+
+       return sb_init_dio_done_wq(sb);
+}
+
 static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
        struct file *file = iocb->ki_filp;
@@ -334,8 +366,9 @@ static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 
        old_cred = ovl_override_creds(file_inode(file)->i_sb);
        if (is_sync_kiocb(iocb)) {
-               ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
-                                   ovl_iocb_to_rwf(iocb->ki_flags));
+               rwf_t rwf = iocb_to_rw_flags(iocb->ki_flags);
+
+               ret = vfs_iter_read(real.file, iter, &iocb->ki_pos, rwf);
        } else {
                struct ovl_aio_req *aio_req;
 
@@ -401,15 +434,20 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
 
        old_cred = ovl_override_creds(file_inode(file)->i_sb);
        if (is_sync_kiocb(iocb)) {
+               rwf_t rwf = iocb_to_rw_flags(ifl);
+
                file_start_write(real.file);
-               ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
-                                    ovl_iocb_to_rwf(ifl));
+               ret = vfs_iter_write(real.file, iter, &iocb->ki_pos, rwf);
                file_end_write(real.file);
                /* Update size */
-               ovl_copyattr(inode);
+               ovl_file_modified(file);
        } else {
                struct ovl_aio_req *aio_req;
 
+               ret = ovl_init_aio_done_wq(inode->i_sb);
+               if (ret)
+                       goto out;
+
                ret = -ENOMEM;
                aio_req = kmem_cache_zalloc(ovl_aio_request_cachep, GFP_KERNEL);
                if (!aio_req)
@@ -418,7 +456,7 @@ static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)
                aio_req->orig_iocb = iocb;
                kiocb_clone(&aio_req->iocb, iocb, get_file(real.file));
                aio_req->iocb.ki_flags = ifl;
-               aio_req->iocb.ki_complete = ovl_aio_rw_complete;
+               aio_req->iocb.ki_complete = ovl_aio_queue_completion;
                refcount_set(&aio_req->ref, 2);
                kiocb_start_write(&aio_req->iocb);
                ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter);
@@ -492,7 +530,7 @@ static ssize_t ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
 
        file_end_write(real.file);
        /* Update size */
-       ovl_copyattr(inode);
+       ovl_file_modified(out);
        revert_creds(old_cred);
        fdput(real);
 
@@ -573,7 +611,7 @@ static long ovl_fallocate(struct file *file, int mode, loff_t offset, loff_t len
        revert_creds(old_cred);
 
        /* Update size */
-       ovl_copyattr(inode);
+       ovl_file_modified(file);
 
        fdput(real);
 
@@ -657,7 +695,7 @@ static loff_t ovl_copyfile(struct file *file_in, loff_t pos_in,
        revert_creds(old_cred);
 
        /* Update size */
-       ovl_copyattr(inode_out);
+       ovl_file_modified(file_out);
 
        fdput(real_in);
        fdput(real_out);
index b6e98a7d36ce8f96e27b21a5c34ea06e42dfb226..345b8f161ca4c3da0b81fb397e9ef3dc21dde0ea 100644 (file)
@@ -32,10 +32,6 @@ int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
        if (err)
                return err;
 
-       err = ovl_want_write(dentry);
-       if (err)
-               goto out;
-
        if (attr->ia_valid & ATTR_SIZE) {
                /* Truncate should trigger data copy up as well */
                full_copy_up = true;
@@ -54,7 +50,7 @@ int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                        winode = d_inode(upperdentry);
                        err = get_write_access(winode);
                        if (err)
-                               goto out_drop_write;
+                               goto out;
                }
 
                if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
@@ -78,6 +74,10 @@ int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                 */
                attr->ia_valid &= ~ATTR_OPEN;
 
+               err = ovl_want_write(dentry);
+               if (err)
+                       goto out_put_write;
+
                inode_lock(upperdentry->d_inode);
                old_cred = ovl_override_creds(dentry->d_sb);
                err = ovl_do_notify_change(ofs, upperdentry, attr);
@@ -85,12 +85,12 @@ int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
                if (!err)
                        ovl_copyattr(dentry->d_inode);
                inode_unlock(upperdentry->d_inode);
+               ovl_drop_write(dentry);
 
+out_put_write:
                if (winode)
                        put_write_access(winode);
        }
-out_drop_write:
-       ovl_drop_write(dentry);
 out:
        return err;
 }
@@ -339,130 +339,6 @@ static const char *ovl_get_link(struct dentry *dentry,
        return p;
 }
 
-bool ovl_is_private_xattr(struct super_block *sb, const char *name)
-{
-       struct ovl_fs *ofs = OVL_FS(sb);
-
-       if (ofs->config.userxattr)
-               return strncmp(name, OVL_XATTR_USER_PREFIX,
-                              sizeof(OVL_XATTR_USER_PREFIX) - 1) == 0;
-       else
-               return strncmp(name, OVL_XATTR_TRUSTED_PREFIX,
-                              sizeof(OVL_XATTR_TRUSTED_PREFIX) - 1) == 0;
-}
-
-int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
-                 const void *value, size_t size, int flags)
-{
-       int err;
-       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
-       struct dentry *upperdentry = ovl_i_dentry_upper(inode);
-       struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
-       struct path realpath;
-       const struct cred *old_cred;
-
-       err = ovl_want_write(dentry);
-       if (err)
-               goto out;
-
-       if (!value && !upperdentry) {
-               ovl_path_lower(dentry, &realpath);
-               old_cred = ovl_override_creds(dentry->d_sb);
-               err = vfs_getxattr(mnt_idmap(realpath.mnt), realdentry, name, NULL, 0);
-               revert_creds(old_cred);
-               if (err < 0)
-                       goto out_drop_write;
-       }
-
-       if (!upperdentry) {
-               err = ovl_copy_up(dentry);
-               if (err)
-                       goto out_drop_write;
-
-               realdentry = ovl_dentry_upper(dentry);
-       }
-
-       old_cred = ovl_override_creds(dentry->d_sb);
-       if (value) {
-               err = ovl_do_setxattr(ofs, realdentry, name, value, size,
-                                     flags);
-       } else {
-               WARN_ON(flags != XATTR_REPLACE);
-               err = ovl_do_removexattr(ofs, realdentry, name);
-       }
-       revert_creds(old_cred);
-
-       /* copy c/mtime */
-       ovl_copyattr(inode);
-
-out_drop_write:
-       ovl_drop_write(dentry);
-out:
-       return err;
-}
-
-int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
-                 void *value, size_t size)
-{
-       ssize_t res;
-       const struct cred *old_cred;
-       struct path realpath;
-
-       ovl_i_path_real(inode, &realpath);
-       old_cred = ovl_override_creds(dentry->d_sb);
-       res = vfs_getxattr(mnt_idmap(realpath.mnt), realpath.dentry, name, value, size);
-       revert_creds(old_cred);
-       return res;
-}
-
-static bool ovl_can_list(struct super_block *sb, const char *s)
-{
-       /* Never list private (.overlay) */
-       if (ovl_is_private_xattr(sb, s))
-               return false;
-
-       /* List all non-trusted xattrs */
-       if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
-               return true;
-
-       /* list other trusted for superuser only */
-       return ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
-}
-
-ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
-{
-       struct dentry *realdentry = ovl_dentry_real(dentry);
-       ssize_t res;
-       size_t len;
-       char *s;
-       const struct cred *old_cred;
-
-       old_cred = ovl_override_creds(dentry->d_sb);
-       res = vfs_listxattr(realdentry, list, size);
-       revert_creds(old_cred);
-       if (res <= 0 || size == 0)
-               return res;
-
-       /* filter out private xattrs */
-       for (s = list, len = res; len;) {
-               size_t slen = strnlen(s, len) + 1;
-
-               /* underlying fs providing us with an broken xattr list? */
-               if (WARN_ON(slen > len))
-                       return -EIO;
-
-               len -= slen;
-               if (!ovl_can_list(dentry->d_sb, s)) {
-                       res -= slen;
-                       memmove(s, s + slen, len);
-               } else {
-                       s += slen;
-               }
-       }
-
-       return res;
-}
-
 #ifdef CONFIG_FS_POSIX_ACL
 /*
  * Apply the idmapping of the layer to POSIX ACLs. The caller must pass a clone
@@ -611,10 +487,6 @@ static int ovl_set_or_remove_acl(struct dentry *dentry, struct inode *inode,
        struct dentry *upperdentry = ovl_dentry_upper(dentry);
        struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
 
-       err = ovl_want_write(dentry);
-       if (err)
-               return err;
-
        /*
         * If ACL is to be removed from a lower file, check if it exists in
         * the first place before copying it up.
@@ -630,7 +502,7 @@ static int ovl_set_or_remove_acl(struct dentry *dentry, struct inode *inode,
                revert_creds(old_cred);
                if (IS_ERR(real_acl)) {
                        err = PTR_ERR(real_acl);
-                       goto out_drop_write;
+                       goto out;
                }
                posix_acl_release(real_acl);
        }
@@ -638,23 +510,26 @@ static int ovl_set_or_remove_acl(struct dentry *dentry, struct inode *inode,
        if (!upperdentry) {
                err = ovl_copy_up(dentry);
                if (err)
-                       goto out_drop_write;
+                       goto out;
 
                realdentry = ovl_dentry_upper(dentry);
        }
 
+       err = ovl_want_write(dentry);
+       if (err)
+               goto out;
+
        old_cred = ovl_override_creds(dentry->d_sb);
        if (acl)
                err = ovl_do_set_acl(ofs, realdentry, acl_name, acl);
        else
                err = ovl_do_remove_acl(ofs, realdentry, acl_name);
        revert_creds(old_cred);
+       ovl_drop_write(dentry);
 
        /* copy c/mtime */
        ovl_copyattr(inode);
-
-out_drop_write:
-       ovl_drop_write(dentry);
+out:
        return err;
 }
 
@@ -778,14 +653,14 @@ int ovl_fileattr_set(struct mnt_idmap *idmap,
        unsigned int flags;
        int err;
 
-       err = ovl_want_write(dentry);
-       if (err)
-               goto out;
-
        err = ovl_copy_up(dentry);
        if (!err) {
                ovl_path_real(dentry, &upperpath);
 
+               err = ovl_want_write(dentry);
+               if (err)
+                       goto out;
+
                old_cred = ovl_override_creds(inode->i_sb);
                /*
                 * Store immutable/append-only flags in xattr and clear them
@@ -798,6 +673,7 @@ int ovl_fileattr_set(struct mnt_idmap *idmap,
                if (!err)
                        err = ovl_real_fileattr_set(&upperpath, fa);
                revert_creds(old_cred);
+               ovl_drop_write(dentry);
 
                /*
                 * Merge real inode flags with inode flags read from
@@ -812,7 +688,6 @@ int ovl_fileattr_set(struct mnt_idmap *idmap,
                /* Update ctime */
                ovl_copyattr(inode);
        }
-       ovl_drop_write(dentry);
 out:
        return err;
 }
index 80391c687c2ad83975f661905414f924d927a155..03bc8d5dfa318889da1aa00899664bf0c28c910c 100644 (file)
@@ -251,7 +251,10 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
                err = -EREMOTE;
                goto out_err;
        }
-       if (ovl_is_whiteout(this)) {
+
+       path.dentry = this;
+       path.mnt = d->mnt;
+       if (ovl_path_is_whiteout(OVL_FS(d->sb), &path)) {
                d->stop = d->opaque = true;
                goto put_and_out;
        }
@@ -264,8 +267,6 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
                goto put_and_out;
        }
 
-       path.dentry = this;
-       path.mnt = d->mnt;
        if (!d_can_lookup(this)) {
                if (d->is_dir || !last_element) {
                        d->stop = true;
@@ -438,7 +439,7 @@ int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
        else if (IS_ERR(origin))
                return PTR_ERR(origin);
 
-       if (upperdentry && !ovl_is_whiteout(upperdentry) &&
+       if (upperdentry && !ovl_upper_is_whiteout(ofs, upperdentry) &&
            inode_wrong_type(d_inode(upperdentry), d_inode(origin)->i_mode))
                goto invalid;
 
@@ -507,6 +508,19 @@ static int ovl_verify_fh(struct ovl_fs *ofs, struct dentry *dentry,
        return err;
 }
 
+int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
+                     enum ovl_xattr ox, const struct ovl_fh *fh,
+                     bool is_upper, bool set)
+{
+       int err;
+
+       err = ovl_verify_fh(ofs, dentry, ox, fh);
+       if (set && err == -ENODATA)
+               err = ovl_setxattr(ofs, dentry, ox, fh->buf, fh->fb.len);
+
+       return err;
+}
+
 /*
  * Verify that @real dentry matches the file handle stored in xattr @name.
  *
@@ -515,9 +529,9 @@ static int ovl_verify_fh(struct ovl_fs *ofs, struct dentry *dentry,
  *
  * Return 0 on match, -ESTALE on mismatch, -ENODATA on no xattr, < 0 on error.
  */
-int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
-                     enum ovl_xattr ox, struct dentry *real, bool is_upper,
-                     bool set)
+int ovl_verify_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry,
+                           enum ovl_xattr ox, struct dentry *real,
+                           bool is_upper, bool set)
 {
        struct inode *inode;
        struct ovl_fh *fh;
@@ -530,9 +544,7 @@ int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
                goto fail;
        }
 
-       err = ovl_verify_fh(ofs, dentry, ox, fh);
-       if (set && err == -ENODATA)
-               err = ovl_setxattr(ofs, dentry, ox, fh->buf, fh->fb.len);
+       err = ovl_verify_set_fh(ofs, dentry, ox, fh, is_upper, set);
        if (err)
                goto fail;
 
@@ -548,6 +560,7 @@ fail:
        goto out;
 }
 
+
 /* Get upper dentry from index */
 struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index,
                               bool connected)
@@ -684,7 +697,7 @@ orphan:
        goto out;
 }
 
-static int ovl_get_index_name_fh(struct ovl_fh *fh, struct qstr *name)
+int ovl_get_index_name_fh(const struct ovl_fh *fh, struct qstr *name)
 {
        char *n, *s;
 
@@ -873,20 +886,27 @@ int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
 static int ovl_fix_origin(struct ovl_fs *ofs, struct dentry *dentry,
                          struct dentry *lower, struct dentry *upper)
 {
+       const struct ovl_fh *fh;
        int err;
 
        if (ovl_check_origin_xattr(ofs, upper))
                return 0;
 
+       fh = ovl_get_origin_fh(ofs, lower);
+       if (IS_ERR(fh))
+               return PTR_ERR(fh);
+
        err = ovl_want_write(dentry);
        if (err)
-               return err;
+               goto out;
 
-       err = ovl_set_origin(ofs, lower, upper);
+       err = ovl_set_origin_fh(ofs, fh, upper);
        if (!err)
                err = ovl_set_impure(dentry->d_parent, upper->d_parent);
 
        ovl_drop_write(dentry);
+out:
+       kfree(fh);
        return err;
 }
 
@@ -1383,7 +1403,11 @@ bool ovl_lower_positive(struct dentry *dentry)
                                break;
                        }
                } else {
-                       positive = !ovl_is_whiteout(this);
+                       struct path path = {
+                               .dentry = this,
+                               .mnt = parentpath->layer->mnt,
+                       };
+                       positive = !ovl_path_is_whiteout(OVL_FS(dentry->d_sb), &path);
                        done = true;
                        dput(this);
                }
index 9817b2dcb132c2c2991b4303eb74b51124b547e7..ca88b2636a5729604221000fe450729d661e8e7e 100644 (file)
@@ -28,7 +28,16 @@ enum ovl_path_type {
 
 #define OVL_XATTR_NAMESPACE "overlay."
 #define OVL_XATTR_TRUSTED_PREFIX XATTR_TRUSTED_PREFIX OVL_XATTR_NAMESPACE
+#define OVL_XATTR_TRUSTED_PREFIX_LEN (sizeof(OVL_XATTR_TRUSTED_PREFIX) - 1)
 #define OVL_XATTR_USER_PREFIX XATTR_USER_PREFIX OVL_XATTR_NAMESPACE
+#define OVL_XATTR_USER_PREFIX_LEN (sizeof(OVL_XATTR_USER_PREFIX) - 1)
+
+#define OVL_XATTR_ESCAPE_PREFIX OVL_XATTR_NAMESPACE
+#define OVL_XATTR_ESCAPE_PREFIX_LEN (sizeof(OVL_XATTR_ESCAPE_PREFIX) - 1)
+#define OVL_XATTR_ESCAPE_TRUSTED_PREFIX OVL_XATTR_TRUSTED_PREFIX OVL_XATTR_ESCAPE_PREFIX
+#define OVL_XATTR_ESCAPE_TRUSTED_PREFIX_LEN (sizeof(OVL_XATTR_ESCAPE_TRUSTED_PREFIX) - 1)
+#define OVL_XATTR_ESCAPE_USER_PREFIX OVL_XATTR_USER_PREFIX OVL_XATTR_ESCAPE_PREFIX
+#define OVL_XATTR_ESCAPE_USER_PREFIX_LEN (sizeof(OVL_XATTR_ESCAPE_USER_PREFIX) - 1)
 
 enum ovl_xattr {
        OVL_XATTR_OPAQUE,
@@ -40,6 +49,8 @@ enum ovl_xattr {
        OVL_XATTR_UUID,
        OVL_XATTR_METACOPY,
        OVL_XATTR_PROTATTR,
+       OVL_XATTR_XWHITEOUT,
+       OVL_XATTR_XWHITEOUTS,
 };
 
 enum ovl_inode_flag {
@@ -398,6 +409,10 @@ static inline bool ovl_open_flags_need_copy_up(int flags)
 }
 
 /* util.c */
+int ovl_get_write_access(struct dentry *dentry);
+void ovl_put_write_access(struct dentry *dentry);
+void ovl_start_write(struct dentry *dentry);
+void ovl_end_write(struct dentry *dentry);
 int ovl_want_write(struct dentry *dentry);
 void ovl_drop_write(struct dentry *dentry);
 struct dentry *ovl_workdir(struct dentry *dentry);
@@ -460,6 +475,7 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry);
 void ovl_dir_modified(struct dentry *dentry, bool impurity);
 u64 ovl_inode_version_get(struct inode *inode);
 bool ovl_is_whiteout(struct dentry *dentry);
+bool ovl_path_is_whiteout(struct ovl_fs *ofs, const struct path *path);
 struct file *ovl_path_open(const struct path *path, int flags);
 int ovl_copy_up_start(struct dentry *dentry, int flags);
 void ovl_copy_up_end(struct dentry *dentry);
@@ -467,9 +483,21 @@ bool ovl_already_copied_up(struct dentry *dentry, int flags);
 bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
                              enum ovl_xattr ox);
 bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, const struct path *path);
+bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path);
+bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path);
 bool ovl_init_uuid_xattr(struct super_block *sb, struct ovl_fs *ofs,
                         const struct path *upperpath);
 
+static inline bool ovl_upper_is_whiteout(struct ovl_fs *ofs,
+                                        struct dentry *upperdentry)
+{
+       struct path upperpath = {
+               .dentry = upperdentry,
+               .mnt = ovl_upper_mnt(ofs),
+       };
+       return ovl_path_is_whiteout(ofs, &upperpath);
+}
+
 static inline bool ovl_check_origin_xattr(struct ovl_fs *ofs,
                                          struct dentry *upperdentry)
 {
@@ -624,11 +652,15 @@ struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
 int ovl_check_origin_fh(struct ovl_fs *ofs, struct ovl_fh *fh, bool connected,
                        struct dentry *upperdentry, struct ovl_path **stackp);
 int ovl_verify_set_fh(struct ovl_fs *ofs, struct dentry *dentry,
-                     enum ovl_xattr ox, struct dentry *real, bool is_upper,
-                     bool set);
+                     enum ovl_xattr ox, const struct ovl_fh *fh,
+                     bool is_upper, bool set);
+int ovl_verify_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry,
+                           enum ovl_xattr ox, struct dentry *real,
+                           bool is_upper, bool set);
 struct dentry *ovl_index_upper(struct ovl_fs *ofs, struct dentry *index,
                               bool connected);
 int ovl_verify_index(struct ovl_fs *ofs, struct dentry *index);
+int ovl_get_index_name_fh(const struct ovl_fh *fh, struct qstr *name);
 int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
                       struct qstr *name);
 struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh);
@@ -640,17 +672,24 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                          unsigned int flags);
 bool ovl_lower_positive(struct dentry *dentry);
 
+static inline int ovl_verify_origin_fh(struct ovl_fs *ofs, struct dentry *upper,
+                                      const struct ovl_fh *fh, bool set)
+{
+       return ovl_verify_set_fh(ofs, upper, OVL_XATTR_ORIGIN, fh, false, set);
+}
+
 static inline int ovl_verify_origin(struct ovl_fs *ofs, struct dentry *upper,
                                    struct dentry *origin, bool set)
 {
-       return ovl_verify_set_fh(ofs, upper, OVL_XATTR_ORIGIN, origin,
-                                false, set);
+       return ovl_verify_origin_xattr(ofs, upper, OVL_XATTR_ORIGIN, origin,
+                                      false, set);
 }
 
 static inline int ovl_verify_upper(struct ovl_fs *ofs, struct dentry *index,
                                   struct dentry *upper, bool set)
 {
-       return ovl_verify_set_fh(ofs, index, OVL_XATTR_UPPER, upper, true, set);
+       return ovl_verify_origin_xattr(ofs, index, OVL_XATTR_UPPER, upper,
+                                      true, set);
 }
 
 /* readdir.c */
@@ -684,17 +723,8 @@ int ovl_set_nlink_lower(struct dentry *dentry);
 unsigned int ovl_get_nlink(struct ovl_fs *ofs, struct dentry *lowerdentry,
                           struct dentry *upperdentry,
                           unsigned int fallback);
-int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
-               struct iattr *attr);
-int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
-               struct kstat *stat, u32 request_mask, unsigned int flags);
 int ovl_permission(struct mnt_idmap *idmap, struct inode *inode,
                   int mask);
-int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
-                 const void *value, size_t size, int flags);
-int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
-                 void *value, size_t size);
-ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
 
 #ifdef CONFIG_FS_POSIX_ACL
 struct posix_acl *do_ovl_get_acl(struct mnt_idmap *idmap,
@@ -815,8 +845,9 @@ int ovl_copy_xattr(struct super_block *sb, const struct path *path, struct dentr
 int ovl_set_attr(struct ovl_fs *ofs, struct dentry *upper, struct kstat *stat);
 struct ovl_fh *ovl_encode_real_fh(struct ovl_fs *ofs, struct dentry *real,
                                  bool is_upper);
-int ovl_set_origin(struct ovl_fs *ofs, struct dentry *lower,
-                  struct dentry *upper);
+struct ovl_fh *ovl_get_origin_fh(struct ovl_fs *ofs, struct dentry *origin);
+int ovl_set_origin_fh(struct ovl_fs *ofs, const struct ovl_fh *fh,
+                     struct dentry *upper);
 
 /* export.c */
 extern const struct export_operations ovl_export_operations;
@@ -830,3 +861,12 @@ static inline bool ovl_force_readonly(struct ovl_fs *ofs)
 {
        return (!ovl_upper_mnt(ofs) || !ofs->workdir);
 }
+
+/* xattr.c */
+
+const struct xattr_handler * const *ovl_xattr_handlers(struct ovl_fs *ofs);
+int ovl_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+               struct iattr *attr);
+int ovl_getattr(struct mnt_idmap *idmap, const struct path *path,
+               struct kstat *stat, u32 request_mask, unsigned int flags);
+ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
index f6ff23fd101c8f3cda5f485c657a51b24fd565d1..ddab9ea267d1283b73f8245aa1adb81232f2a202 100644 (file)
@@ -43,8 +43,10 @@ module_param_named(metacopy, ovl_metacopy_def, bool, 0644);
 MODULE_PARM_DESC(metacopy,
                 "Default to on or off for the metadata only copy up feature");
 
-enum {
+enum ovl_opt {
        Opt_lowerdir,
+       Opt_lowerdir_add,
+       Opt_datadir_add,
        Opt_upperdir,
        Opt_workdir,
        Opt_default_permissions,
@@ -140,8 +142,11 @@ static int ovl_verity_mode_def(void)
 #define fsparam_string_empty(NAME, OPT) \
        __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL)
 
+
 const struct fs_parameter_spec ovl_parameter_spec[] = {
        fsparam_string_empty("lowerdir",    Opt_lowerdir),
+       fsparam_string("lowerdir+",         Opt_lowerdir_add),
+       fsparam_string("datadir+",          Opt_datadir_add),
        fsparam_string("upperdir",          Opt_upperdir),
        fsparam_string("workdir",           Opt_workdir),
        fsparam_flag("default_permissions", Opt_default_permissions),
@@ -238,19 +243,8 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path)
                pr_err("failed to resolve '%s': %i\n", name, err);
                goto out;
        }
-       err = -EINVAL;
-       if (ovl_dentry_weird(path->dentry)) {
-               pr_err("filesystem on '%s' not supported\n", name);
-               goto out_put;
-       }
-       if (!d_is_dir(path->dentry)) {
-               pr_err("'%s' not a directory\n", name);
-               goto out_put;
-       }
        return 0;
 
-out_put:
-       path_put_init(path);
 out:
        return err;
 }
@@ -268,7 +262,7 @@ static void ovl_unescape(char *s)
        }
 }
 
-static int ovl_mount_dir(const char *name, struct path *path, bool upper)
+static int ovl_mount_dir(const char *name, struct path *path)
 {
        int err = -ENOMEM;
        char *tmp = kstrdup(name, GFP_KERNEL);
@@ -276,68 +270,147 @@ static int ovl_mount_dir(const char *name, struct path *path, bool upper)
        if (tmp) {
                ovl_unescape(tmp);
                err = ovl_mount_dir_noesc(tmp, path);
-
-               if (!err && upper && path->dentry->d_flags & DCACHE_OP_REAL) {
-                       pr_err("filesystem on '%s' not supported as upperdir\n",
-                              tmp);
-                       path_put_init(path);
-                       err = -EINVAL;
-               }
                kfree(tmp);
        }
        return err;
 }
 
-static int ovl_parse_param_upperdir(const char *name, struct fs_context *fc,
-                                   bool workdir)
+static int ovl_mount_dir_check(struct fs_context *fc, const struct path *path,
+                              enum ovl_opt layer, const char *name, bool upper)
 {
-       int err;
-       struct ovl_fs *ofs = fc->s_fs_info;
-       struct ovl_config *config = &ofs->config;
        struct ovl_fs_context *ctx = fc->fs_private;
-       struct path path;
-       char *dup;
 
-       err = ovl_mount_dir(name, &path, true);
-       if (err)
-               return err;
+       if (ovl_dentry_weird(path->dentry))
+               return invalfc(fc, "filesystem on %s not supported", name);
+
+       if (!d_is_dir(path->dentry))
+               return invalfc(fc, "%s is not a directory", name);
+
 
        /*
         * Check whether upper path is read-only here to report failures
         * early. Don't forget to recheck when the superblock is created
         * as the mount attributes could change.
         */
-       if (__mnt_is_readonly(path.mnt)) {
-               path_put(&path);
-               return -EINVAL;
+       if (upper) {
+               if (path->dentry->d_flags & DCACHE_OP_REAL)
+                       return invalfc(fc, "filesystem on %s not supported as upperdir", name);
+               if (__mnt_is_readonly(path->mnt))
+                       return invalfc(fc, "filesystem on %s is read-only", name);
+       } else {
+               if (ctx->lowerdir_all && layer != Opt_lowerdir)
+                       return invalfc(fc, "lowerdir+ and datadir+ cannot follow lowerdir");
+               if (ctx->nr_data && layer == Opt_lowerdir_add)
+                       return invalfc(fc, "regular lower layers cannot follow data layers");
+               if (ctx->nr == OVL_MAX_STACK)
+                       return invalfc(fc, "too many lower directories, limit is %d",
+                                      OVL_MAX_STACK);
        }
+       return 0;
+}
 
-       dup = kstrdup(name, GFP_KERNEL);
-       if (!dup) {
-               path_put(&path);
+static int ovl_ctx_realloc_lower(struct fs_context *fc)
+{
+       struct ovl_fs_context *ctx = fc->fs_private;
+       struct ovl_fs_context_layer *l;
+       size_t nr;
+
+       if (ctx->nr < ctx->capacity)
+               return 0;
+
+       nr = min_t(size_t, max(4096 / sizeof(*l), ctx->capacity * 2),
+                  OVL_MAX_STACK);
+       l = krealloc_array(ctx->lower, nr, sizeof(*l), GFP_KERNEL_ACCOUNT);
+       if (!l)
                return -ENOMEM;
+
+       ctx->lower = l;
+       ctx->capacity = nr;
+       return 0;
+}
+
+static void ovl_add_layer(struct fs_context *fc, enum ovl_opt layer,
+                        struct path *path, char **pname)
+{
+       struct ovl_fs *ofs = fc->s_fs_info;
+       struct ovl_config *config = &ofs->config;
+       struct ovl_fs_context *ctx = fc->fs_private;
+       struct ovl_fs_context_layer *l;
+
+       switch (layer) {
+       case Opt_workdir:
+               swap(config->workdir, *pname);
+               swap(ctx->work, *path);
+               break;
+       case Opt_upperdir:
+               swap(config->upperdir, *pname);
+               swap(ctx->upper, *path);
+               break;
+       case Opt_datadir_add:
+               ctx->nr_data++;
+               fallthrough;
+       case Opt_lowerdir_add:
+               WARN_ON(ctx->nr >= ctx->capacity);
+               l = &ctx->lower[ctx->nr++];
+               memset(l, 0, sizeof(*l));
+               swap(l->name, *pname);
+               swap(l->path, *path);
+               break;
+       default:
+               WARN_ON(1);
        }
+}
 
-       if (workdir) {
-               kfree(config->workdir);
-               config->workdir = dup;
-               path_put(&ctx->work);
-               ctx->work = path;
-       } else {
-               kfree(config->upperdir);
-               config->upperdir = dup;
-               path_put(&ctx->upper);
-               ctx->upper = path;
+static int ovl_parse_layer(struct fs_context *fc, struct fs_parameter *param,
+                          enum ovl_opt layer)
+{
+       char *name = kstrdup(param->string, GFP_KERNEL);
+       bool upper = (layer == Opt_upperdir || layer == Opt_workdir);
+       struct path path;
+       int err;
+
+       if (!name)
+               return -ENOMEM;
+
+       if (upper)
+               err = ovl_mount_dir(name, &path);
+       else
+               err = ovl_mount_dir_noesc(name, &path);
+       if (err)
+               goto out_free;
+
+       err = ovl_mount_dir_check(fc, &path, layer, name, upper);
+       if (err)
+               goto out_put;
+
+       if (!upper) {
+               err = ovl_ctx_realloc_lower(fc);
+               if (err)
+                       goto out_put;
        }
-       return 0;
+
+       /* Store the user provided path string in ctx to show in mountinfo */
+       ovl_add_layer(fc, layer, &path, &name);
+
+out_put:
+       path_put(&path);
+out_free:
+       kfree(name);
+       return err;
 }
 
-static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
+static void ovl_reset_lowerdirs(struct ovl_fs_context *ctx)
 {
-       for (size_t nr = 0; nr < ctx->nr; nr++) {
-               path_put(&ctx->lower[nr].path);
-               kfree(ctx->lower[nr].name);
-               ctx->lower[nr].name = NULL;
+       struct ovl_fs_context_layer *l = ctx->lower;
+
+       // Reset old user provided lowerdir string
+       kfree(ctx->lowerdir_all);
+       ctx->lowerdir_all = NULL;
+
+       for (size_t nr = 0; nr < ctx->nr; nr++, l++) {
+               path_put(&l->path);
+               kfree(l->name);
+               l->name = NULL;
        }
        ctx->nr = 0;
        ctx->nr_data = 0;
@@ -346,7 +419,7 @@ static void ovl_parse_param_drop_lowerdir(struct ovl_fs_context *ctx)
 /*
  * Parse lowerdir= mount option:
  *
- * (1) lowerdir=/lower1:/lower2:/lower3::/data1::/data2
+ * e.g.: lowerdir=/lower1:/lower2:/lower3::/data1::/data2
  *     Set "/lower1", "/lower2", and "/lower3" as lower layers and
  *     "/data1" and "/data2" as data lower layers. Any existing lower
  *     layers are replaced.
@@ -356,9 +429,9 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
        int err;
        struct ovl_fs_context *ctx = fc->fs_private;
        struct ovl_fs_context_layer *l;
-       char *dup = NULL, *dup_iter;
+       char *dup = NULL, *iter;
        ssize_t nr_lower = 0, nr = 0, nr_data = 0;
-       bool append = false, data_layer = false;
+       bool data_layer = false;
 
        /*
         * Ensure we're backwards compatible with mount(2)
@@ -366,16 +439,21 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
         */
 
        /* drop all existing lower layers */
-       if (!*name) {
-               ovl_parse_param_drop_lowerdir(ctx);
+       ovl_reset_lowerdirs(ctx);
+
+       if (!*name)
                return 0;
-       }
 
        if (*name == ':') {
                pr_err("cannot append lower layer");
                return -EINVAL;
        }
 
+       // Store user provided lowerdir string to show in mount options
+       ctx->lowerdir_all = kstrdup(name, GFP_KERNEL);
+       if (!ctx->lowerdir_all)
+               return -ENOMEM;
+
        dup = kstrdup(name, GFP_KERNEL);
        if (!dup)
                return -ENOMEM;
@@ -385,36 +463,11 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
        if (nr_lower < 0)
                goto out_err;
 
-       if ((nr_lower > OVL_MAX_STACK) ||
-           (append && (size_add(ctx->nr, nr_lower) > OVL_MAX_STACK))) {
+       if (nr_lower > OVL_MAX_STACK) {
                pr_err("too many lower directories, limit is %d\n", OVL_MAX_STACK);
                goto out_err;
        }
 
-       if (!append)
-               ovl_parse_param_drop_lowerdir(ctx);
-
-       /*
-        * (1) append
-        *
-        * We want nr <= nr_lower <= capacity We know nr > 0 and nr <=
-        * capacity. If nr == 0 this wouldn't be append. If nr +
-        * nr_lower is <= capacity then nr <= nr_lower <= capacity
-        * already holds. If nr + nr_lower exceeds capacity, we realloc.
-        *
-        * (2) replace
-        *
-        * Ensure we're backwards compatible with mount(2) which allows
-        * "lowerdir=/a:/b:/c,lowerdir=/d:/e:/f" causing the last
-        * specified lowerdir mount option to win.
-        *
-        * We want nr <= nr_lower <= capacity We know either (i) nr == 0
-        * or (ii) nr > 0. We also know nr_lower > 0. The capacity
-        * could've been changed multiple times already so we only know
-        * nr <= capacity. If nr + nr_lower > capacity we realloc,
-        * otherwise nr <= nr_lower <= capacity holds already.
-        */
-       nr_lower += ctx->nr;
        if (nr_lower > ctx->capacity) {
                err = -ENOMEM;
                l = krealloc_array(ctx->lower, nr_lower, sizeof(*ctx->lower),
@@ -426,41 +479,21 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
                ctx->capacity = nr_lower;
        }
 
-       /*
-        *   (3) By (1) and (2) we know nr <= nr_lower <= capacity.
-        *   (4) If ctx->nr == 0 => replace
-        *       We have verified above that the lowerdir mount option
-        *       isn't an append, i.e., the lowerdir mount option
-        *       doesn't start with ":" or "::".
-        * (4.1) The lowerdir mount options only contains regular lower
-        *       layers ":".
-        *       => Nothing to verify.
-        * (4.2) The lowerdir mount options contains regular ":" and
-        *       data "::" layers.
-        *       => We need to verify that data lower layers "::" aren't
-        *          followed by regular ":" lower layers
-        *   (5) If ctx->nr > 0 => append
-        *       We know that there's at least one regular layer
-        *       otherwise we would've failed when parsing the previous
-        *       lowerdir mount option.
-        * (5.1) The lowerdir mount option is a regular layer ":" append
-        *       => We need to verify that no data layers have been
-        *          specified before.
-        * (5.2) The lowerdir mount option is a data layer "::" append
-        *       We know that there's at least one regular layer or
-        *       other data layers. => There's nothing to verify.
-        */
-       dup_iter = dup;
-       for (nr = ctx->nr; nr < nr_lower; nr++) {
-               l = &ctx->lower[nr];
+       iter = dup;
+       l = ctx->lower;
+       for (nr = 0; nr < nr_lower; nr++, l++) {
                memset(l, 0, sizeof(*l));
 
-               err = ovl_mount_dir(dup_iter, &l->path, false);
+               err = ovl_mount_dir(iter, &l->path);
+               if (err)
+                       goto out_put;
+
+               err = ovl_mount_dir_check(fc, &l->path, Opt_lowerdir, iter, false);
                if (err)
                        goto out_put;
 
                err = -ENOMEM;
-               l->name = kstrdup(dup_iter, GFP_KERNEL_ACCOUNT);
+               l->name = kstrdup(iter, GFP_KERNEL_ACCOUNT);
                if (!l->name)
                        goto out_put;
 
@@ -472,8 +505,8 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
                        break;
 
                err = -EINVAL;
-               dup_iter = strchr(dup_iter, '\0') + 1;
-               if (*dup_iter) {
+               iter = strchr(iter, '\0') + 1;
+               if (*iter) {
                        /*
                         * This is a regular layer so we require that
                         * there are no data layers.
@@ -489,7 +522,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
 
                /* This is a data lower layer. */
                data_layer = true;
-               dup_iter++;
+               iter++;
        }
        ctx->nr = nr_lower;
        ctx->nr_data += nr_data;
@@ -497,21 +530,7 @@ static int ovl_parse_param_lowerdir(const char *name, struct fs_context *fc)
        return 0;
 
 out_put:
-       /*
-        * We know nr >= ctx->nr < nr_lower. If we failed somewhere
-        * we want to undo until nr == ctx->nr. This is correct for
-        * both ctx->nr == 0 and ctx->nr > 0.
-        */
-       for (; nr >= ctx->nr; nr--) {
-               l = &ctx->lower[nr];
-               kfree(l->name);
-               l->name = NULL;
-               path_put(&l->path);
-
-               /* don't overflow */
-               if (nr == 0)
-                       break;
-       }
+       ovl_reset_lowerdirs(ctx);
 
 out_err:
        kfree(dup);
@@ -556,11 +575,11 @@ static int ovl_parse_param(struct fs_context *fc, struct fs_parameter *param)
        case Opt_lowerdir:
                err = ovl_parse_param_lowerdir(param->string, fc);
                break;
+       case Opt_lowerdir_add:
+       case Opt_datadir_add:
        case Opt_upperdir:
-               fallthrough;
        case Opt_workdir:
-               err = ovl_parse_param_upperdir(param->string, fc,
-                                              (Opt_workdir == opt));
+               err = ovl_parse_layer(fc, param, opt);
                break;
        case Opt_default_permissions:
                config->default_permissions = true;
@@ -617,7 +636,7 @@ static int ovl_get_tree(struct fs_context *fc)
 
 static inline void ovl_fs_context_free(struct ovl_fs_context *ctx)
 {
-       ovl_parse_param_drop_lowerdir(ctx);
+       ovl_reset_lowerdirs(ctx);
        path_put(&ctx->upper);
        path_put(&ctx->work);
        kfree(ctx->lower);
@@ -933,23 +952,28 @@ int ovl_show_options(struct seq_file *m, struct dentry *dentry)
 {
        struct super_block *sb = dentry->d_sb;
        struct ovl_fs *ofs = OVL_FS(sb);
-       size_t nr, nr_merged_lower = ofs->numlayer - ofs->numdatalayer;
+       size_t nr, nr_merged_lower, nr_lower = 0;
+       char **lowerdirs = ofs->config.lowerdirs;
 
        /*
-        * lowerdirs[] starts from offset 1, then
-        * >= 0 regular lower layers prefixed with : and
-        * >= 0 data-only lower layers prefixed with ::
-        *
-        * we need to escase comma and space like seq_show_option() does and
-        * we also need to escape the colon separator from lowerdir paths.
+        * lowerdirs[0] holds the colon separated list that user provided
+        * with lowerdir mount option.
+        * lowerdirs[1..numlayer] hold the lowerdir paths that were added
+        * using the lowerdir+ and datadir+ mount options.
+        * For now, we do not allow mixing the legacy lowerdir mount option
+        * with the new lowerdir+ and datadir+ mount options.
         */
-       seq_puts(m, ",lowerdir=");
-       for (nr = 1; nr < ofs->numlayer; nr++) {
-               if (nr > 1)
-                       seq_putc(m, ':');
-               if (nr >= nr_merged_lower)
-                       seq_putc(m, ':');
-               seq_escape(m, ofs->config.lowerdirs[nr], ":, \t\n\\");
+       if (lowerdirs[0]) {
+               seq_show_option(m, "lowerdir", lowerdirs[0]);
+       } else {
+               nr_lower = ofs->numlayer;
+               nr_merged_lower = nr_lower - ofs->numdatalayer;
+       }
+       for (nr = 1; nr < nr_lower; nr++) {
+               if (nr < nr_merged_lower)
+                       seq_show_option(m, "lowerdir+", lowerdirs[nr]);
+               else
+                       seq_show_option(m, "datadir+", lowerdirs[nr]);
        }
        if (ofs->config.upperdir) {
                seq_show_option(m, "upperdir", ofs->config.upperdir);
index 8750da68ab2a468237f57f561c7dec2fecc00b06..c96d939820211ddc63e265670a2aff60d95eec49 100644 (file)
@@ -32,6 +32,7 @@ struct ovl_fs_context {
        size_t nr_data;
        struct ovl_opt_set set;
        struct ovl_fs_context_layer *lower;
+       char *lowerdir_all; /* user provided lowerdir string */
 };
 
 int ovl_init_fs_context(struct fs_context *fc);
index de39e067ae65a53effae5470b12c6a31726451cf..a490fc47c3e7ea351f53187b487efb166713332c 100644 (file)
@@ -25,6 +25,7 @@ struct ovl_cache_entry {
        struct ovl_cache_entry *next_maybe_whiteout;
        bool is_upper;
        bool is_whiteout;
+       bool check_xwhiteout;
        char name[];
 };
 
@@ -47,6 +48,7 @@ struct ovl_readdir_data {
        int err;
        bool is_upper;
        bool d_type_supported;
+       bool in_xwhiteouts_dir;
 };
 
 struct ovl_dir_file {
@@ -162,6 +164,8 @@ static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd,
                p->ino = 0;
        p->is_upper = rdd->is_upper;
        p->is_whiteout = false;
+       /* Defer check for overlay.whiteout to ovl_iterate() */
+       p->check_xwhiteout = rdd->in_xwhiteouts_dir && d_type == DT_REG;
 
        if (d_type == DT_CHR) {
                p->next_maybe_whiteout = rdd->first_maybe_whiteout;
@@ -301,6 +305,8 @@ static inline int ovl_dir_read(const struct path *realpath,
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
+       rdd->in_xwhiteouts_dir = rdd->dentry &&
+               ovl_path_check_xwhiteouts_xattr(OVL_FS(rdd->dentry->d_sb), realpath);
        rdd->first_maybe_whiteout = NULL;
        rdd->ctx.pos = 0;
        do {
@@ -447,7 +453,7 @@ static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
 }
 
 /*
- * Set d_ino for upper entries. Non-upper entries should always report
+ * Set d_ino for upper entries if needed. Non-upper entries should always report
  * the uppermost real inode ino and should not call this function.
  *
  * When not all layer are on same fs, report real ino also for upper.
@@ -455,8 +461,11 @@ static u64 ovl_remap_lower_ino(u64 ino, int xinobits, int fsid,
  * When all layers are on the same fs, and upper has a reference to
  * copy up origin, call vfs_getattr() on the overlay entry to make
  * sure that d_ino will be consistent with st_ino from stat(2).
+ *
+ * Also checks the overlay.whiteout xattr by doing a full lookup which will return
+ * negative in this case.
  */
-static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry *p)
+static int ovl_cache_update(const struct path *path, struct ovl_cache_entry *p, bool update_ino)
 
 {
        struct dentry *dir = path->dentry;
@@ -467,7 +476,7 @@ static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry
        int xinobits = ovl_xino_bits(ofs);
        int err = 0;
 
-       if (!ovl_same_dev(ofs))
+       if (!ovl_same_dev(ofs) && !p->check_xwhiteout)
                goto out;
 
        if (p->name[0] == '.') {
@@ -481,6 +490,7 @@ static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry
                        goto get;
                }
        }
+       /* This checks also for xwhiteouts */
        this = lookup_one(mnt_idmap(path->mnt), p->name, dir, p->len);
        if (IS_ERR_OR_NULL(this) || !this->d_inode) {
                /* Mark a stale entry */
@@ -494,6 +504,9 @@ static int ovl_cache_update_ino(const struct path *path, struct ovl_cache_entry
        }
 
 get:
+       if (!ovl_same_dev(ofs) || !update_ino)
+               goto out;
+
        type = ovl_path_type(this);
        if (OVL_TYPE_ORIGIN(type)) {
                struct kstat stat;
@@ -572,7 +585,7 @@ static int ovl_dir_read_impure(const struct path *path,  struct list_head *list,
        list_for_each_entry_safe(p, n, list, l_node) {
                if (strcmp(p->name, ".") != 0 &&
                    strcmp(p->name, "..") != 0) {
-                       err = ovl_cache_update_ino(path, p);
+                       err = ovl_cache_update(path, p, true);
                        if (err)
                                return err;
                }
@@ -778,13 +791,13 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
        while (od->cursor != &od->cache->entries) {
                p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
                if (!p->is_whiteout) {
-                       if (!p->ino) {
-                               err = ovl_cache_update_ino(&file->f_path, p);
+                       if (!p->ino || p->check_xwhiteout) {
+                               err = ovl_cache_update(&file->f_path, p, !p->ino);
                                if (err)
                                        goto out;
                        }
                }
-               /* ovl_cache_update_ino() sets is_whiteout on stale entry */
+               /* ovl_cache_update() sets is_whiteout on stale entry */
                if (!p->is_whiteout) {
                        if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
                                break;
index 17864a8d2b85c4ff0b9339888daf9488f8998736..a0967bb250036d38c6ee922950aec7f46608aed3 100644 (file)
@@ -445,68 +445,6 @@ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
        return ok;
 }
 
-static int ovl_own_xattr_get(const struct xattr_handler *handler,
-                            struct dentry *dentry, struct inode *inode,
-                            const char *name, void *buffer, size_t size)
-{
-       return -EOPNOTSUPP;
-}
-
-static int ovl_own_xattr_set(const struct xattr_handler *handler,
-                            struct mnt_idmap *idmap,
-                            struct dentry *dentry, struct inode *inode,
-                            const char *name, const void *value,
-                            size_t size, int flags)
-{
-       return -EOPNOTSUPP;
-}
-
-static int ovl_other_xattr_get(const struct xattr_handler *handler,
-                              struct dentry *dentry, struct inode *inode,
-                              const char *name, void *buffer, size_t size)
-{
-       return ovl_xattr_get(dentry, inode, name, buffer, size);
-}
-
-static int ovl_other_xattr_set(const struct xattr_handler *handler,
-                              struct mnt_idmap *idmap,
-                              struct dentry *dentry, struct inode *inode,
-                              const char *name, const void *value,
-                              size_t size, int flags)
-{
-       return ovl_xattr_set(dentry, inode, name, value, size, flags);
-}
-
-static const struct xattr_handler ovl_own_trusted_xattr_handler = {
-       .prefix = OVL_XATTR_TRUSTED_PREFIX,
-       .get = ovl_own_xattr_get,
-       .set = ovl_own_xattr_set,
-};
-
-static const struct xattr_handler ovl_own_user_xattr_handler = {
-       .prefix = OVL_XATTR_USER_PREFIX,
-       .get = ovl_own_xattr_get,
-       .set = ovl_own_xattr_set,
-};
-
-static const struct xattr_handler ovl_other_xattr_handler = {
-       .prefix = "", /* catch all */
-       .get = ovl_other_xattr_get,
-       .set = ovl_other_xattr_set,
-};
-
-static const struct xattr_handler * const ovl_trusted_xattr_handlers[] = {
-       &ovl_own_trusted_xattr_handler,
-       &ovl_other_xattr_handler,
-       NULL
-};
-
-static const struct xattr_handler * const ovl_user_xattr_handlers[] = {
-       &ovl_own_user_xattr_handler,
-       &ovl_other_xattr_handler,
-       NULL
-};
-
 static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
                          struct inode **ptrap, const char *name)
 {
@@ -647,7 +585,7 @@ static int ovl_check_rename_whiteout(struct ovl_fs *ofs)
        if (IS_ERR(whiteout))
                goto cleanup_temp;
 
-       err = ovl_is_whiteout(whiteout);
+       err = ovl_upper_is_whiteout(ofs, whiteout);
 
        /* Best effort cleanup of whiteout and temp file */
        if (err)
@@ -887,15 +825,20 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
 {
        struct vfsmount *mnt = ovl_upper_mnt(ofs);
        struct dentry *indexdir;
+       struct dentry *origin = ovl_lowerstack(oe)->dentry;
+       const struct ovl_fh *fh;
        int err;
 
+       fh = ovl_get_origin_fh(ofs, origin);
+       if (IS_ERR(fh))
+               return PTR_ERR(fh);
+
        err = mnt_want_write(mnt);
        if (err)
-               return err;
+               goto out_free_fh;
 
        /* Verify lower root is upper root origin */
-       err = ovl_verify_origin(ofs, upperpath->dentry,
-                               ovl_lowerstack(oe)->dentry, true);
+       err = ovl_verify_origin_fh(ofs, upperpath->dentry, fh, true);
        if (err) {
                pr_err("failed to verify upper root origin\n");
                goto out;
@@ -927,9 +870,10 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
                 * directory entries.
                 */
                if (ovl_check_origin_xattr(ofs, ofs->indexdir)) {
-                       err = ovl_verify_set_fh(ofs, ofs->indexdir,
-                                               OVL_XATTR_ORIGIN,
-                                               upperpath->dentry, true, false);
+                       err = ovl_verify_origin_xattr(ofs, ofs->indexdir,
+                                                     OVL_XATTR_ORIGIN,
+                                                     upperpath->dentry, true,
+                                                     false);
                        if (err)
                                pr_err("failed to verify index dir 'origin' xattr\n");
                }
@@ -947,6 +891,8 @@ static int ovl_get_indexdir(struct super_block *sb, struct ovl_fs *ofs,
 
 out:
        mnt_drop_write(mnt);
+out_free_fh:
+       kfree(fh);
        return err;
 }
 
@@ -1382,8 +1328,11 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
        ofs->layers = layers;
        /*
         * Layer 0 is reserved for upper even if there's no upper.
-        * For consistency, config.lowerdirs[0] is NULL.
+        * config.lowerdirs[0] is used for storing the user provided colon
+        * separated lowerdir string.
         */
+       ofs->config.lowerdirs[0] = ctx->lowerdir_all;
+       ctx->lowerdir_all = NULL;
        ofs->numlayer = 1;
 
        sb->s_stack_depth = 0;
@@ -1493,8 +1442,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc)
        cap_lower(cred->cap_effective, CAP_SYS_RESOURCE);
 
        sb->s_magic = OVERLAYFS_SUPER_MAGIC;
-       sb->s_xattr = ofs->config.userxattr ? ovl_user_xattr_handlers :
-               ovl_trusted_xattr_handlers;
+       sb->s_xattr = ovl_xattr_handlers(ofs);
        sb->s_fs_info = ofs;
 #ifdef CONFIG_FS_POSIX_ACL
        sb->s_flags |= SB_POSIXACL;
index 868afd8834c323d652aa33abb145084d0c235450..50a201e9cd398aab74a61634f26e51a7f6b47893 100644 (file)
 #include <linux/ratelimit.h>
 #include "overlayfs.h"
 
+/* Get write access to upper mnt - may fail if upper sb was remounted ro */
+int ovl_get_write_access(struct dentry *dentry)
+{
+       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+       return mnt_get_write_access(ovl_upper_mnt(ofs));
+}
+
+/* Get write access to upper sb - may block if upper sb is frozen */
+void ovl_start_write(struct dentry *dentry)
+{
+       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+       sb_start_write(ovl_upper_mnt(ofs)->mnt_sb);
+}
+
 int ovl_want_write(struct dentry *dentry)
 {
        struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
        return mnt_want_write(ovl_upper_mnt(ofs));
 }
 
+void ovl_put_write_access(struct dentry *dentry)
+{
+       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+       mnt_put_write_access(ovl_upper_mnt(ofs));
+}
+
+void ovl_end_write(struct dentry *dentry)
+{
+       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+       sb_end_write(ovl_upper_mnt(ofs)->mnt_sb);
+}
+
 void ovl_drop_write(struct dentry *dentry)
 {
        struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
@@ -55,7 +81,7 @@ int ovl_can_decode_fh(struct super_block *sb)
        if (!capable(CAP_DAC_READ_SEARCH))
                return 0;
 
-       if (!sb->s_export_op || !sb->s_export_op->fh_to_dentry)
+       if (!exportfs_can_decode_fh(sb->s_export_op))
                return 0;
 
        return sb->s_export_op->encode_fh ? -1 : FILEID_INO32_GEN;
@@ -575,6 +601,16 @@ bool ovl_is_whiteout(struct dentry *dentry)
        return inode && IS_WHITEOUT(inode);
 }
 
+/*
+ * Use this over ovl_is_whiteout for upper and lower files, as it also
+ * handles overlay.whiteout xattr whiteout files.
+ */
+bool ovl_path_is_whiteout(struct ovl_fs *ofs, const struct path *path)
+{
+       return ovl_is_whiteout(path->dentry) ||
+               ovl_path_check_xwhiteout_xattr(ofs, path);
+}
+
 struct file *ovl_path_open(const struct path *path, int flags)
 {
        struct inode *inode = d_inode(path->dentry);
@@ -644,22 +680,36 @@ bool ovl_already_copied_up(struct dentry *dentry, int flags)
        return false;
 }
 
+/*
+ * The copy up "transaction" keeps an elevated mnt write count on upper mnt,
+ * but leaves taking freeze protection on upper sb to lower level helpers.
+ */
 int ovl_copy_up_start(struct dentry *dentry, int flags)
 {
        struct inode *inode = d_inode(dentry);
        int err;
 
        err = ovl_inode_lock_interruptible(inode);
-       if (!err && ovl_already_copied_up_locked(dentry, flags)) {
+       if (err)
+               return err;
+
+       if (ovl_already_copied_up_locked(dentry, flags))
                err = 1; /* Already copied up */
-               ovl_inode_unlock(inode);
-       }
+       else
+               err = ovl_get_write_access(dentry);
+       if (err)
+               goto out_unlock;
+
+       return 0;
 
+out_unlock:
+       ovl_inode_unlock(inode);
        return err;
 }
 
 void ovl_copy_up_end(struct dentry *dentry)
 {
+       ovl_put_write_access(dentry);
        ovl_inode_unlock(d_inode(dentry));
 }
 
@@ -676,6 +726,32 @@ bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, const struct path *path)
        return false;
 }
 
+bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path)
+{
+       struct dentry *dentry = path->dentry;
+       int res;
+
+       /* xattr.whiteout must be a zero size regular file */
+       if (!d_is_reg(dentry) || i_size_read(d_inode(dentry)) != 0)
+               return false;
+
+       res = ovl_path_getxattr(ofs, path, OVL_XATTR_XWHITEOUT, NULL, 0);
+       return res >= 0;
+}
+
+bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path)
+{
+       struct dentry *dentry = path->dentry;
+       int res;
+
+       /* xattr.whiteouts must be a directory */
+       if (!d_is_dir(dentry))
+               return false;
+
+       res = ovl_path_getxattr(ofs, path, OVL_XATTR_XWHITEOUTS, NULL, 0);
+       return res >= 0;
+}
+
 /*
  * Load persistent uuid from xattr into s_uuid if found, or store a new
  * random generated value in s_uuid and in xattr.
@@ -760,6 +836,8 @@ bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
 #define OVL_XATTR_UUID_POSTFIX         "uuid"
 #define OVL_XATTR_METACOPY_POSTFIX     "metacopy"
 #define OVL_XATTR_PROTATTR_POSTFIX     "protattr"
+#define OVL_XATTR_XWHITEOUT_POSTFIX    "whiteout"
+#define OVL_XATTR_XWHITEOUTS_POSTFIX   "whiteouts"
 
 #define OVL_XATTR_TAB_ENTRY(x) \
        [x] = { [false] = OVL_XATTR_TRUSTED_PREFIX x ## _POSTFIX, \
@@ -775,6 +853,8 @@ const char *const ovl_xattr_table[][2] = {
        OVL_XATTR_TAB_ENTRY(OVL_XATTR_UUID),
        OVL_XATTR_TAB_ENTRY(OVL_XATTR_METACOPY),
        OVL_XATTR_TAB_ENTRY(OVL_XATTR_PROTATTR),
+       OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUT),
+       OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUTS),
 };
 
 int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry,
@@ -973,12 +1053,18 @@ static void ovl_cleanup_index(struct dentry *dentry)
        struct dentry *index = NULL;
        struct inode *inode;
        struct qstr name = { };
+       bool got_write = false;
        int err;
 
        err = ovl_get_index_name(ofs, lowerdentry, &name);
        if (err)
                goto fail;
 
+       err = ovl_want_write(dentry);
+       if (err)
+               goto fail;
+
+       got_write = true;
        inode = d_inode(upperdentry);
        if (!S_ISDIR(inode->i_mode) && inode->i_nlink != 1) {
                pr_warn_ratelimited("cleanup linked index (%pd2, ino=%lu, nlink=%u)\n",
@@ -1016,6 +1102,8 @@ static void ovl_cleanup_index(struct dentry *dentry)
                goto fail;
 
 out:
+       if (got_write)
+               ovl_drop_write(dentry);
        kfree(name.name);
        dput(index);
        return;
@@ -1062,8 +1150,12 @@ int ovl_nlink_start(struct dentry *dentry)
        if (err)
                return err;
 
+       err = ovl_want_write(dentry);
+       if (err)
+               goto out_unlock;
+
        if (d_is_dir(dentry) || !ovl_test_flag(OVL_INDEX, inode))
-               goto out;
+               return 0;
 
        old_cred = ovl_override_creds(dentry->d_sb);
        /*
@@ -1074,10 +1166,15 @@ int ovl_nlink_start(struct dentry *dentry)
         */
        err = ovl_set_nlink_upper(dentry);
        revert_creds(old_cred);
-
-out:
        if (err)
-               ovl_inode_unlock(inode);
+               goto out_drop_write;
+
+       return 0;
+
+out_drop_write:
+       ovl_drop_write(dentry);
+out_unlock:
+       ovl_inode_unlock(inode);
 
        return err;
 }
@@ -1086,6 +1183,8 @@ void ovl_nlink_end(struct dentry *dentry)
 {
        struct inode *inode = d_inode(dentry);
 
+       ovl_drop_write(dentry);
+
        if (ovl_test_flag(OVL_INDEX, inode) && inode->i_nlink == 0) {
                const struct cred *old_cred;
 
@@ -1403,6 +1502,7 @@ void ovl_copyattr(struct inode *inode)
        realinode = ovl_i_path_real(inode, &realpath);
        real_idmap = mnt_idmap(realpath.mnt);
 
+       spin_lock(&inode->i_lock);
        vfsuid = i_uid_into_vfsuid(real_idmap, realinode);
        vfsgid = i_gid_into_vfsgid(real_idmap, realinode);
 
@@ -1413,4 +1513,5 @@ void ovl_copyattr(struct inode *inode)
        inode_set_mtime_to_ts(inode, inode_get_mtime(realinode));
        inode_set_ctime_to_ts(inode, inode_get_ctime(realinode));
        i_size_write(inode, i_size_read(realinode));
+       spin_unlock(&inode->i_lock);
 }
diff --git a/fs/overlayfs/xattrs.c b/fs/overlayfs/xattrs.c
new file mode 100644 (file)
index 0000000..383978e
--- /dev/null
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include "overlayfs.h"
+
+static bool ovl_is_escaped_xattr(struct super_block *sb, const char *name)
+{
+       struct ovl_fs *ofs = sb->s_fs_info;
+
+       if (ofs->config.userxattr)
+               return strncmp(name, OVL_XATTR_ESCAPE_USER_PREFIX,
+                              OVL_XATTR_ESCAPE_USER_PREFIX_LEN) == 0;
+       else
+               return strncmp(name, OVL_XATTR_ESCAPE_TRUSTED_PREFIX,
+                              OVL_XATTR_ESCAPE_TRUSTED_PREFIX_LEN - 1) == 0;
+}
+
+static bool ovl_is_own_xattr(struct super_block *sb, const char *name)
+{
+       struct ovl_fs *ofs = OVL_FS(sb);
+
+       if (ofs->config.userxattr)
+               return strncmp(name, OVL_XATTR_USER_PREFIX,
+                              OVL_XATTR_USER_PREFIX_LEN) == 0;
+       else
+               return strncmp(name, OVL_XATTR_TRUSTED_PREFIX,
+                              OVL_XATTR_TRUSTED_PREFIX_LEN) == 0;
+}
+
+bool ovl_is_private_xattr(struct super_block *sb, const char *name)
+{
+       return ovl_is_own_xattr(sb, name) && !ovl_is_escaped_xattr(sb, name);
+}
+
+static int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
+                        const void *value, size_t size, int flags)
+{
+       int err;
+       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+       struct dentry *upperdentry = ovl_i_dentry_upper(inode);
+       struct dentry *realdentry = upperdentry ?: ovl_dentry_lower(dentry);
+       struct path realpath;
+       const struct cred *old_cred;
+
+       if (!value && !upperdentry) {
+               ovl_path_lower(dentry, &realpath);
+               old_cred = ovl_override_creds(dentry->d_sb);
+               err = vfs_getxattr(mnt_idmap(realpath.mnt), realdentry, name, NULL, 0);
+               revert_creds(old_cred);
+               if (err < 0)
+                       goto out;
+       }
+
+       if (!upperdentry) {
+               err = ovl_copy_up(dentry);
+               if (err)
+                       goto out;
+
+               realdentry = ovl_dentry_upper(dentry);
+       }
+
+       err = ovl_want_write(dentry);
+       if (err)
+               goto out;
+
+       old_cred = ovl_override_creds(dentry->d_sb);
+       if (value) {
+               err = ovl_do_setxattr(ofs, realdentry, name, value, size,
+                                     flags);
+       } else {
+               WARN_ON(flags != XATTR_REPLACE);
+               err = ovl_do_removexattr(ofs, realdentry, name);
+       }
+       revert_creds(old_cred);
+       ovl_drop_write(dentry);
+
+       /* copy c/mtime */
+       ovl_copyattr(inode);
+out:
+       return err;
+}
+
+static int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
+                        void *value, size_t size)
+{
+       ssize_t res;
+       const struct cred *old_cred;
+       struct path realpath;
+
+       ovl_i_path_real(inode, &realpath);
+       old_cred = ovl_override_creds(dentry->d_sb);
+       res = vfs_getxattr(mnt_idmap(realpath.mnt), realpath.dentry, name, value, size);
+       revert_creds(old_cred);
+       return res;
+}
+
+static bool ovl_can_list(struct super_block *sb, const char *s)
+{
+       /* Never list private (.overlay) */
+       if (ovl_is_private_xattr(sb, s))
+               return false;
+
+       /* List all non-trusted xattrs */
+       if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+               return true;
+
+       /* list other trusted for superuser only */
+       return ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
+}
+
+ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
+{
+       struct dentry *realdentry = ovl_dentry_real(dentry);
+       struct ovl_fs *ofs = OVL_FS(dentry->d_sb);
+       ssize_t res;
+       size_t len;
+       char *s;
+       const struct cred *old_cred;
+       size_t prefix_len, name_len;
+
+       old_cred = ovl_override_creds(dentry->d_sb);
+       res = vfs_listxattr(realdentry, list, size);
+       revert_creds(old_cred);
+       if (res <= 0 || size == 0)
+               return res;
+
+       prefix_len = ofs->config.userxattr ?
+               OVL_XATTR_USER_PREFIX_LEN : OVL_XATTR_TRUSTED_PREFIX_LEN;
+
+       /* filter out private xattrs */
+       for (s = list, len = res; len;) {
+               size_t slen = strnlen(s, len) + 1;
+
+               /* underlying fs providing us with an broken xattr list? */
+               if (WARN_ON(slen > len))
+                       return -EIO;
+
+               len -= slen;
+               if (!ovl_can_list(dentry->d_sb, s)) {
+                       res -= slen;
+                       memmove(s, s + slen, len);
+               } else if (ovl_is_escaped_xattr(dentry->d_sb, s)) {
+                       res -= OVL_XATTR_ESCAPE_PREFIX_LEN;
+                       name_len = slen - prefix_len - OVL_XATTR_ESCAPE_PREFIX_LEN;
+                       s += prefix_len;
+                       memmove(s, s + OVL_XATTR_ESCAPE_PREFIX_LEN, name_len + len);
+                       s += name_len;
+               } else {
+                       s += slen;
+               }
+       }
+
+       return res;
+}
+
+static char *ovl_xattr_escape_name(const char *prefix, const char *name)
+{
+       size_t prefix_len = strlen(prefix);
+       size_t name_len = strlen(name);
+       size_t escaped_len;
+       char *escaped, *s;
+
+       escaped_len = prefix_len + OVL_XATTR_ESCAPE_PREFIX_LEN + name_len;
+       if (escaped_len > XATTR_NAME_MAX)
+               return ERR_PTR(-EOPNOTSUPP);
+
+       escaped = kmalloc(escaped_len + 1, GFP_KERNEL);
+       if (escaped == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       s = escaped;
+       memcpy(s, prefix, prefix_len);
+       s += prefix_len;
+       memcpy(s, OVL_XATTR_ESCAPE_PREFIX, OVL_XATTR_ESCAPE_PREFIX_LEN);
+       s += OVL_XATTR_ESCAPE_PREFIX_LEN;
+       memcpy(s, name, name_len + 1);
+
+       return escaped;
+}
+
+static int ovl_own_xattr_get(const struct xattr_handler *handler,
+                            struct dentry *dentry, struct inode *inode,
+                            const char *name, void *buffer, size_t size)
+{
+       char *escaped;
+       int r;
+
+       escaped = ovl_xattr_escape_name(handler->prefix, name);
+       if (IS_ERR(escaped))
+               return PTR_ERR(escaped);
+
+       r = ovl_xattr_get(dentry, inode, escaped, buffer, size);
+
+       kfree(escaped);
+
+       return r;
+}
+
+static int ovl_own_xattr_set(const struct xattr_handler *handler,
+                            struct mnt_idmap *idmap,
+                            struct dentry *dentry, struct inode *inode,
+                            const char *name, const void *value,
+                            size_t size, int flags)
+{
+       char *escaped;
+       int r;
+
+       escaped = ovl_xattr_escape_name(handler->prefix, name);
+       if (IS_ERR(escaped))
+               return PTR_ERR(escaped);
+
+       r = ovl_xattr_set(dentry, inode, escaped, value, size, flags);
+
+       kfree(escaped);
+
+       return r;
+}
+
+static int ovl_other_xattr_get(const struct xattr_handler *handler,
+                              struct dentry *dentry, struct inode *inode,
+                              const char *name, void *buffer, size_t size)
+{
+       return ovl_xattr_get(dentry, inode, name, buffer, size);
+}
+
+static int ovl_other_xattr_set(const struct xattr_handler *handler,
+                              struct mnt_idmap *idmap,
+                              struct dentry *dentry, struct inode *inode,
+                              const char *name, const void *value,
+                              size_t size, int flags)
+{
+       return ovl_xattr_set(dentry, inode, name, value, size, flags);
+}
+
+static const struct xattr_handler ovl_own_trusted_xattr_handler = {
+       .prefix = OVL_XATTR_TRUSTED_PREFIX,
+       .get = ovl_own_xattr_get,
+       .set = ovl_own_xattr_set,
+};
+
+static const struct xattr_handler ovl_own_user_xattr_handler = {
+       .prefix = OVL_XATTR_USER_PREFIX,
+       .get = ovl_own_xattr_get,
+       .set = ovl_own_xattr_set,
+};
+
+static const struct xattr_handler ovl_other_xattr_handler = {
+       .prefix = "", /* catch all */
+       .get = ovl_other_xattr_get,
+       .set = ovl_other_xattr_set,
+};
+
+static const struct xattr_handler * const ovl_trusted_xattr_handlers[] = {
+       &ovl_own_trusted_xattr_handler,
+       &ovl_other_xattr_handler,
+       NULL
+};
+
+static const struct xattr_handler * const ovl_user_xattr_handlers[] = {
+       &ovl_own_user_xattr_handler,
+       &ovl_other_xattr_handler,
+       NULL
+};
+
+const struct xattr_handler * const *ovl_xattr_handlers(struct ovl_fs *ofs)
+{
+       return ofs->config.userxattr ? ovl_user_xattr_handlers :
+               ovl_trusted_xattr_handlers;
+}
+
index 37c28415df1e0227d52956a173e12cd6d9f6b9e5..d606e8cbcb7db2b4026675bd9cbc264834687807 100644 (file)
@@ -41,13 +41,12 @@ static struct dentry *cifs_get_parent(struct dentry *dentry)
 }
 
 const struct export_operations cifs_export_ops = {
+       .encode_fh = generic_encode_ino32_fh,
        .get_parent = cifs_get_parent,
-/*     Following five export operations are unneeded so far and can default:
-       .get_dentry =
-       .get_name =
-       .find_exported_dentry =
-       .decode_fh =
-       .encode_fs =  */
+/*
+ * Following export operations are mandatory for NFS export support:
+ *     .fh_to_dentry =
+ */
 };
 
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
index e6ba1e9b8589aaa456963f898946573a5491d460..6691ae68af0c09b1c27c0c4ff58269e57182c873 100644 (file)
@@ -366,11 +366,22 @@ static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
        return 0;
 }
 
+/**
+ * set_smb1_rsp_status() - set error type in smb response header
+ * @work:      smb work containing smb response header
+ * @err:       error code to set in response
+ */
+static void set_smb1_rsp_status(struct ksmbd_work *work, __le32 err)
+{
+       work->send_no_response = 1;
+}
+
 static struct smb_version_ops smb1_server_ops = {
        .get_cmd_val = get_smb1_cmd_val,
        .init_rsp_hdr = init_smb1_rsp_hdr,
        .allocate_rsp_buf = smb1_allocate_rsp_buf,
        .check_user_session = smb1_check_user_session,
+       .set_rsp_status = set_smb1_rsp_status,
 };
 
 static int smb1_negotiate(struct ksmbd_work *work)
index 6c0305be895e56fb11464c5fe17245afc4354447..51b8bfab74813fb3f79bc85e2155eeebd84f2d79 100644 (file)
@@ -1107,6 +1107,7 @@ pass:
                struct smb_acl *pdacl;
                struct smb_sid *powner_sid = NULL, *pgroup_sid = NULL;
                int powner_sid_size = 0, pgroup_sid_size = 0, pntsd_size;
+               int pntsd_alloc_size;
 
                if (parent_pntsd->osidoffset) {
                        powner_sid = (struct smb_sid *)((char *)parent_pntsd +
@@ -1119,9 +1120,10 @@ pass:
                        pgroup_sid_size = 1 + 1 + 6 + (pgroup_sid->num_subauth * 4);
                }
 
-               pntsd = kzalloc(sizeof(struct smb_ntsd) + powner_sid_size +
-                               pgroup_sid_size + sizeof(struct smb_acl) +
-                               nt_size, GFP_KERNEL);
+               pntsd_alloc_size = sizeof(struct smb_ntsd) + powner_sid_size +
+                       pgroup_sid_size + sizeof(struct smb_acl) + nt_size;
+
+               pntsd = kzalloc(pntsd_alloc_size, GFP_KERNEL);
                if (!pntsd) {
                        rc = -ENOMEM;
                        goto free_aces_base;
@@ -1136,6 +1138,27 @@ pass:
                pntsd->gsidoffset = parent_pntsd->gsidoffset;
                pntsd->dacloffset = parent_pntsd->dacloffset;
 
+               if ((u64)le32_to_cpu(pntsd->osidoffset) + powner_sid_size >
+                   pntsd_alloc_size) {
+                       rc = -EINVAL;
+                       kfree(pntsd);
+                       goto free_aces_base;
+               }
+
+               if ((u64)le32_to_cpu(pntsd->gsidoffset) + pgroup_sid_size >
+                   pntsd_alloc_size) {
+                       rc = -EINVAL;
+                       kfree(pntsd);
+                       goto free_aces_base;
+               }
+
+               if ((u64)le32_to_cpu(pntsd->dacloffset) + sizeof(struct smb_acl) + nt_size >
+                   pntsd_alloc_size) {
+                       rc = -EINVAL;
+                       kfree(pntsd);
+                       goto free_aces_base;
+               }
+
                if (pntsd->osidoffset) {
                        struct smb_sid *owner_sid = (struct smb_sid *)((char *)pntsd +
                                        le32_to_cpu(pntsd->osidoffset));
index 1053127f71adfbaa946809da2f84a41ffd065564..c53dea5598fc63718e5df7f42e2079d5ac00497a 100644 (file)
@@ -1177,9 +1177,10 @@ static int ksmbd_vfs_lookup_in_dir(const struct path *dir, char *name,
 
 /**
  * ksmbd_vfs_kern_path_locked() - lookup a file and get path info
- * @name:      file path that is relative to share
- * @flags:     lookup flags
- * @path:      if lookup succeed, return path info
+ * @name:              file path that is relative to share
+ * @flags:             lookup flags
+ * @parent_path:       if lookup succeed, return parent_path info
+ * @path:              if lookup succeed, return path info
  * @caseless:  caseless filename lookup
  *
  * Return:     0 on success, otherwise error
index 723763746238d8b260b0b5c7dbca75495f65035f..62972f0ff8681c798f0b11e17d501373b2145bd9 100644 (file)
@@ -173,6 +173,7 @@ __le64 *squashfs_read_inode_lookup_table(struct super_block *sb,
 
 
 const struct export_operations squashfs_export_ops = {
+       .encode_fh = generic_encode_ino32_fh,
        .fh_to_dentry = squashfs_fh_to_dentry,
        .fh_to_parent = squashfs_fh_to_parent,
        .get_parent = squashfs_get_parent
index 77faad6627392629f41d141643dee7e07e539470..076392396e724e210d565e6f78b35d88613e471d 100644 (file)
@@ -2160,3 +2160,4 @@ int sb_init_dio_done_wq(struct super_block *sb)
                destroy_workqueue(wq);
        return 0;
 }
+EXPORT_SYMBOL_GPL(sb_init_dio_done_wq);
index 7af442de44c36db1cff22aab23795df6ac538fff..3b13c648d4900efeca9f17f118f2dbd0c5e1e8be 100644 (file)
@@ -725,7 +725,7 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
        struct inode *inode = d_inode(old_dentry);
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct ubifs_inode *dir_ui = ubifs_inode(dir);
-       int err, sz_change = CALC_DENT_SIZE(dentry->d_name.len);
+       int err, sz_change;
        struct ubifs_budget_req req = { .new_dent = 1, .dirtied_ino = 2,
                                .dirtied_ino_d = ALIGN(ui->data_len, 8) };
        struct fscrypt_name nm;
@@ -749,6 +749,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
        if (err)
                return err;
 
+       sz_change = CALC_DENT_SIZE(fname_len(&nm));
+
        err = dbg_check_synced_i_size(c, inode);
        if (err)
                goto out_fname;
index 2e65fd2dbdc34d9286c8832f0684937924c20947..2d2b39f843ce9dba5caf51e6d4f9f7a335a4af0f 100644 (file)
@@ -1375,6 +1375,9 @@ static inline int mctime_update_needed(const struct inode *inode,
 /**
  * ubifs_update_time - update time of inode.
  * @inode: inode to update
+ * @time:  timespec structure to hold the current time value
+ * @flags: time updating control flag determines updating
+ *         which time fields of @inode
  *
  * This function updates time of the inode.
  */
index d69d2154645b22a998687e3076ea8ad34477d4fb..f0a5538c84b0f2be4a3b061dc5f1d84f1f024a0c 100644 (file)
@@ -1607,6 +1607,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode,
                                ubifs_err(c, "bad data node (block %u, inode %lu)",
                                          blk, inode->i_ino);
                                ubifs_dump_node(c, dn, dn_size);
+                               err = -EUCLEAN;
                                goto out_free;
                        }
 
index 0d0478815d4dbcf595699c0e8bb6ad7a5d21236f..09e270d6ed0258923ccc7680025fad26d1cac5c6 100644 (file)
@@ -919,8 +919,10 @@ static void free_buds(struct ubifs_info *c)
 {
        struct ubifs_bud *bud, *n;
 
-       rbtree_postorder_for_each_entry_safe(bud, n, &c->buds, rb)
+       rbtree_postorder_for_each_entry_safe(bud, n, &c->buds, rb) {
+               kfree(bud->log_hash);
                kfree(bud);
+       }
 }
 
 /**
@@ -1189,6 +1191,7 @@ static void destroy_journal(struct ubifs_info *c)
 
                bud = list_entry(c->old_buds.next, struct ubifs_bud, list);
                list_del(&bud->list);
+               kfree(bud->log_hash);
                kfree(bud);
        }
        ubifs_destroy_idx_gc(c);
index 6b7d95b65f4b635709f31d0c37c2e1e9a45cba48..f4728e65d1bda4d4838b4373de92df76f3c2a4ae 100644 (file)
@@ -65,6 +65,7 @@ static void do_insert_old_idx(struct ubifs_info *c,
                else {
                        ubifs_err(c, "old idx added twice!");
                        kfree(old_idx);
+                       return;
                }
        }
        rb_link_node(&old_idx->rb, parent, p);
index 23377c1baed9e09bd938b4d34d80d35b7e2fe8b6..a480810cd4e359f5b272b23677746fba57bddaea 100644 (file)
@@ -137,6 +137,7 @@ static struct dentry *ufs_get_parent(struct dentry *child)
 }
 
 static const struct export_operations ufs_export_ops = {
+       .encode_fh = generic_encode_ino32_fh,
        .fh_to_dentry   = ufs_fh_to_dentry,
        .fh_to_parent   = ufs_fh_to_parent,
        .get_parent     = ufs_get_parent,
index 30c931b38853c9a50d44465e78b41b7bdd993a41..be62acffad6ccdfb31748987cc507e03f08afb81 100644 (file)
@@ -21,7 +21,7 @@
 #include "xfs_bmap.h"
 #include "xfs_bmap_util.h"
 #include "xfs_bmap_btree.h"
-#include "xfs_rtalloc.h"
+#include "xfs_rtbitmap.h"
 #include "xfs_errortag.h"
 #include "xfs_error.h"
 #include "xfs_quota.h"
@@ -2989,7 +2989,7 @@ xfs_bmap_extsize_align(
         * If realtime, and the result isn't a multiple of the realtime
         * extent size we need to remove blocks until it is.
         */
-       if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
+       if (rt && (temp = xfs_extlen_to_rtxmod(mp, align_alen))) {
                /*
                 * We're not covering the original request, or
                 * we won't be able to once we fix the length.
@@ -3016,7 +3016,7 @@ xfs_bmap_extsize_align(
                else {
                        align_alen -= orig_off - align_off;
                        align_off = orig_off;
-                       align_alen -= align_alen % mp->m_sb.sb_rextsize;
+                       align_alen -= xfs_extlen_to_rtxmod(mp, align_alen);
                }
                /*
                 * Result doesn't cover the request, fail it.
@@ -4826,12 +4826,8 @@ xfs_bmap_del_extent_delay(
        ASSERT(got->br_startoff <= del->br_startoff);
        ASSERT(got_endoff >= del_endoff);
 
-       if (isrt) {
-               uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
-
-               do_div(rtexts, mp->m_sb.sb_rextsize);
-               xfs_mod_frextents(mp, rtexts);
-       }
+       if (isrt)
+               xfs_mod_frextents(mp, xfs_rtb_to_rtx(mp, del->br_blockcount));
 
        /*
         * Update the inode delalloc counter now and wait to update the
@@ -5057,33 +5053,20 @@ xfs_bmap_del_extent_real(
 
        flags = XFS_ILOG_CORE;
        if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
-               xfs_filblks_t   len;
-               xfs_extlen_t    mod;
-
-               len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
-                                 &mod);
-               ASSERT(mod == 0);
-
                if (!(bflags & XFS_BMAPI_REMAP)) {
-                       xfs_fsblock_t   bno;
-
-                       bno = div_u64_rem(del->br_startblock,
-                                       mp->m_sb.sb_rextsize, &mod);
-                       ASSERT(mod == 0);
-
-                       error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
+                       error = xfs_rtfree_blocks(tp, del->br_startblock,
+                                       del->br_blockcount);
                        if (error)
                                goto done;
                }
 
                do_fx = 0;
-               nblks = len * mp->m_sb.sb_rextsize;
                qfield = XFS_TRANS_DQ_RTBCOUNT;
        } else {
                do_fx = 1;
-               nblks = del->br_blockcount;
                qfield = XFS_TRANS_DQ_BCOUNT;
        }
+       nblks = del->br_blockcount;
 
        del_endblock = del->br_startblock + del->br_blockcount;
        if (cur) {
@@ -5289,7 +5272,6 @@ __xfs_bunmapi(
        int                     tmp_logflags;   /* partial logging flags */
        int                     wasdel;         /* was a delayed alloc extent */
        int                     whichfork;      /* data or attribute fork */
-       xfs_fsblock_t           sum;
        xfs_filblks_t           len = *rlen;    /* length to unmap in file */
        xfs_fileoff_t           end;
        struct xfs_iext_cursor  icur;
@@ -5384,8 +5366,8 @@ __xfs_bunmapi(
                if (!isrt)
                        goto delete;
 
-               sum = del.br_startblock + del.br_blockcount;
-               div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
+               mod = xfs_rtb_to_rtxoff(mp,
+                               del.br_startblock + del.br_blockcount);
                if (mod) {
                        /*
                         * Realtime extent not lined up at the end.
@@ -5432,7 +5414,8 @@ __xfs_bunmapi(
                                goto error0;
                        goto nodelete;
                }
-               div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
+
+               mod = xfs_rtb_to_rtxoff(mp, del.br_startblock);
                if (mod) {
                        xfs_extlen_t off = mp->m_sb.sb_rextsize - mod;
 
@@ -6209,8 +6192,8 @@ xfs_bmap_validate_extent(
                return __this_address;
 
        if (XFS_IS_REALTIME_INODE(ip) && whichfork == XFS_DATA_FORK) {
-               if (!xfs_verify_rtext(mp, irec->br_startblock,
-                                         irec->br_blockcount))
+               if (!xfs_verify_rtbext(mp, irec->br_startblock,
+                                          irec->br_blockcount))
                        return __this_address;
        } else {
                if (!xfs_verify_fsbext(mp, irec->br_startblock,
index 371dc07233e059a95545629107d409f21653dbe4..9a88aba1589f87b45c470c8a8ec9a3a559a4c86d 100644 (file)
@@ -98,7 +98,7 @@ typedef struct xfs_sb {
        uint32_t        sb_blocksize;   /* logical block size, bytes */
        xfs_rfsblock_t  sb_dblocks;     /* number of data blocks */
        xfs_rfsblock_t  sb_rblocks;     /* number of realtime blocks */
-       xfs_rtblock_t   sb_rextents;    /* number of realtime extents */
+       xfs_rtbxlen_t   sb_rextents;    /* number of realtime extents */
        uuid_t          sb_uuid;        /* user-visible file system unique id */
        xfs_fsblock_t   sb_logstart;    /* starting block of log if internal */
        xfs_ino_t       sb_rootino;     /* root inode number */
@@ -690,6 +690,22 @@ struct xfs_agfl {
            ASSERT(xfs_daddr_to_agno(mp, d) == \
                   xfs_daddr_to_agno(mp, (d) + (len) - 1)))
 
+/*
+ * Realtime bitmap information is accessed by the word, which is currently
+ * stored in host-endian format.
+ */
+union xfs_rtword_raw {
+       __u32           old;
+};
+
+/*
+ * Realtime summary counts are accessed by the word, which is currently
+ * stored in host-endian format.
+ */
+union xfs_suminfo_raw {
+       __u32           old;
+};
+
 /*
  * XFS Timestamps
  * ==============
@@ -1142,24 +1158,10 @@ static inline bool xfs_dinode_has_large_extent_counts(
 
 #define        XFS_BLOCKSIZE(mp)       ((mp)->m_sb.sb_blocksize)
 #define        XFS_BLOCKMASK(mp)       ((mp)->m_blockmask)
-#define        XFS_BLOCKWSIZE(mp)      ((mp)->m_blockwsize)
-#define        XFS_BLOCKWMASK(mp)      ((mp)->m_blockwmask)
 
 /*
- * RT Summary and bit manipulation macros.
+ * RT bit manipulation macros.
  */
-#define        XFS_SUMOFFS(mp,ls,bb)   ((int)((ls) * (mp)->m_sb.sb_rbmblocks + (bb)))
-#define        XFS_SUMOFFSTOBLOCK(mp,s)        \
-       (((s) * (uint)sizeof(xfs_suminfo_t)) >> (mp)->m_sb.sb_blocklog)
-#define        XFS_SUMPTR(mp,bp,so)    \
-       ((xfs_suminfo_t *)((bp)->b_addr + \
-               (((so) * (uint)sizeof(xfs_suminfo_t)) & XFS_BLOCKMASK(mp))))
-
-#define        XFS_BITTOBLOCK(mp,bi)   ((bi) >> (mp)->m_blkbit_log)
-#define        XFS_BLOCKTOBIT(mp,bb)   ((bb) << (mp)->m_blkbit_log)
-#define        XFS_BITTOWORD(mp,bi)    \
-       ((int)(((bi) >> XFS_NBWORDLOG) & XFS_BLOCKWMASK(mp)))
-
 #define        XFS_RTMIN(a,b)  ((a) < (b) ? (a) : (b))
 #define        XFS_RTMAX(a,b)  ((a) > (b) ? (a) : (b))
 
index 396648acb5be169dc54f98c657780a34d164f1f3..c269d704314d7d5afc0497985f46c936ea275f27 100644 (file)
@@ -16,6 +16,7 @@
 #include "xfs_trans.h"
 #include "xfs_rtalloc.h"
 #include "xfs_error.h"
+#include "xfs_rtbitmap.h"
 
 /*
  * Realtime allocator bitmap functions shared with userspace.
@@ -46,25 +47,69 @@ const struct xfs_buf_ops xfs_rtbuf_ops = {
        .verify_write = xfs_rtbuf_verify_write,
 };
 
+/* Release cached rt bitmap and summary buffers. */
+void
+xfs_rtbuf_cache_relse(
+       struct xfs_rtalloc_args *args)
+{
+       if (args->rbmbp) {
+               xfs_trans_brelse(args->tp, args->rbmbp);
+               args->rbmbp = NULL;
+               args->rbmoff = NULLFILEOFF;
+       }
+       if (args->sumbp) {
+               xfs_trans_brelse(args->tp, args->sumbp);
+               args->sumbp = NULL;
+               args->sumoff = NULLFILEOFF;
+       }
+}
+
 /*
  * Get a buffer for the bitmap or summary file block specified.
  * The buffer is returned read and locked.
  */
 int
 xfs_rtbuf_get(
-       xfs_mount_t     *mp,            /* file system mount structure */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   block,          /* block number in bitmap or summary */
-       int             issum,          /* is summary not bitmap */
-       struct xfs_buf  **bpp)          /* output: buffer for the block */
+       struct xfs_rtalloc_args *args,
+       xfs_fileoff_t           block,  /* block number in bitmap or summary */
+       int                     issum)  /* is summary not bitmap */
 {
-       struct xfs_buf  *bp;            /* block buffer, result */
-       xfs_inode_t     *ip;            /* bitmap or summary inode */
-       xfs_bmbt_irec_t map;
-       int             nmap = 1;
-       int             error;          /* error value */
+       struct xfs_mount        *mp = args->mp;
+       struct xfs_buf          **cbpp; /* cached block buffer */
+       xfs_fileoff_t           *coffp; /* cached block number */
+       struct xfs_buf          *bp;    /* block buffer, result */
+       struct xfs_inode        *ip;    /* bitmap or summary inode */
+       struct xfs_bmbt_irec    map;
+       enum xfs_blft           type;
+       int                     nmap = 1;
+       int                     error;
 
-       ip = issum ? mp->m_rsumip : mp->m_rbmip;
+       if (issum) {
+               cbpp = &args->sumbp;
+               coffp = &args->sumoff;
+               ip = mp->m_rsumip;
+               type = XFS_BLFT_RTSUMMARY_BUF;
+       } else {
+               cbpp = &args->rbmbp;
+               coffp = &args->rbmoff;
+               ip = mp->m_rbmip;
+               type = XFS_BLFT_RTBITMAP_BUF;
+       }
+
+       /*
+        * If we have a cached buffer, and the block number matches, use that.
+        */
+       if (*cbpp && *coffp == block)
+               return 0;
+
+       /*
+        * Otherwise we have to have to get the buffer.  If there was an old
+        * one, get rid of it first.
+        */
+       if (*cbpp) {
+               xfs_trans_brelse(args->tp, *cbpp);
+               *cbpp = NULL;
+       }
 
        error = xfs_bmapi_read(ip, block, 1, &map, &nmap, 0);
        if (error)
@@ -74,15 +119,15 @@ xfs_rtbuf_get(
                return -EFSCORRUPTED;
 
        ASSERT(map.br_startblock != NULLFSBLOCK);
-       error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
+       error = xfs_trans_read_buf(mp, args->tp, mp->m_ddev_targp,
                                   XFS_FSB_TO_DADDR(mp, map.br_startblock),
                                   mp->m_bsize, 0, &bp, &xfs_rtbuf_ops);
        if (error)
                return error;
 
-       xfs_trans_buf_set_type(tp, bp, issum ? XFS_BLFT_RTSUMMARY_BUF
-                                            : XFS_BLFT_RTBITMAP_BUF);
-       *bpp = bp;
+       xfs_trans_buf_set_type(args->tp, bp, type);
+       *cbpp = bp;
+       *coffp = block;
        return 0;
 }
 
@@ -92,47 +137,44 @@ xfs_rtbuf_get(
  */
 int
 xfs_rtfind_back(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to look at */
-       xfs_rtblock_t   limit,          /* last block to look at */
-       xfs_rtblock_t   *rtblock)       /* out: start block found */
+       struct xfs_rtalloc_args *args,
+       xfs_rtxnum_t            start,  /* starting rtext to look at */
+       xfs_rtxnum_t            limit,  /* last rtext to look at */
+       xfs_rtxnum_t            *rtx)   /* out: start rtext found */
 {
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       struct xfs_buf  *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtblock_t   firstbit;       /* first useful bit in the word */
-       xfs_rtblock_t   i;              /* current bit number rel. to start */
-       xfs_rtblock_t   len;            /* length of inspected area */
-       xfs_rtword_t    mask;           /* mask of relevant bits for value */
-       xfs_rtword_t    want;           /* mask for "good" values */
-       xfs_rtword_t    wdiff;          /* difference from wanted value */
-       int             word;           /* word number in the buffer */
+       struct xfs_mount        *mp = args->mp;
+       int                     bit;    /* bit number in the word */
+       xfs_fileoff_t           block;  /* bitmap block number */
+       int                     error;  /* error value */
+       xfs_rtxnum_t            firstbit; /* first useful bit in the word */
+       xfs_rtxnum_t            i;      /* current bit number rel. to start */
+       xfs_rtxnum_t            len;    /* length of inspected area */
+       xfs_rtword_t            mask;   /* mask of relevant bits for value */
+       xfs_rtword_t            want;   /* mask for "good" values */
+       xfs_rtword_t            wdiff;  /* difference from wanted value */
+       xfs_rtword_t            incore;
+       unsigned int            word;   /* word number in the buffer */
 
        /*
         * Compute and read in starting bitmap block for starting block.
         */
-       block = XFS_BITTOBLOCK(mp, start);
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
+       block = xfs_rtx_to_rbmblock(mp, start);
+       error = xfs_rtbitmap_read_buf(args, block);
+       if (error)
                return error;
-       }
-       bufp = bp->b_addr;
+
        /*
         * Get the first word's index & point to it.
         */
-       word = XFS_BITTOWORD(mp, start);
-       b = &bufp[word];
+       word = xfs_rtx_to_rbmword(mp, start);
        bit = (int)(start & (XFS_NBWORD - 1));
        len = start - limit + 1;
        /*
         * Compute match value, based on the bit at start: if 1 (free)
         * then all-ones, else all-zeroes.
         */
-       want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+       incore = xfs_rtbitmap_getword(args, word);
+       want = (incore & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
        /*
         * If the starting position is not word-aligned, deal with the
         * partial word.
@@ -149,13 +191,12 @@ xfs_rtfind_back(
                 * Calculate the difference between the value there
                 * and what we're looking for.
                 */
-               if ((wdiff = (*b ^ want) & mask)) {
+               if ((wdiff = (incore ^ want) & mask)) {
                        /*
                         * Different.  Mark where we are and return.
                         */
-                       xfs_trans_brelse(tp, bp);
                        i = bit - XFS_RTHIBIT(wdiff);
-                       *rtblock = start - i + 1;
+                       *rtx = start - i + 1;
                        return 0;
                }
                i = bit - firstbit + 1;
@@ -167,19 +208,11 @@ xfs_rtfind_back(
                        /*
                         * If done with this block, get the previous one.
                         */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
-                       if (error) {
+                       error = xfs_rtbitmap_read_buf(args, --block);
+                       if (error)
                                return error;
-                       }
-                       bufp = bp->b_addr;
-                       word = XFS_BLOCKWMASK(mp);
-                       b = &bufp[word];
-               } else {
-                       /*
-                        * Go on to the previous word in the buffer.
-                        */
-                       b--;
+
+                       word = mp->m_blockwsize - 1;
                }
        } else {
                /*
@@ -195,13 +228,13 @@ xfs_rtfind_back(
                /*
                 * Compute difference between actual and desired value.
                 */
-               if ((wdiff = *b ^ want)) {
+               incore = xfs_rtbitmap_getword(args, word);
+               if ((wdiff = incore ^ want)) {
                        /*
                         * Different, mark where we are and return.
                         */
-                       xfs_trans_brelse(tp, bp);
                        i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
-                       *rtblock = start - i + 1;
+                       *rtx = start - i + 1;
                        return 0;
                }
                i += XFS_NBWORD;
@@ -213,19 +246,11 @@ xfs_rtfind_back(
                        /*
                         * If done with this block, get the previous one.
                         */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, --block, 0, &bp);
-                       if (error) {
+                       error = xfs_rtbitmap_read_buf(args, --block);
+                       if (error)
                                return error;
-                       }
-                       bufp = bp->b_addr;
-                       word = XFS_BLOCKWMASK(mp);
-                       b = &bufp[word];
-               } else {
-                       /*
-                        * Go on to the previous word in the buffer.
-                        */
-                       b--;
+
+                       word = mp->m_blockwsize - 1;
                }
        }
        /*
@@ -242,13 +267,13 @@ xfs_rtfind_back(
                /*
                 * Compute difference between actual and desired value.
                 */
-               if ((wdiff = (*b ^ want) & mask)) {
+               incore = xfs_rtbitmap_getword(args, word);
+               if ((wdiff = (incore ^ want) & mask)) {
                        /*
                         * Different, mark where we are and return.
                         */
-                       xfs_trans_brelse(tp, bp);
                        i += XFS_NBWORD - 1 - XFS_RTHIBIT(wdiff);
-                       *rtblock = start - i + 1;
+                       *rtx = start - i + 1;
                        return 0;
                } else
                        i = len;
@@ -256,8 +281,7 @@ xfs_rtfind_back(
        /*
         * No match, return that we scanned the whole area.
         */
-       xfs_trans_brelse(tp, bp);
-       *rtblock = start - i + 1;
+       *rtx = start - i + 1;
        return 0;
 }
 
@@ -267,47 +291,44 @@ xfs_rtfind_back(
  */
 int
 xfs_rtfind_forw(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to look at */
-       xfs_rtblock_t   limit,          /* last block to look at */
-       xfs_rtblock_t   *rtblock)       /* out: start block found */
+       struct xfs_rtalloc_args *args,
+       xfs_rtxnum_t            start,  /* starting rtext to look at */
+       xfs_rtxnum_t            limit,  /* last rtext to look at */
+       xfs_rtxnum_t            *rtx)   /* out: start rtext found */
 {
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       struct xfs_buf  *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtblock_t   i;              /* current bit number rel. to start */
-       xfs_rtblock_t   lastbit;        /* last useful bit in the word */
-       xfs_rtblock_t   len;            /* length of inspected area */
-       xfs_rtword_t    mask;           /* mask of relevant bits for value */
-       xfs_rtword_t    want;           /* mask for "good" values */
-       xfs_rtword_t    wdiff;          /* difference from wanted value */
-       int             word;           /* word number in the buffer */
+       struct xfs_mount        *mp = args->mp;
+       int                     bit;    /* bit number in the word */
+       xfs_fileoff_t           block;  /* bitmap block number */
+       int                     error;
+       xfs_rtxnum_t            i;      /* current bit number rel. to start */
+       xfs_rtxnum_t            lastbit;/* last useful bit in the word */
+       xfs_rtxnum_t            len;    /* length of inspected area */
+       xfs_rtword_t            mask;   /* mask of relevant bits for value */
+       xfs_rtword_t            want;   /* mask for "good" values */
+       xfs_rtword_t            wdiff;  /* difference from wanted value */
+       xfs_rtword_t            incore;
+       unsigned int            word;   /* word number in the buffer */
 
        /*
         * Compute and read in starting bitmap block for starting block.
         */
-       block = XFS_BITTOBLOCK(mp, start);
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
+       block = xfs_rtx_to_rbmblock(mp, start);
+       error = xfs_rtbitmap_read_buf(args, block);
+       if (error)
                return error;
-       }
-       bufp = bp->b_addr;
+
        /*
         * Get the first word's index & point to it.
         */
-       word = XFS_BITTOWORD(mp, start);
-       b = &bufp[word];
+       word = xfs_rtx_to_rbmword(mp, start);
        bit = (int)(start & (XFS_NBWORD - 1));
        len = limit - start + 1;
        /*
         * Compute match value, based on the bit at start: if 1 (free)
         * then all-ones, else all-zeroes.
         */
-       want = (*b & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
+       incore = xfs_rtbitmap_getword(args, word);
+       want = (incore & ((xfs_rtword_t)1 << bit)) ? -1 : 0;
        /*
         * If the starting position is not word-aligned, deal with the
         * partial word.
@@ -323,13 +344,12 @@ xfs_rtfind_forw(
                 * Calculate the difference between the value there
                 * and what we're looking for.
                 */
-               if ((wdiff = (*b ^ want) & mask)) {
+               if ((wdiff = (incore ^ want) & mask)) {
                        /*
                         * Different.  Mark where we are and return.
                         */
-                       xfs_trans_brelse(tp, bp);
                        i = XFS_RTLOBIT(wdiff) - bit;
-                       *rtblock = start + i - 1;
+                       *rtx = start + i - 1;
                        return 0;
                }
                i = lastbit - bit;
@@ -337,22 +357,15 @@ xfs_rtfind_forw(
                 * Go on to next block if that's where the next word is
                 * and we need the next word.
                 */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+               if (++word == mp->m_blockwsize && i < len) {
                        /*
                         * If done with this block, get the previous one.
                         */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
+                       error = xfs_rtbitmap_read_buf(args, ++block);
+                       if (error)
                                return error;
-                       }
-                       b = bufp = bp->b_addr;
+
                        word = 0;
-               } else {
-                       /*
-                        * Go on to the previous word in the buffer.
-                        */
-                       b++;
                }
        } else {
                /*
@@ -368,13 +381,13 @@ xfs_rtfind_forw(
                /*
                 * Compute difference between actual and desired value.
                 */
-               if ((wdiff = *b ^ want)) {
+               incore = xfs_rtbitmap_getword(args, word);
+               if ((wdiff = incore ^ want)) {
                        /*
                         * Different, mark where we are and return.
                         */
-                       xfs_trans_brelse(tp, bp);
                        i += XFS_RTLOBIT(wdiff);
-                       *rtblock = start + i - 1;
+                       *rtx = start + i - 1;
                        return 0;
                }
                i += XFS_NBWORD;
@@ -382,22 +395,15 @@ xfs_rtfind_forw(
                 * Go on to next block if that's where the next word is
                 * and we need the next word.
                 */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+               if (++word == mp->m_blockwsize && i < len) {
                        /*
                         * If done with this block, get the next one.
                         */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
+                       error = xfs_rtbitmap_read_buf(args, ++block);
+                       if (error)
                                return error;
-                       }
-                       b = bufp = bp->b_addr;
+
                        word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer.
-                        */
-                       b++;
                }
        }
        /*
@@ -412,13 +418,13 @@ xfs_rtfind_forw(
                /*
                 * Compute difference between actual and desired value.
                 */
-               if ((wdiff = (*b ^ want) & mask)) {
+               incore = xfs_rtbitmap_getword(args, word);
+               if ((wdiff = (incore ^ want) & mask)) {
                        /*
                         * Different, mark where we are and return.
                         */
-                       xfs_trans_brelse(tp, bp);
                        i += XFS_RTLOBIT(wdiff);
-                       *rtblock = start + i - 1;
+                       *rtx = start + i - 1;
                        return 0;
                } else
                        i = len;
@@ -426,11 +432,25 @@ xfs_rtfind_forw(
        /*
         * No match, return that we scanned the whole area.
         */
-       xfs_trans_brelse(tp, bp);
-       *rtblock = start + i - 1;
+       *rtx = start + i - 1;
        return 0;
 }
 
+/* Log rtsummary counter at @infoword. */
+static inline void
+xfs_trans_log_rtsummary(
+       struct xfs_rtalloc_args *args,
+       unsigned int            infoword)
+{
+       struct xfs_buf          *bp = args->sumbp;
+       size_t                  first, last;
+
+       first = (void *)xfs_rsumblock_infoptr(args, infoword) - bp->b_addr;
+       last = first + sizeof(xfs_suminfo_t) - 1;
+
+       xfs_trans_log_buf(args->tp, bp, first, last);
+}
+
 /*
  * Read and/or modify the summary information for a given extent size,
  * bitmap block combination.
@@ -442,86 +462,77 @@ xfs_rtfind_forw(
  */
 int
 xfs_rtmodify_summary_int(
-       xfs_mount_t     *mp,            /* file system mount structure */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       int             log,            /* log2 of extent size */
-       xfs_rtblock_t   bbno,           /* bitmap block number */
-       int             delta,          /* change to make to summary info */
-       struct xfs_buf  **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       xfs_suminfo_t   *sum)           /* out: summary info for this block */
+       struct xfs_rtalloc_args *args,
+       int                     log,    /* log2 of extent size */
+       xfs_fileoff_t           bbno,   /* bitmap block number */
+       int                     delta,  /* change to make to summary info */
+       xfs_suminfo_t           *sum)   /* out: summary info for this block */
 {
-       struct xfs_buf  *bp;            /* buffer for the summary block */
-       int             error;          /* error value */
-       xfs_fsblock_t   sb;             /* summary fsblock */
-       int             so;             /* index into the summary file */
-       xfs_suminfo_t   *sp;            /* pointer to returned data */
+       struct xfs_mount        *mp = args->mp;
+       int                     error;
+       xfs_fileoff_t           sb;     /* summary fsblock */
+       xfs_rtsumoff_t          so;     /* index into the summary file */
+       unsigned int            infoword;
 
        /*
         * Compute entry number in the summary file.
         */
-       so = XFS_SUMOFFS(mp, log, bbno);
+       so = xfs_rtsumoffs(mp, log, bbno);
        /*
         * Compute the block number in the summary file.
         */
-       sb = XFS_SUMOFFSTOBLOCK(mp, so);
-       /*
-        * If we have an old buffer, and the block number matches, use that.
-        */
-       if (*rbpp && *rsb == sb)
-               bp = *rbpp;
-       /*
-        * Otherwise we have to get the buffer.
-        */
-       else {
-               /*
-                * If there was an old one, get rid of it first.
-                */
-               if (*rbpp)
-                       xfs_trans_brelse(tp, *rbpp);
-               error = xfs_rtbuf_get(mp, tp, sb, 1, &bp);
-               if (error) {
-                       return error;
-               }
-               /*
-                * Remember this buffer and block for the next call.
-                */
-               *rbpp = bp;
-               *rsb = sb;
-       }
+       sb = xfs_rtsumoffs_to_block(mp, so);
+
+       error = xfs_rtsummary_read_buf(args, sb);
+       if (error)
+               return error;
+
        /*
         * Point to the summary information, modify/log it, and/or copy it out.
         */
-       sp = XFS_SUMPTR(mp, bp, so);
+       infoword = xfs_rtsumoffs_to_infoword(mp, so);
        if (delta) {
-               uint first = (uint)((char *)sp - (char *)bp->b_addr);
+               xfs_suminfo_t   val = xfs_suminfo_add(args, infoword, delta);
 
-               *sp += delta;
                if (mp->m_rsum_cache) {
-                       if (*sp == 0 && log == mp->m_rsum_cache[bbno])
-                               mp->m_rsum_cache[bbno]++;
-                       if (*sp != 0 && log < mp->m_rsum_cache[bbno])
+                       if (val == 0 && log + 1 == mp->m_rsum_cache[bbno])
                                mp->m_rsum_cache[bbno] = log;
+                       if (val != 0 && log >= mp->m_rsum_cache[bbno])
+                               mp->m_rsum_cache[bbno] = log + 1;
                }
-               xfs_trans_log_buf(tp, bp, first, first + sizeof(*sp) - 1);
+               xfs_trans_log_rtsummary(args, infoword);
+               if (sum)
+                       *sum = val;
+       } else if (sum) {
+               *sum = xfs_suminfo_get(args, infoword);
        }
-       if (sum)
-               *sum = *sp;
        return 0;
 }
 
 int
 xfs_rtmodify_summary(
-       xfs_mount_t     *mp,            /* file system mount structure */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       int             log,            /* log2 of extent size */
-       xfs_rtblock_t   bbno,           /* bitmap block number */
-       int             delta,          /* change to make to summary info */
-       struct xfs_buf  **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
+       struct xfs_rtalloc_args *args,
+       int                     log,    /* log2 of extent size */
+       xfs_fileoff_t           bbno,   /* bitmap block number */
+       int                     delta)  /* change to make to summary info */
+{
+       return xfs_rtmodify_summary_int(args, log, bbno, delta, NULL);
+}
+
+/* Log rtbitmap block from the word @from to the byte before @next. */
+static inline void
+xfs_trans_log_rtbitmap(
+       struct xfs_rtalloc_args *args,
+       unsigned int            from,
+       unsigned int            next)
 {
-       return xfs_rtmodify_summary_int(mp, tp, log, bbno,
-                                       delta, rbpp, rsb, NULL);
+       struct xfs_buf          *bp = args->rbmbp;
+       size_t                  first, last;
+
+       first = (void *)xfs_rbmblock_wordptr(args, from) - bp->b_addr;
+       last = ((void *)xfs_rbmblock_wordptr(args, next) - 1) - bp->b_addr;
+
+       xfs_trans_log_buf(args->tp, bp, first, last);
 }
 
 /*
@@ -530,41 +541,37 @@ xfs_rtmodify_summary(
  */
 int
 xfs_rtmodify_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to modify */
-       xfs_extlen_t    len,            /* length of extent to modify */
-       int             val)            /* 1 for free, 0 for allocated */
+       struct xfs_rtalloc_args *args,
+       xfs_rtxnum_t            start,  /* starting rtext to modify */
+       xfs_rtxlen_t            len,    /* length of extent to modify */
+       int                     val)    /* 1 for free, 0 for allocated */
 {
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       struct xfs_buf  *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtword_t    *first;         /* first used word in the buffer */
-       int             i;              /* current bit number rel. to start */
-       int             lastbit;        /* last useful bit in word */
-       xfs_rtword_t    mask;           /* mask o frelevant bits for value */
-       int             word;           /* word number in the buffer */
+       struct xfs_mount        *mp = args->mp;
+       int                     bit;    /* bit number in the word */
+       xfs_fileoff_t           block;  /* bitmap block number */
+       int                     error;
+       int                     i;      /* current bit number rel. to start */
+       int                     lastbit; /* last useful bit in word */
+       xfs_rtword_t            mask;    /* mask of relevant bits for value */
+       xfs_rtword_t            incore;
+       unsigned int            firstword; /* first word used in the buffer */
+       unsigned int            word;   /* word number in the buffer */
 
        /*
         * Compute starting bitmap block number.
         */
-       block = XFS_BITTOBLOCK(mp, start);
+       block = xfs_rtx_to_rbmblock(mp, start);
        /*
         * Read the bitmap block, and point to its data.
         */
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
+       error = xfs_rtbitmap_read_buf(args, block);
+       if (error)
                return error;
-       }
-       bufp = bp->b_addr;
+
        /*
         * Compute the starting word's address, and starting bit.
         */
-       word = XFS_BITTOWORD(mp, start);
-       first = b = &bufp[word];
+       firstword = word = xfs_rtx_to_rbmword(mp, start);
        bit = (int)(start & (XFS_NBWORD - 1));
        /*
         * 0 (allocated) => all zeroes; 1 (free) => all ones.
@@ -583,34 +590,28 @@ xfs_rtmodify_range(
                /*
                 * Set/clear the active bits.
                 */
+               incore = xfs_rtbitmap_getword(args, word);
                if (val)
-                       *b |= mask;
+                       incore |= mask;
                else
-                       *b &= ~mask;
+                       incore &= ~mask;
+               xfs_rtbitmap_setword(args, word, incore);
                i = lastbit - bit;
                /*
                 * Go on to the next block if that's where the next word is
                 * and we need the next word.
                 */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+               if (++word == mp->m_blockwsize && i < len) {
                        /*
                         * Log the changed part of this block.
                         * Get the next one.
                         */
-                       xfs_trans_log_buf(tp, bp,
-                               (uint)((char *)first - (char *)bufp),
-                               (uint)((char *)b - (char *)bufp));
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
+                       xfs_trans_log_rtbitmap(args, firstword, word);
+                       error = xfs_rtbitmap_read_buf(args, ++block);
+                       if (error)
                                return error;
-                       }
-                       first = b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer
-                        */
-                       b++;
+
+                       firstword = word = 0;
                }
        } else {
                /*
@@ -626,31 +627,23 @@ xfs_rtmodify_range(
                /*
                 * Set the word value correctly.
                 */
-               *b = val;
+               xfs_rtbitmap_setword(args, word, val);
                i += XFS_NBWORD;
                /*
                 * Go on to the next block if that's where the next word is
                 * and we need the next word.
                 */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+               if (++word == mp->m_blockwsize && i < len) {
                        /*
                         * Log the changed part of this block.
                         * Get the next one.
                         */
-                       xfs_trans_log_buf(tp, bp,
-                               (uint)((char *)first - (char *)bufp),
-                               (uint)((char *)b - (char *)bufp));
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
+                       xfs_trans_log_rtbitmap(args, firstword, word);
+                       error = xfs_rtbitmap_read_buf(args, ++block);
+                       if (error)
                                return error;
-                       }
-                       first = b = bufp = bp->b_addr;
-                       word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer
-                        */
-                       b++;
+
+                       firstword = word = 0;
                }
        }
        /*
@@ -665,18 +658,19 @@ xfs_rtmodify_range(
                /*
                 * Set/clear the active bits.
                 */
+               incore = xfs_rtbitmap_getword(args, word);
                if (val)
-                       *b |= mask;
+                       incore |= mask;
                else
-                       *b &= ~mask;
-               b++;
+                       incore &= ~mask;
+               xfs_rtbitmap_setword(args, word, incore);
+               word++;
        }
        /*
         * Log any remaining changed bytes.
         */
-       if (b > first)
-               xfs_trans_log_buf(tp, bp, (uint)((char *)first - (char *)bufp),
-                       (uint)((char *)b - (char *)bufp - 1));
+       if (word > firstword)
+               xfs_trans_log_rtbitmap(args, firstword, word);
        return 0;
 }
 
@@ -686,23 +680,21 @@ xfs_rtmodify_range(
  */
 int
 xfs_rtfree_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block to free */
-       xfs_extlen_t    len,            /* length to free */
-       struct xfs_buf  **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
+       struct xfs_rtalloc_args *args,
+       xfs_rtxnum_t            start,  /* starting rtext to free */
+       xfs_rtxlen_t            len)    /* length to free */
 {
-       xfs_rtblock_t   end;            /* end of the freed extent */
-       int             error;          /* error value */
-       xfs_rtblock_t   postblock;      /* first block freed > end */
-       xfs_rtblock_t   preblock;       /* first block freed < start */
+       struct xfs_mount        *mp = args->mp;
+       xfs_rtxnum_t            end;    /* end of the freed extent */
+       int                     error;  /* error value */
+       xfs_rtxnum_t            postblock; /* first rtext freed > end */
+       xfs_rtxnum_t            preblock;  /* first rtext freed < start */
 
        end = start + len - 1;
        /*
         * Modify the bitmap to mark this extent freed.
         */
-       error = xfs_rtmodify_range(mp, tp, start, len, 1);
+       error = xfs_rtmodify_range(args, start, len, 1);
        if (error) {
                return error;
        }
@@ -711,15 +703,15 @@ xfs_rtfree_range(
         * We need to find the beginning and end of the extent so we can
         * properly update the summary.
         */
-       error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+       error = xfs_rtfind_back(args, start, 0, &preblock);
        if (error) {
                return error;
        }
        /*
         * Find the next allocated block (end of allocated extent).
         */
-       error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
-               &postblock);
+       error = xfs_rtfind_forw(args, end, mp->m_sb.sb_rextents - 1,
+                       &postblock);
        if (error)
                return error;
        /*
@@ -727,9 +719,9 @@ xfs_rtfree_range(
         * old extent, add summary data for them to be allocated.
         */
        if (preblock < start) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(start - preblock),
-                       XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+               error = xfs_rtmodify_summary(args,
+                               XFS_RTBLOCKLOG(start - preblock),
+                               xfs_rtx_to_rbmblock(mp, preblock), -1);
                if (error) {
                        return error;
                }
@@ -739,9 +731,9 @@ xfs_rtfree_range(
         * old extent, add summary data for them to be allocated.
         */
        if (postblock > end) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(postblock - end),
-                       XFS_BITTOBLOCK(mp, end + 1), -1, rbpp, rsb);
+               error = xfs_rtmodify_summary(args,
+                               XFS_RTBLOCKLOG(postblock - end),
+                               xfs_rtx_to_rbmblock(mp, end + 1), -1);
                if (error) {
                        return error;
                }
@@ -750,10 +742,9 @@ xfs_rtfree_range(
         * Increment the summary information corresponding to the entire
         * (new) free extent.
         */
-       error = xfs_rtmodify_summary(mp, tp,
-               XFS_RTBLOCKLOG(postblock + 1 - preblock),
-               XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
-       return error;
+       return xfs_rtmodify_summary(args,
+                       XFS_RTBLOCKLOG(postblock + 1 - preblock),
+                       xfs_rtx_to_rbmblock(mp, preblock), 1);
 }
 
 /*
@@ -762,43 +753,39 @@ xfs_rtfree_range(
  */
 int
 xfs_rtcheck_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* starting block number of extent */
-       xfs_extlen_t    len,            /* length of extent */
-       int             val,            /* 1 for free, 0 for allocated */
-       xfs_rtblock_t   *new,           /* out: first block not matching */
-       int             *stat)          /* out: 1 for matches, 0 for not */
+       struct xfs_rtalloc_args *args,
+       xfs_rtxnum_t            start,  /* starting rtext number of extent */
+       xfs_rtxlen_t            len,    /* length of extent */
+       int                     val,    /* 1 for free, 0 for allocated */
+       xfs_rtxnum_t            *new,   /* out: first rtext not matching */
+       int                     *stat)  /* out: 1 for matches, 0 for not */
 {
-       xfs_rtword_t    *b;             /* current word in buffer */
-       int             bit;            /* bit number in the word */
-       xfs_rtblock_t   block;          /* bitmap block number */
-       struct xfs_buf  *bp;            /* buf for the block */
-       xfs_rtword_t    *bufp;          /* starting word in buffer */
-       int             error;          /* error value */
-       xfs_rtblock_t   i;              /* current bit number rel. to start */
-       xfs_rtblock_t   lastbit;        /* last useful bit in word */
-       xfs_rtword_t    mask;           /* mask of relevant bits for value */
-       xfs_rtword_t    wdiff;          /* difference from wanted value */
-       int             word;           /* word number in the buffer */
+       struct xfs_mount        *mp = args->mp;
+       int                     bit;    /* bit number in the word */
+       xfs_fileoff_t           block;  /* bitmap block number */
+       int                     error;
+       xfs_rtxnum_t            i;      /* current bit number rel. to start */
+       xfs_rtxnum_t            lastbit; /* last useful bit in word */
+       xfs_rtword_t            mask;   /* mask of relevant bits for value */
+       xfs_rtword_t            wdiff;  /* difference from wanted value */
+       xfs_rtword_t            incore;
+       unsigned int            word;   /* word number in the buffer */
 
        /*
         * Compute starting bitmap block number
         */
-       block = XFS_BITTOBLOCK(mp, start);
+       block = xfs_rtx_to_rbmblock(mp, start);
        /*
         * Read the bitmap block.
         */
-       error = xfs_rtbuf_get(mp, tp, block, 0, &bp);
-       if (error) {
+       error = xfs_rtbitmap_read_buf(args, block);
+       if (error)
                return error;
-       }
-       bufp = bp->b_addr;
+
        /*
         * Compute the starting word's address, and starting bit.
         */
-       word = XFS_BITTOWORD(mp, start);
-       b = &bufp[word];
+       word = xfs_rtx_to_rbmword(mp, start);
        bit = (int)(start & (XFS_NBWORD - 1));
        /*
         * 0 (allocated) => all zero's; 1 (free) => all one's.
@@ -820,11 +807,11 @@ xfs_rtcheck_range(
                /*
                 * Compute difference between actual and desired value.
                 */
-               if ((wdiff = (*b ^ val) & mask)) {
+               incore = xfs_rtbitmap_getword(args, word);
+               if ((wdiff = (incore ^ val) & mask)) {
                        /*
                         * Different, compute first wrong bit and return.
                         */
-                       xfs_trans_brelse(tp, bp);
                        i = XFS_RTLOBIT(wdiff) - bit;
                        *new = start + i;
                        *stat = 0;
@@ -835,22 +822,15 @@ xfs_rtcheck_range(
                 * Go on to next block if that's where the next word is
                 * and we need the next word.
                 */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+               if (++word == mp->m_blockwsize && i < len) {
                        /*
                         * If done with this block, get the next one.
                         */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
+                       error = xfs_rtbitmap_read_buf(args, ++block);
+                       if (error)
                                return error;
-                       }
-                       b = bufp = bp->b_addr;
+
                        word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer.
-                        */
-                       b++;
                }
        } else {
                /*
@@ -866,11 +846,11 @@ xfs_rtcheck_range(
                /*
                 * Compute difference between actual and desired value.
                 */
-               if ((wdiff = *b ^ val)) {
+               incore = xfs_rtbitmap_getword(args, word);
+               if ((wdiff = incore ^ val)) {
                        /*
                         * Different, compute first wrong bit and return.
                         */
-                       xfs_trans_brelse(tp, bp);
                        i += XFS_RTLOBIT(wdiff);
                        *new = start + i;
                        *stat = 0;
@@ -881,22 +861,15 @@ xfs_rtcheck_range(
                 * Go on to next block if that's where the next word is
                 * and we need the next word.
                 */
-               if (++word == XFS_BLOCKWSIZE(mp) && i < len) {
+               if (++word == mp->m_blockwsize && i < len) {
                        /*
                         * If done with this block, get the next one.
                         */
-                       xfs_trans_brelse(tp, bp);
-                       error = xfs_rtbuf_get(mp, tp, ++block, 0, &bp);
-                       if (error) {
+                       error = xfs_rtbitmap_read_buf(args, ++block);
+                       if (error)
                                return error;
-                       }
-                       b = bufp = bp->b_addr;
+
                        word = 0;
-               } else {
-                       /*
-                        * Go on to the next word in the buffer.
-                        */
-                       b++;
                }
        }
        /*
@@ -911,11 +884,11 @@ xfs_rtcheck_range(
                /*
                 * Compute difference between actual and desired value.
                 */
-               if ((wdiff = (*b ^ val) & mask)) {
+               incore = xfs_rtbitmap_getword(args, word);
+               if ((wdiff = (incore ^ val) & mask)) {
                        /*
                         * Different, compute first wrong bit and return.
                         */
-                       xfs_trans_brelse(tp, bp);
                        i += XFS_RTLOBIT(wdiff);
                        *new = start + i;
                        *stat = 0;
@@ -926,7 +899,6 @@ xfs_rtcheck_range(
        /*
         * Successful, return.
         */
-       xfs_trans_brelse(tp, bp);
        *new = start + i;
        *stat = 1;
        return 0;
@@ -936,58 +908,57 @@ xfs_rtcheck_range(
 /*
  * Check that the given extent (block range) is allocated already.
  */
-STATIC int                             /* error */
+STATIC int
 xfs_rtcheck_alloc_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   bno,            /* starting block number of extent */
-       xfs_extlen_t    len)            /* length of extent */
+       struct xfs_rtalloc_args *args,
+       xfs_rtxnum_t            start,  /* starting rtext number of extent */
+       xfs_rtxlen_t            len)    /* length of extent */
 {
-       xfs_rtblock_t   new;            /* dummy for xfs_rtcheck_range */
-       int             stat;
-       int             error;
+       xfs_rtxnum_t            new;    /* dummy for xfs_rtcheck_range */
+       int                     stat;
+       int                     error;
 
-       error = xfs_rtcheck_range(mp, tp, bno, len, 0, &new, &stat);
+       error = xfs_rtcheck_range(args, start, len, 0, &new, &stat);
        if (error)
                return error;
        ASSERT(stat);
        return 0;
 }
 #else
-#define xfs_rtcheck_alloc_range(m,t,b,l)       (0)
+#define xfs_rtcheck_alloc_range(a,b,l) (0)
 #endif
 /*
  * Free an extent in the realtime subvolume.  Length is expressed in
  * realtime extents, as is the block number.
  */
-int                                    /* error */
+int
 xfs_rtfree_extent(
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   bno,            /* starting block number to free */
-       xfs_extlen_t    len)            /* length of extent freed */
+       struct xfs_trans        *tp,    /* transaction pointer */
+       xfs_rtxnum_t            start,  /* starting rtext number to free */
+       xfs_rtxlen_t            len)    /* length of extent freed */
 {
-       int             error;          /* error value */
-       xfs_mount_t     *mp;            /* file system mount structure */
-       xfs_fsblock_t   sb;             /* summary file block number */
-       struct xfs_buf  *sumbp = NULL;  /* summary file block buffer */
-       struct timespec64 atime;
-
-       mp = tp->t_mountp;
+       struct xfs_mount        *mp = tp->t_mountp;
+       struct xfs_rtalloc_args args = {
+               .mp             = mp,
+               .tp             = tp,
+       };
+       int                     error;
+       struct timespec64       atime;
 
        ASSERT(mp->m_rbmip->i_itemp != NULL);
        ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
 
-       error = xfs_rtcheck_alloc_range(mp, tp, bno, len);
+       error = xfs_rtcheck_alloc_range(&args, start, len);
        if (error)
                return error;
 
        /*
         * Free the range of realtime blocks.
         */
-       error = xfs_rtfree_range(mp, tp, bno, len, &sumbp, &sb);
-       if (error) {
-               return error;
-       }
+       error = xfs_rtfree_range(&args, start, len);
+       if (error)
+               goto out;
+
        /*
         * Mark more blocks free in the superblock.
         */
@@ -1002,11 +973,47 @@ xfs_rtfree_extent(
                        mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
 
                atime = inode_get_atime(VFS_I(mp->m_rbmip));
-               *((uint64_t *)&atime) = 0;
+               atime.tv_sec = 0;
                inode_set_atime_to_ts(VFS_I(mp->m_rbmip), atime);
                xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
        }
-       return 0;
+       error = 0;
+out:
+       xfs_rtbuf_cache_relse(&args);
+       return error;
+}
+
+/*
+ * Free some blocks in the realtime subvolume.  rtbno and rtlen are in units of
+ * rt blocks, not rt extents; must be aligned to the rt extent size; and rtlen
+ * cannot exceed XFS_MAX_BMBT_EXTLEN.
+ */
+int
+xfs_rtfree_blocks(
+       struct xfs_trans        *tp,
+       xfs_fsblock_t           rtbno,
+       xfs_filblks_t           rtlen)
+{
+       struct xfs_mount        *mp = tp->t_mountp;
+       xfs_rtxnum_t            start;
+       xfs_filblks_t           len;
+       xfs_extlen_t            mod;
+
+       ASSERT(rtlen <= XFS_MAX_BMBT_EXTLEN);
+
+       len = xfs_rtb_to_rtxrem(mp, rtlen, &mod);
+       if (mod) {
+               ASSERT(mod == 0);
+               return -EIO;
+       }
+
+       start = xfs_rtb_to_rtxrem(mp, rtbno, &mod);
+       if (mod) {
+               ASSERT(mod == 0);
+               return -EIO;
+       }
+
+       return xfs_rtfree_extent(tp, start, len);
 }
 
 /* Find all the free records within a given range. */
@@ -1019,10 +1026,14 @@ xfs_rtalloc_query_range(
        xfs_rtalloc_query_range_fn      fn,
        void                            *priv)
 {
+       struct xfs_rtalloc_args         args = {
+               .mp                     = mp,
+               .tp                     = tp,
+       };
        struct xfs_rtalloc_rec          rec;
-       xfs_rtblock_t                   rtstart;
-       xfs_rtblock_t                   rtend;
-       xfs_rtblock_t                   high_key;
+       xfs_rtxnum_t                    rtstart;
+       xfs_rtxnum_t                    rtend;
+       xfs_rtxnum_t                    high_key;
        int                             is_free;
        int                             error = 0;
 
@@ -1038,13 +1049,13 @@ xfs_rtalloc_query_range(
        rtstart = low_rec->ar_startext;
        while (rtstart <= high_key) {
                /* Is the first block free? */
-               error = xfs_rtcheck_range(mp, tp, rtstart, 1, 1, &rtend,
+               error = xfs_rtcheck_range(&args, rtstart, 1, 1, &rtend,
                                &is_free);
                if (error)
                        break;
 
                /* How long does the extent go for? */
-               error = xfs_rtfind_forw(mp, tp, rtstart, high_key, &rtend);
+               error = xfs_rtfind_forw(&args, rtstart, high_key, &rtend);
                if (error)
                        break;
 
@@ -1060,6 +1071,7 @@ xfs_rtalloc_query_range(
                rtstart = rtend + 1;
        }
 
+       xfs_rtbuf_cache_relse(&args);
        return error;
 }
 
@@ -1085,18 +1097,79 @@ int
 xfs_rtalloc_extent_is_free(
        struct xfs_mount                *mp,
        struct xfs_trans                *tp,
-       xfs_rtblock_t                   start,
-       xfs_extlen_t                    len,
+       xfs_rtxnum_t                    start,
+       xfs_rtxlen_t                    len,
        bool                            *is_free)
 {
-       xfs_rtblock_t                   end;
+       struct xfs_rtalloc_args         args = {
+               .mp                     = mp,
+               .tp                     = tp,
+       };
+       xfs_rtxnum_t                    end;
        int                             matches;
        int                             error;
 
-       error = xfs_rtcheck_range(mp, tp, start, len, 1, &end, &matches);
+       error = xfs_rtcheck_range(&args, start, len, 1, &end, &matches);
+       xfs_rtbuf_cache_relse(&args);
        if (error)
                return error;
 
        *is_free = matches;
        return 0;
 }
+
+/*
+ * Compute the number of rtbitmap blocks needed to track the given number of rt
+ * extents.
+ */
+xfs_filblks_t
+xfs_rtbitmap_blockcount(
+       struct xfs_mount        *mp,
+       xfs_rtbxlen_t           rtextents)
+{
+       return howmany_64(rtextents, NBBY * mp->m_sb.sb_blocksize);
+}
+
+/*
+ * Compute the number of rtbitmap words needed to populate every block of a
+ * bitmap that is large enough to track the given number of rt extents.
+ */
+unsigned long long
+xfs_rtbitmap_wordcount(
+       struct xfs_mount        *mp,
+       xfs_rtbxlen_t           rtextents)
+{
+       xfs_filblks_t           blocks;
+
+       blocks = xfs_rtbitmap_blockcount(mp, rtextents);
+       return XFS_FSB_TO_B(mp, blocks) >> XFS_WORDLOG;
+}
+
+/* Compute the number of rtsummary blocks needed to track the given rt space. */
+xfs_filblks_t
+xfs_rtsummary_blockcount(
+       struct xfs_mount        *mp,
+       unsigned int            rsumlevels,
+       xfs_extlen_t            rbmblocks)
+{
+       unsigned long long      rsumwords;
+
+       rsumwords = (unsigned long long)rsumlevels * rbmblocks;
+       return XFS_B_TO_FSB(mp, rsumwords << XFS_WORDLOG);
+}
+
+/*
+ * Compute the number of rtsummary info words needed to populate every block of
+ * a summary file that is large enough to track the given rt space.
+ */
+unsigned long long
+xfs_rtsummary_wordcount(
+       struct xfs_mount        *mp,
+       unsigned int            rsumlevels,
+       xfs_extlen_t            rbmblocks)
+{
+       xfs_filblks_t           blocks;
+
+       blocks = xfs_rtsummary_blockcount(mp, rsumlevels, rbmblocks);
+       return XFS_FSB_TO_B(mp, blocks) >> XFS_WORDLOG;
+}
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.h b/fs/xfs/libxfs/xfs_rtbitmap.h
new file mode 100644 (file)
index 0000000..c063705
--- /dev/null
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
+ * All Rights Reserved.
+ */
+#ifndef __XFS_RTBITMAP_H__
+#define        __XFS_RTBITMAP_H__
+
+struct xfs_rtalloc_args {
+       struct xfs_mount        *mp;
+       struct xfs_trans        *tp;
+
+       struct xfs_buf          *rbmbp; /* bitmap block buffer */
+       struct xfs_buf          *sumbp; /* summary block buffer */
+
+       xfs_fileoff_t           rbmoff; /* bitmap block number */
+       xfs_fileoff_t           sumoff; /* summary block number */
+};
+
+static inline xfs_rtblock_t
+xfs_rtx_to_rtb(
+       struct xfs_mount        *mp,
+       xfs_rtxnum_t            rtx)
+{
+       if (mp->m_rtxblklog >= 0)
+               return rtx << mp->m_rtxblklog;
+
+       return rtx * mp->m_sb.sb_rextsize;
+}
+
+static inline xfs_extlen_t
+xfs_rtxlen_to_extlen(
+       struct xfs_mount        *mp,
+       xfs_rtxlen_t            rtxlen)
+{
+       if (mp->m_rtxblklog >= 0)
+               return rtxlen << mp->m_rtxblklog;
+
+       return rtxlen * mp->m_sb.sb_rextsize;
+}
+
+/* Compute the misalignment between an extent length and a realtime extent. */
+static inline unsigned int
+xfs_extlen_to_rtxmod(
+       struct xfs_mount        *mp,
+       xfs_extlen_t            len)
+{
+       if (mp->m_rtxblklog >= 0)
+               return len & mp->m_rtxblkmask;
+
+       return len % mp->m_sb.sb_rextsize;
+}
+
+static inline xfs_rtxlen_t
+xfs_extlen_to_rtxlen(
+       struct xfs_mount        *mp,
+       xfs_extlen_t            len)
+{
+       if (mp->m_rtxblklog >= 0)
+               return len >> mp->m_rtxblklog;
+
+       return len / mp->m_sb.sb_rextsize;
+}
+
+/* Convert an rt block number into an rt extent number. */
+static inline xfs_rtxnum_t
+xfs_rtb_to_rtx(
+       struct xfs_mount        *mp,
+       xfs_rtblock_t           rtbno)
+{
+       if (likely(mp->m_rtxblklog >= 0))
+               return rtbno >> mp->m_rtxblklog;
+
+       return div_u64(rtbno, mp->m_sb.sb_rextsize);
+}
+
+/* Return the offset of an rt block number within an rt extent. */
+static inline xfs_extlen_t
+xfs_rtb_to_rtxoff(
+       struct xfs_mount        *mp,
+       xfs_rtblock_t           rtbno)
+{
+       if (likely(mp->m_rtxblklog >= 0))
+               return rtbno & mp->m_rtxblkmask;
+
+       return do_div(rtbno, mp->m_sb.sb_rextsize);
+}
+
+/*
+ * Crack an rt block number into an rt extent number and an offset within that
+ * rt extent.  Returns the rt extent number directly and the offset in @off.
+ */
+static inline xfs_rtxnum_t
+xfs_rtb_to_rtxrem(
+       struct xfs_mount        *mp,
+       xfs_rtblock_t           rtbno,
+       xfs_extlen_t            *off)
+{
+       if (likely(mp->m_rtxblklog >= 0)) {
+               *off = rtbno & mp->m_rtxblkmask;
+               return rtbno >> mp->m_rtxblklog;
+       }
+
+       return div_u64_rem(rtbno, mp->m_sb.sb_rextsize, off);
+}
+
+/*
+ * Convert an rt block number into an rt extent number, rounding up to the next
+ * rt extent if the rt block is not aligned to an rt extent boundary.
+ */
+static inline xfs_rtxnum_t
+xfs_rtb_to_rtxup(
+       struct xfs_mount        *mp,
+       xfs_rtblock_t           rtbno)
+{
+       if (likely(mp->m_rtxblklog >= 0)) {
+               if (rtbno & mp->m_rtxblkmask)
+                       return (rtbno >> mp->m_rtxblklog) + 1;
+               return rtbno >> mp->m_rtxblklog;
+       }
+
+       if (do_div(rtbno, mp->m_sb.sb_rextsize))
+               rtbno++;
+       return rtbno;
+}
+
+/* Round this rtblock up to the nearest rt extent size. */
+static inline xfs_rtblock_t
+xfs_rtb_roundup_rtx(
+       struct xfs_mount        *mp,
+       xfs_rtblock_t           rtbno)
+{
+       return roundup_64(rtbno, mp->m_sb.sb_rextsize);
+}
+
+/* Round this rtblock down to the nearest rt extent size. */
+static inline xfs_rtblock_t
+xfs_rtb_rounddown_rtx(
+       struct xfs_mount        *mp,
+       xfs_rtblock_t           rtbno)
+{
+       return rounddown_64(rtbno, mp->m_sb.sb_rextsize);
+}
+
+/* Convert an rt extent number to a file block offset in the rt bitmap file. */
+static inline xfs_fileoff_t
+xfs_rtx_to_rbmblock(
+       struct xfs_mount        *mp,
+       xfs_rtxnum_t            rtx)
+{
+       return rtx >> mp->m_blkbit_log;
+}
+
+/* Convert an rt extent number to a word offset within an rt bitmap block. */
+static inline unsigned int
+xfs_rtx_to_rbmword(
+       struct xfs_mount        *mp,
+       xfs_rtxnum_t            rtx)
+{
+       return (rtx >> XFS_NBWORDLOG) & (mp->m_blockwsize - 1);
+}
+
+/* Convert a file block offset in the rt bitmap file to an rt extent number. */
+static inline xfs_rtxnum_t
+xfs_rbmblock_to_rtx(
+       struct xfs_mount        *mp,
+       xfs_fileoff_t           rbmoff)
+{
+       return rbmoff << mp->m_blkbit_log;
+}
+
+/* Return a pointer to a bitmap word within a rt bitmap block. */
+static inline union xfs_rtword_raw *
+xfs_rbmblock_wordptr(
+       struct xfs_rtalloc_args *args,
+       unsigned int            index)
+{
+       union xfs_rtword_raw    *words = args->rbmbp->b_addr;
+
+       return words + index;
+}
+
+/* Convert an ondisk bitmap word to its incore representation. */
+static inline xfs_rtword_t
+xfs_rtbitmap_getword(
+       struct xfs_rtalloc_args *args,
+       unsigned int            index)
+{
+       union xfs_rtword_raw    *word = xfs_rbmblock_wordptr(args, index);
+
+       return word->old;
+}
+
+/* Set an ondisk bitmap word from an incore representation. */
+static inline void
+xfs_rtbitmap_setword(
+       struct xfs_rtalloc_args *args,
+       unsigned int            index,
+       xfs_rtword_t            value)
+{
+       union xfs_rtword_raw    *word = xfs_rbmblock_wordptr(args, index);
+
+       word->old = value;
+}
+
+/*
+ * Convert an rt extent length and rt bitmap block number to an xfs_suminfo_t
+ * offset within the rt summary file.
+ */
+static inline xfs_rtsumoff_t
+xfs_rtsumoffs(
+       struct xfs_mount        *mp,
+       int                     log2_len,
+       xfs_fileoff_t           rbmoff)
+{
+       return log2_len * mp->m_sb.sb_rbmblocks + rbmoff;
+}
+
+/*
+ * Convert an xfs_suminfo_t offset to a file block offset within the rt summary
+ * file.
+ */
+static inline xfs_fileoff_t
+xfs_rtsumoffs_to_block(
+       struct xfs_mount        *mp,
+       xfs_rtsumoff_t          rsumoff)
+{
+       return XFS_B_TO_FSBT(mp, rsumoff * sizeof(xfs_suminfo_t));
+}
+
+/*
+ * Convert an xfs_suminfo_t offset to an info word offset within an rt summary
+ * block.
+ */
+static inline unsigned int
+xfs_rtsumoffs_to_infoword(
+       struct xfs_mount        *mp,
+       xfs_rtsumoff_t          rsumoff)
+{
+       unsigned int            mask = mp->m_blockmask >> XFS_SUMINFOLOG;
+
+       return rsumoff & mask;
+}
+
+/* Return a pointer to a summary info word within a rt summary block. */
+static inline union xfs_suminfo_raw *
+xfs_rsumblock_infoptr(
+       struct xfs_rtalloc_args *args,
+       unsigned int            index)
+{
+       union xfs_suminfo_raw   *info = args->sumbp->b_addr;
+
+       return info + index;
+}
+
+/* Get the current value of a summary counter. */
+static inline xfs_suminfo_t
+xfs_suminfo_get(
+       struct xfs_rtalloc_args *args,
+       unsigned int            index)
+{
+       union xfs_suminfo_raw   *info = xfs_rsumblock_infoptr(args, index);
+
+       return info->old;
+}
+
+/* Add to the current value of a summary counter and return the new value. */
+static inline xfs_suminfo_t
+xfs_suminfo_add(
+       struct xfs_rtalloc_args *args,
+       unsigned int            index,
+       int                     delta)
+{
+       union xfs_suminfo_raw   *info = xfs_rsumblock_infoptr(args, index);
+
+       info->old += delta;
+       return info->old;
+}
+
+/*
+ * Functions for walking free space rtextents in the realtime bitmap.
+ */
+struct xfs_rtalloc_rec {
+       xfs_rtxnum_t            ar_startext;
+       xfs_rtbxlen_t           ar_extcount;
+};
+
+typedef int (*xfs_rtalloc_query_range_fn)(
+       struct xfs_mount                *mp,
+       struct xfs_trans                *tp,
+       const struct xfs_rtalloc_rec    *rec,
+       void                            *priv);
+
+#ifdef CONFIG_XFS_RT
+void xfs_rtbuf_cache_relse(struct xfs_rtalloc_args *args);
+
+int xfs_rtbuf_get(struct xfs_rtalloc_args *args, xfs_fileoff_t block,
+               int issum);
+
+static inline int
+xfs_rtbitmap_read_buf(
+       struct xfs_rtalloc_args         *args,
+       xfs_fileoff_t                   block)
+{
+       return xfs_rtbuf_get(args, block, 0);
+}
+
+static inline int
+xfs_rtsummary_read_buf(
+       struct xfs_rtalloc_args         *args,
+       xfs_fileoff_t                   block)
+{
+       return xfs_rtbuf_get(args, block, 1);
+}
+
+int xfs_rtcheck_range(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
+               xfs_rtxlen_t len, int val, xfs_rtxnum_t *new, int *stat);
+int xfs_rtfind_back(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
+               xfs_rtxnum_t limit, xfs_rtxnum_t *rtblock);
+int xfs_rtfind_forw(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
+               xfs_rtxnum_t limit, xfs_rtxnum_t *rtblock);
+int xfs_rtmodify_range(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
+               xfs_rtxlen_t len, int val);
+int xfs_rtmodify_summary_int(struct xfs_rtalloc_args *args, int log,
+               xfs_fileoff_t bbno, int delta, xfs_suminfo_t *sum);
+int xfs_rtmodify_summary(struct xfs_rtalloc_args *args, int log,
+               xfs_fileoff_t bbno, int delta);
+int xfs_rtfree_range(struct xfs_rtalloc_args *args, xfs_rtxnum_t start,
+               xfs_rtxlen_t len);
+int xfs_rtalloc_query_range(struct xfs_mount *mp, struct xfs_trans *tp,
+               const struct xfs_rtalloc_rec *low_rec,
+               const struct xfs_rtalloc_rec *high_rec,
+               xfs_rtalloc_query_range_fn fn, void *priv);
+int xfs_rtalloc_query_all(struct xfs_mount *mp, struct xfs_trans *tp,
+                         xfs_rtalloc_query_range_fn fn,
+                         void *priv);
+int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp,
+                              xfs_rtxnum_t start, xfs_rtxlen_t len,
+                              bool *is_free);
+/*
+ * Free an extent in the realtime subvolume.  Length is expressed in
+ * realtime extents, as is the block number.
+ */
+int                                    /* error */
+xfs_rtfree_extent(
+       struct xfs_trans        *tp,    /* transaction pointer */
+       xfs_rtxnum_t            start,  /* starting rtext number to free */
+       xfs_rtxlen_t            len);   /* length of extent freed */
+
+/* Same as above, but in units of rt blocks. */
+int xfs_rtfree_blocks(struct xfs_trans *tp, xfs_fsblock_t rtbno,
+               xfs_filblks_t rtlen);
+
+xfs_filblks_t xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t
+               rtextents);
+unsigned long long xfs_rtbitmap_wordcount(struct xfs_mount *mp,
+               xfs_rtbxlen_t rtextents);
+
+xfs_filblks_t xfs_rtsummary_blockcount(struct xfs_mount *mp,
+               unsigned int rsumlevels, xfs_extlen_t rbmblocks);
+unsigned long long xfs_rtsummary_wordcount(struct xfs_mount *mp,
+               unsigned int rsumlevels, xfs_extlen_t rbmblocks);
+#else /* CONFIG_XFS_RT */
+# define xfs_rtfree_extent(t,b,l)                      (-ENOSYS)
+# define xfs_rtfree_blocks(t,rb,rl)                    (-ENOSYS)
+# define xfs_rtalloc_query_range(m,t,l,h,f,p)          (-ENOSYS)
+# define xfs_rtalloc_query_all(m,t,f,p)                        (-ENOSYS)
+# define xfs_rtbitmap_read_buf(a,b)                    (-ENOSYS)
+# define xfs_rtsummary_read_buf(a,b)                   (-ENOSYS)
+# define xfs_rtbuf_cache_relse(a)                      (0)
+# define xfs_rtalloc_extent_is_free(m,t,s,l,i)         (-ENOSYS)
+static inline xfs_filblks_t
+xfs_rtbitmap_blockcount(struct xfs_mount *mp, xfs_rtbxlen_t rtextents)
+{
+       /* shut up gcc */
+       return 0;
+}
+# define xfs_rtbitmap_wordcount(mp, r)                 (0)
+# define xfs_rtsummary_blockcount(mp, l, b)            (0)
+# define xfs_rtsummary_wordcount(mp, l, b)             (0)
+#endif /* CONFIG_XFS_RT */
+
+#endif /* __XFS_RTBITMAP_H__ */
index 6264daaab37b06151bd12abc2ef9f41f61fcc675..1f74d0cd161841b410311e2490e7759e194fdfab 100644 (file)
@@ -975,6 +975,8 @@ xfs_sb_mount_common(
        mp->m_blockmask = sbp->sb_blocksize - 1;
        mp->m_blockwsize = sbp->sb_blocksize >> XFS_WORDLOG;
        mp->m_blockwmask = mp->m_blockwsize - 1;
+       mp->m_rtxblklog = log2_if_power2(sbp->sb_rextsize);
+       mp->m_rtxblkmask = mask64_if_power2(sbp->sb_rextsize);
 
        mp->m_alloc_mxr[0] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 1);
        mp->m_alloc_mxr[1] = xfs_allocbt_maxrecs(mp, sbp->sb_blocksize, 0);
index a5e14740ec9ac3935d5fe9e254d41292174df0a5..19134b23c10be3824de6a7949d6ccf9ebdfa8de0 100644 (file)
@@ -25,7 +25,7 @@ extern uint64_t       xfs_sb_version_to_features(struct xfs_sb *sbp);
 
 extern int     xfs_update_secondary_sbs(struct xfs_mount *mp);
 
-#define XFS_FS_GEOM_MAX_STRUCT_VER     (4)
+#define XFS_FS_GEOM_MAX_STRUCT_VER     (5)
 extern void    xfs_fs_geometry(struct xfs_mount *mp, struct xfs_fsop_geom *geo,
                                int struct_version);
 extern int     xfs_sb_read_secondary(struct xfs_mount *mp,
index 5b2f27cbdb8089d9386cf144288d960777f9d7de..6cd45e8c118daf63b60542b2c1934faff4de3813 100644 (file)
@@ -19,6 +19,7 @@
 #include "xfs_trans.h"
 #include "xfs_qm.h"
 #include "xfs_trans_space.h"
+#include "xfs_rtbitmap.h"
 
 #define _ALLOC true
 #define _FREE  false
@@ -217,11 +218,12 @@ xfs_rtalloc_block_count(
        struct xfs_mount        *mp,
        unsigned int            num_ops)
 {
-       unsigned int            blksz = XFS_FSB_TO_B(mp, 1);
-       unsigned int            rtbmp_bytes;
+       unsigned int            rtbmp_blocks;
+       xfs_rtxlen_t            rtxlen;
 
-       rtbmp_bytes = (XFS_MAX_BMBT_EXTLEN / mp->m_sb.sb_rextsize) / NBBY;
-       return (howmany(rtbmp_bytes, blksz) + 1) * num_ops;
+       rtxlen = xfs_extlen_to_rtxlen(mp, XFS_MAX_BMBT_EXTLEN);
+       rtbmp_blocks = xfs_rtbitmap_blockcount(mp, rtxlen);
+       return (rtbmp_blocks + 1) * num_ops;
 }
 
 /*
index 5c27659347329bc4b6fea4c88b9bc642cbe929cc..c299b16c9365fab61b98e13bebc38607d9768f87 100644 (file)
@@ -148,10 +148,10 @@ xfs_verify_rtbno(
 
 /* Verify that a realtime device extent is fully contained inside the volume. */
 bool
-xfs_verify_rtext(
+xfs_verify_rtbext(
        struct xfs_mount        *mp,
        xfs_rtblock_t           rtbno,
-       xfs_rtblock_t           len)
+       xfs_filblks_t           len)
 {
        if (rtbno + len <= rtbno)
                return false;
index 851220021484177e969ec4b08ae96582d708abcc..533200c4ccc25aa876ae30cd852066f143a2d1a1 100644 (file)
@@ -11,6 +11,7 @@ typedef uint32_t      prid_t;         /* project ID */
 typedef uint32_t       xfs_agblock_t;  /* blockno in alloc. group */
 typedef uint32_t       xfs_agino_t;    /* inode # within allocation grp */
 typedef uint32_t       xfs_extlen_t;   /* extent length in blocks */
+typedef uint32_t       xfs_rtxlen_t;   /* file extent length in rtextents */
 typedef uint32_t       xfs_agnumber_t; /* allocation group number */
 typedef uint64_t       xfs_extnum_t;   /* # of extents in a file */
 typedef uint32_t       xfs_aextnum_t;  /* # extents in an attribute fork */
@@ -18,6 +19,7 @@ typedef int64_t               xfs_fsize_t;    /* bytes in a file */
 typedef uint64_t       xfs_ufsize_t;   /* unsigned bytes in a file */
 
 typedef int32_t                xfs_suminfo_t;  /* type of bitmap summary info */
+typedef uint32_t       xfs_rtsumoff_t; /* offset of an rtsummary info word */
 typedef uint32_t       xfs_rtword_t;   /* word type for bitmap manipulations */
 
 typedef int64_t                xfs_lsn_t;      /* log sequence number */
@@ -31,6 +33,8 @@ typedef uint64_t      xfs_rfsblock_t; /* blockno in filesystem (raw) */
 typedef uint64_t       xfs_rtblock_t;  /* extent (block) in realtime area */
 typedef uint64_t       xfs_fileoff_t;  /* block number in a file */
 typedef uint64_t       xfs_filblks_t;  /* number of blocks in a file */
+typedef uint64_t       xfs_rtxnum_t;   /* rtextent number */
+typedef uint64_t       xfs_rtbxlen_t;  /* rtbitmap extent length in rtextents */
 
 typedef int64_t                xfs_srtblock_t; /* signed version of xfs_rtblock_t */
 
@@ -47,6 +51,7 @@ typedef void *                xfs_failaddr_t;
 #define        NULLRFSBLOCK    ((xfs_rfsblock_t)-1)
 #define        NULLRTBLOCK     ((xfs_rtblock_t)-1)
 #define        NULLFILEOFF     ((xfs_fileoff_t)-1)
+#define        NULLRTEXTNO     ((xfs_rtxnum_t)-1)
 
 #define        NULLAGBLOCK     ((xfs_agblock_t)-1)
 #define        NULLAGNUMBER    ((xfs_agnumber_t)-1)
@@ -145,6 +150,7 @@ typedef uint32_t    xfs_dqid_t;
  */
 #define        XFS_NBBYLOG     3               /* log2(NBBY) */
 #define        XFS_WORDLOG     2               /* log2(sizeof(xfs_rtword_t)) */
+#define        XFS_SUMINFOLOG  2               /* log2(sizeof(xfs_suminfo_t)) */
 #define        XFS_NBWORDLOG   (XFS_NBBYLOG + XFS_WORDLOG)
 #define        XFS_NBWORD      (1 << XFS_NBWORDLOG)
 #define        XFS_WORDMASK    ((1 << XFS_WORDLOG) - 1)
@@ -229,8 +235,8 @@ bool xfs_verify_ino(struct xfs_mount *mp, xfs_ino_t ino);
 bool xfs_internal_inum(struct xfs_mount *mp, xfs_ino_t ino);
 bool xfs_verify_dir_ino(struct xfs_mount *mp, xfs_ino_t ino);
 bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
-bool xfs_verify_rtext(struct xfs_mount *mp, xfs_rtblock_t rtbno,
-               xfs_rtblock_t len);
+bool xfs_verify_rtbext(struct xfs_mount *mp, xfs_rtblock_t rtbno,
+               xfs_filblks_t len);
 bool xfs_verify_icount(struct xfs_mount *mp, unsigned long long icount);
 bool xfs_verify_dablk(struct xfs_mount *mp, xfs_fileoff_t off);
 void xfs_icount_range(struct xfs_mount *mp, unsigned long long *min,
index 75588915572e92c3b99c0d89b033ae590086fc18..06d8c1996a3389e4396b3c354a3f88ab76c94d81 100644 (file)
@@ -410,7 +410,7 @@ xchk_bmap_iextent(
 
        /* Make sure the extent points to a valid place. */
        if (info->is_rt &&
-           !xfs_verify_rtext(mp, irec->br_startblock, irec->br_blockcount))
+           !xfs_verify_rtbext(mp, irec->br_startblock, irec->br_blockcount))
                xchk_fblock_set_corrupt(info->sc, info->whichfork,
                                irec->br_startoff);
        if (!info->is_rt &&
index 05be757668bb25ca0bef1d7db4fb0ea5bd996738..5799e9a94f1f665dba31af5b5908248311aa1fb1 100644 (file)
@@ -16,7 +16,7 @@
 #include "xfs_health.h"
 #include "xfs_btree.h"
 #include "xfs_ag.h"
-#include "xfs_rtalloc.h"
+#include "xfs_rtbitmap.h"
 #include "xfs_inode.h"
 #include "xfs_icache.h"
 #include "scrub/scrub.h"
index 59d7912fb75f1eae7ad88fe4acac9ae95553f135..889f556bc98f60cde87832cd0d1612cf49d7c919 100644 (file)
@@ -20,6 +20,7 @@
 #include "xfs_reflink.h"
 #include "xfs_rmap.h"
 #include "xfs_bmap_util.h"
+#include "xfs_rtbitmap.h"
 #include "scrub/scrub.h"
 #include "scrub/common.h"
 #include "scrub/btree.h"
@@ -225,7 +226,7 @@ xchk_inode_extsize(
         */
        if ((flags & XFS_DIFLAG_RTINHERIT) &&
            (flags & XFS_DIFLAG_EXTSZINHERIT) &&
-           value % sc->mp->m_sb.sb_rextsize > 0)
+           xfs_extlen_to_rtxmod(sc->mp, value) > 0)
                xchk_ino_set_warning(sc, ino);
 }
 
index 008ddb599e13244b9fcbb57e99191e59035e08dd..41a1d89ae8e6cdce6e465ea68d7fc77a02c189ba 100644 (file)
@@ -11,7 +11,7 @@
 #include "xfs_mount.h"
 #include "xfs_log_format.h"
 #include "xfs_trans.h"
-#include "xfs_rtalloc.h"
+#include "xfs_rtbitmap.h"
 #include "xfs_inode.h"
 #include "xfs_bmap.h"
 #include "scrub/scrub.h"
@@ -48,12 +48,12 @@ xchk_rtbitmap_rec(
 {
        struct xfs_scrub        *sc = priv;
        xfs_rtblock_t           startblock;
-       xfs_rtblock_t           blockcount;
+       xfs_filblks_t           blockcount;
 
-       startblock = rec->ar_startext * mp->m_sb.sb_rextsize;
-       blockcount = rec->ar_extcount * mp->m_sb.sb_rextsize;
+       startblock = xfs_rtx_to_rtb(mp, rec->ar_startext);
+       blockcount = xfs_rtx_to_rtb(mp, rec->ar_extcount);
 
-       if (!xfs_verify_rtext(mp, startblock, blockcount))
+       if (!xfs_verify_rtbext(mp, startblock, blockcount))
                xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, 0);
        return 0;
 }
@@ -128,26 +128,22 @@ out:
 void
 xchk_xref_is_used_rt_space(
        struct xfs_scrub        *sc,
-       xfs_rtblock_t           fsbno,
+       xfs_rtblock_t           rtbno,
        xfs_extlen_t            len)
 {
-       xfs_rtblock_t           startext;
-       xfs_rtblock_t           endext;
-       xfs_rtblock_t           extcount;
+       xfs_rtxnum_t            startext;
+       xfs_rtxnum_t            endext;
        bool                    is_free;
        int                     error;
 
        if (xchk_skip_xref(sc->sm))
                return;
 
-       startext = fsbno;
-       endext = fsbno + len - 1;
-       do_div(startext, sc->mp->m_sb.sb_rextsize);
-       do_div(endext, sc->mp->m_sb.sb_rextsize);
-       extcount = endext - startext + 1;
+       startext = xfs_rtb_to_rtx(sc->mp, rtbno);
+       endext = xfs_rtb_to_rtx(sc->mp, rtbno + len - 1);
        xfs_ilock(sc->mp->m_rbmip, XFS_ILOCK_SHARED | XFS_ILOCK_RTBITMAP);
-       error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext, extcount,
-                       &is_free);
+       error = xfs_rtalloc_extent_is_free(sc->mp, sc->tp, startext,
+                       endext - startext + 1, &is_free);
        if (!xchk_should_check_xref(sc, &error, NULL))
                goto out_unlock;
        if (is_free)
index 437ed9acbb27383e7a389ff2526f9ba4e1cdd04d..8b15c47408d0310db7889864b9b0b10a3a7a60f7 100644 (file)
@@ -13,7 +13,7 @@
 #include "xfs_inode.h"
 #include "xfs_log_format.h"
 #include "xfs_trans.h"
-#include "xfs_rtalloc.h"
+#include "xfs_rtbitmap.h"
 #include "xfs_bit.h"
 #include "xfs_bmap.h"
 #include "scrub/scrub.h"
@@ -81,34 +81,45 @@ typedef unsigned int xchk_rtsumoff_t;
 static inline int
 xfsum_load(
        struct xfs_scrub        *sc,
-       xchk_rtsumoff_t         sumoff,
-       xfs_suminfo_t           *info)
+       xfs_rtsumoff_t          sumoff,
+       union xfs_suminfo_raw   *rawinfo)
 {
-       return xfile_obj_load(sc->xfile, info, sizeof(xfs_suminfo_t),
+       return xfile_obj_load(sc->xfile, rawinfo,
+                       sizeof(union xfs_suminfo_raw),
                        sumoff << XFS_WORDLOG);
 }
 
 static inline int
 xfsum_store(
        struct xfs_scrub        *sc,
-       xchk_rtsumoff_t         sumoff,
-       const xfs_suminfo_t     info)
+       xfs_rtsumoff_t          sumoff,
+       const union xfs_suminfo_raw rawinfo)
 {
-       return xfile_obj_store(sc->xfile, &info, sizeof(xfs_suminfo_t),
+       return xfile_obj_store(sc->xfile, &rawinfo,
+                       sizeof(union xfs_suminfo_raw),
                        sumoff << XFS_WORDLOG);
 }
 
 static inline int
 xfsum_copyout(
        struct xfs_scrub        *sc,
-       xchk_rtsumoff_t         sumoff,
-       xfs_suminfo_t           *info,
+       xfs_rtsumoff_t          sumoff,
+       union xfs_suminfo_raw   *rawinfo,
        unsigned int            nr_words)
 {
-       return xfile_obj_load(sc->xfile, info, nr_words << XFS_WORDLOG,
+       return xfile_obj_load(sc->xfile, rawinfo, nr_words << XFS_WORDLOG,
                        sumoff << XFS_WORDLOG);
 }
 
+static inline xfs_suminfo_t
+xchk_rtsum_inc(
+       struct xfs_mount        *mp,
+       union xfs_suminfo_raw   *v)
+{
+       v->old += 1;
+       return v->old;
+}
+
 /* Update the summary file to reflect the free extent that we've accumulated. */
 STATIC int
 xchk_rtsum_record_free(
@@ -121,23 +132,24 @@ xchk_rtsum_record_free(
        xfs_fileoff_t                   rbmoff;
        xfs_rtblock_t                   rtbno;
        xfs_filblks_t                   rtlen;
-       xchk_rtsumoff_t                 offs;
+       xfs_rtsumoff_t                  offs;
        unsigned int                    lenlog;
-       xfs_suminfo_t                   v = 0;
+       union xfs_suminfo_raw           v;
+       xfs_suminfo_t                   value;
        int                             error = 0;
 
        if (xchk_should_terminate(sc, &error))
                return error;
 
        /* Compute the relevant location in the rtsum file. */
-       rbmoff = XFS_BITTOBLOCK(mp, rec->ar_startext);
+       rbmoff = xfs_rtx_to_rbmblock(mp, rec->ar_startext);
        lenlog = XFS_RTBLOCKLOG(rec->ar_extcount);
-       offs = XFS_SUMOFFS(mp, lenlog, rbmoff);
+       offs = xfs_rtsumoffs(mp, lenlog, rbmoff);
 
-       rtbno = rec->ar_startext * mp->m_sb.sb_rextsize;
-       rtlen = rec->ar_extcount * mp->m_sb.sb_rextsize;
+       rtbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
+       rtlen = xfs_rtx_to_rtb(mp, rec->ar_extcount);
 
-       if (!xfs_verify_rtext(mp, rtbno, rtlen)) {
+       if (!xfs_verify_rtbext(mp, rtbno, rtlen)) {
                xchk_ino_xref_set_corrupt(sc, mp->m_rbmip->i_ino);
                return -EFSCORRUPTED;
        }
@@ -147,9 +159,9 @@ xchk_rtsum_record_free(
        if (error)
                return error;
 
-       v++;
+       value = xchk_rtsum_inc(sc->mp, &v);
        trace_xchk_rtsum_record_free(mp, rec->ar_startext, rec->ar_extcount,
-                       lenlog, offs, v);
+                       lenlog, offs, value);
 
        return xfsum_store(sc, offs, v);
 }
@@ -160,12 +172,11 @@ xchk_rtsum_compute(
        struct xfs_scrub        *sc)
 {
        struct xfs_mount        *mp = sc->mp;
-       unsigned long long      rtbmp_bytes;
+       unsigned long long      rtbmp_blocks;
 
        /* If the bitmap size doesn't match the computed size, bail. */
-       rtbmp_bytes = howmany_64(mp->m_sb.sb_rextents, NBBY);
-       if (roundup_64(rtbmp_bytes, mp->m_sb.sb_blocksize) !=
-                       mp->m_rbmip->i_disk_size)
+       rtbmp_blocks = xfs_rtbitmap_blockcount(mp, mp->m_sb.sb_rextents);
+       if (XFS_FSB_TO_B(mp, rtbmp_blocks) != mp->m_rbmip->i_disk_size)
                return -EFSCORRUPTED;
 
        return xfs_rtalloc_query_all(sc->mp, sc->tp, xchk_rtsum_record_free,
@@ -177,14 +188,18 @@ STATIC int
 xchk_rtsum_compare(
        struct xfs_scrub        *sc)
 {
+       struct xfs_rtalloc_args args = {
+               .mp             = sc->mp,
+               .tp             = sc->tp,
+       };
        struct xfs_mount        *mp = sc->mp;
-       struct xfs_buf          *bp;
        struct xfs_bmbt_irec    map;
        xfs_fileoff_t           off;
        xchk_rtsumoff_t         sumoff = 0;
        int                     nmap;
 
        for (off = 0; off < XFS_B_TO_FSB(mp, mp->m_rsumsize); off++) {
+               union xfs_suminfo_raw *ondisk_info;
                int             error = 0;
 
                if (xchk_should_terminate(sc, &error))
@@ -205,22 +220,23 @@ xchk_rtsum_compare(
                }
 
                /* Read a block's worth of ondisk rtsummary file. */
-               error = xfs_rtbuf_get(mp, sc->tp, off, 1, &bp);
+               error = xfs_rtsummary_read_buf(&args, off);
                if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, off, &error))
                        return error;
 
                /* Read a block's worth of computed rtsummary file. */
                error = xfsum_copyout(sc, sumoff, sc->buf, mp->m_blockwsize);
                if (error) {
-                       xfs_trans_brelse(sc->tp, bp);
+                       xfs_rtbuf_cache_relse(&args);
                        return error;
                }
 
-               if (memcmp(bp->b_addr, sc->buf,
+               ondisk_info = xfs_rsumblock_infoptr(&args, 0);
+               if (memcmp(ondisk_info, sc->buf,
                                        mp->m_blockwsize << XFS_WORDLOG) != 0)
                        xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, off);
 
-               xfs_trans_brelse(sc->tp, bp);
+               xfs_rtbuf_cache_relse(&args);
                sumoff += mp->m_blockwsize;
        }
 
index 46249e7b17e09f66b6266758b159a537ab5f66d5..29afa48512355e08be55b2e13392ab1522ebf796 100644 (file)
@@ -13,6 +13,7 @@
 #include "xfs_inode.h"
 #include "xfs_btree.h"
 #include "xfs_ag.h"
+#include "xfs_rtbitmap.h"
 #include "scrub/scrub.h"
 #include "scrub/xfile.h"
 #include "scrub/xfarray.h"
index cbd4d01e253c068e6a47f9366a582d2e1d9e42bd..4a8bc6f3c8f2eaf6f270ce56b7a8dec5fd3a46c1 100644 (file)
@@ -1036,17 +1036,18 @@ TRACE_EVENT(xfarray_sort_stats,
 
 #ifdef CONFIG_XFS_RT
 TRACE_EVENT(xchk_rtsum_record_free,
-       TP_PROTO(struct xfs_mount *mp, xfs_rtblock_t start,
-                uint64_t len, unsigned int log, loff_t pos, xfs_suminfo_t v),
-       TP_ARGS(mp, start, len, log, pos, v),
+       TP_PROTO(struct xfs_mount *mp, xfs_rtxnum_t start,
+                xfs_rtbxlen_t len, unsigned int log, loff_t pos,
+                xfs_suminfo_t value),
+       TP_ARGS(mp, start, len, log, pos, value),
        TP_STRUCT__entry(
                __field(dev_t, dev)
                __field(dev_t, rtdev)
-               __field(xfs_rtblock_t, start)
+               __field(xfs_rtxnum_t, start)
                __field(unsigned long long, len)
                __field(unsigned int, log)
                __field(loff_t, pos)
-               __field(xfs_suminfo_t, v)
+               __field(xfs_suminfo_t, value)
        ),
        TP_fast_assign(
                __entry->dev = mp->m_super->s_dev;
@@ -1055,7 +1056,7 @@ TRACE_EVENT(xchk_rtsum_record_free,
                __entry->len = len;
                __entry->log = log;
                __entry->pos = pos;
-               __entry->v = v;
+               __entry->value = value;
        ),
        TP_printk("dev %d:%d rtdev %d:%d rtx 0x%llx rtxcount 0x%llx log %u rsumpos 0x%llx sumcount %u",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
@@ -1064,7 +1065,7 @@ TRACE_EVENT(xchk_rtsum_record_free,
                  __entry->len,
                  __entry->log,
                  __entry->pos,
-                 __entry->v)
+                 __entry->value)
 );
 #endif /* CONFIG_XFS_RT */
 
index 40e0a1f1f7530cba2193655b0be5ff661bfb107e..731260a5af6db43d06849d32a7384a90188f4031 100644 (file)
@@ -28,6 +28,7 @@
 #include "xfs_icache.h"
 #include "xfs_iomap.h"
 #include "xfs_reflink.h"
+#include "xfs_rtbitmap.h"
 
 /* Kernel only BMAP related definitions and functions */
 
@@ -75,28 +76,28 @@ xfs_bmap_rtalloc(
 {
        struct xfs_mount        *mp = ap->ip->i_mount;
        xfs_fileoff_t           orig_offset = ap->offset;
-       xfs_rtblock_t           rtb;
-       xfs_extlen_t            prod = 0;  /* product factor for allocators */
+       xfs_rtxnum_t            rtx;
+       xfs_rtxlen_t            prod = 0;  /* product factor for allocators */
        xfs_extlen_t            mod = 0;   /* product factor for allocators */
-       xfs_extlen_t            ralen = 0; /* realtime allocation length */
+       xfs_rtxlen_t            ralen = 0; /* realtime allocation length */
        xfs_extlen_t            align;     /* minimum allocation alignment */
        xfs_extlen_t            orig_length = ap->length;
        xfs_extlen_t            minlen = mp->m_sb.sb_rextsize;
-       xfs_extlen_t            raminlen;
+       xfs_rtxlen_t            raminlen;
        bool                    rtlocked = false;
        bool                    ignore_locality = false;
        int                     error;
 
        align = xfs_get_extsz_hint(ap->ip);
 retry:
-       prod = align / mp->m_sb.sb_rextsize;
+       prod = xfs_extlen_to_rtxlen(mp, align);
        error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                        align, 1, ap->eof, 0,
                                        ap->conv, &ap->offset, &ap->length);
        if (error)
                return error;
        ASSERT(ap->length);
-       ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
+       ASSERT(xfs_extlen_to_rtxmod(mp, ap->length) == 0);
 
        /*
         * If we shifted the file offset downward to satisfy an extent size
@@ -116,17 +117,14 @@ retry:
                prod = 1;
        /*
         * Set ralen to be the actual requested length in rtextents.
-        */
-       ralen = ap->length / mp->m_sb.sb_rextsize;
-       /*
+        *
         * If the old value was close enough to XFS_BMBT_MAX_EXTLEN that
         * we rounded up to it, cut it back so it's valid again.
         * Note that if it's a really large request (bigger than
         * XFS_BMBT_MAX_EXTLEN), we don't hear about that number, and can't
         * adjust the starting point to match it.
         */
-       if (ralen * mp->m_sb.sb_rextsize >= XFS_MAX_BMBT_EXTLEN)
-               ralen = XFS_MAX_BMBT_EXTLEN / mp->m_sb.sb_rextsize;
+       ralen = xfs_extlen_to_rtxlen(mp, min(ap->length, XFS_MAX_BMBT_EXTLEN));
 
        /*
         * Lock out modifications to both the RT bitmap and summary inodes
@@ -144,12 +142,10 @@ retry:
         * pick an extent that will space things out in the rt area.
         */
        if (ap->eof && ap->offset == 0) {
-               xfs_rtblock_t rtx; /* realtime extent no */
-
                error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
                if (error)
                        return error;
-               ap->blkno = rtx * mp->m_sb.sb_rextsize;
+               ap->blkno = xfs_rtx_to_rtb(mp, rtx);
        } else {
                ap->blkno = 0;
        }
@@ -160,20 +156,18 @@ retry:
         * Realtime allocation, done through xfs_rtallocate_extent.
         */
        if (ignore_locality)
-               ap->blkno = 0;
+               rtx = 0;
        else
-               do_div(ap->blkno, mp->m_sb.sb_rextsize);
-       rtb = ap->blkno;
-       ap->length = ralen;
-       raminlen = max_t(xfs_extlen_t, 1, minlen / mp->m_sb.sb_rextsize);
-       error = xfs_rtallocate_extent(ap->tp, ap->blkno, raminlen, ap->length,
-                       &ralen, ap->wasdel, prod, &rtb);
+               rtx = xfs_rtb_to_rtx(mp, ap->blkno);
+       raminlen = max_t(xfs_rtxlen_t, 1, xfs_extlen_to_rtxlen(mp, minlen));
+       error = xfs_rtallocate_extent(ap->tp, rtx, raminlen, ralen, &ralen,
+                       ap->wasdel, prod, &rtx);
        if (error)
                return error;
 
-       if (rtb != NULLRTBLOCK) {
-               ap->blkno = rtb * mp->m_sb.sb_rextsize;
-               ap->length = ralen * mp->m_sb.sb_rextsize;
+       if (rtx != NULLRTEXTNO) {
+               ap->blkno = xfs_rtx_to_rtb(mp, rtx);
+               ap->length = xfs_rtxlen_to_extlen(mp, ralen);
                ap->ip->i_nblocks += ap->length;
                xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
                if (ap->wasdel)
@@ -690,7 +684,7 @@ xfs_can_free_eofblocks(
         */
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
        if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1)
-               end_fsb = roundup_64(end_fsb, mp->m_sb.sb_rextsize);
+               end_fsb = xfs_rtb_roundup_rtx(mp, end_fsb);
        last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        if (last_fsb <= end_fsb)
                return false;
@@ -780,12 +774,10 @@ xfs_alloc_file_space(
 {
        xfs_mount_t             *mp = ip->i_mount;
        xfs_off_t               count;
-       xfs_filblks_t           allocated_fsb;
        xfs_filblks_t           allocatesize_fsb;
        xfs_extlen_t            extsz, temp;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fileoff_t           endoffset_fsb;
-       int                     nimaps;
        int                     rt;
        xfs_trans_t             *tp;
        xfs_bmbt_irec_t         imaps[1], *imapp;
@@ -808,7 +800,6 @@ xfs_alloc_file_space(
 
        count = len;
        imapp = &imaps[0];
-       nimaps = 1;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
        endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
        allocatesize_fsb = endoffset_fsb - startoffset_fsb;
@@ -819,6 +810,7 @@ xfs_alloc_file_space(
        while (allocatesize_fsb && !error) {
                xfs_fileoff_t   s, e;
                unsigned int    dblocks, rblocks, resblks;
+               int             nimaps = 1;
 
                /*
                 * Determine space reservations for data/realtime.
@@ -884,15 +876,19 @@ xfs_alloc_file_space(
                if (error)
                        break;
 
-               allocated_fsb = imapp->br_blockcount;
-
-               if (nimaps == 0) {
-                       error = -ENOSPC;
-                       break;
+               /*
+                * If the allocator cannot find a single free extent large
+                * enough to cover the start block of the requested range,
+                * xfs_bmapi_write will return 0 but leave *nimaps set to 0.
+                *
+                * In that case we simply need to keep looping with the same
+                * startoffset_fsb so that one of the following allocations
+                * will eventually reach the requested range.
+                */
+               if (nimaps) {
+                       startoffset_fsb += imapp->br_blockcount;
+                       allocatesize_fsb -= imapp->br_blockcount;
                }
-
-               startoffset_fsb += allocated_fsb;
-               allocatesize_fsb -= allocated_fsb;
        }
 
        return error;
@@ -989,10 +985,8 @@ xfs_free_file_space(
 
        /* We can only free complete realtime extents. */
        if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
-               startoffset_fsb = roundup_64(startoffset_fsb,
-                                            mp->m_sb.sb_rextsize);
-               endoffset_fsb = rounddown_64(endoffset_fsb,
-                                            mp->m_sb.sb_rextsize);
+               startoffset_fsb = xfs_rtb_roundup_rtx(mp, startoffset_fsb);
+               endoffset_fsb = xfs_rtb_rounddown_rtx(mp, endoffset_fsb);
        }
 
        /*
index 203700278ddbb620a6e9d7c95b68a86f4ea52c63..e33e5e13b95f462ffe33c232739af6c4762758a0 100644 (file)
@@ -214,6 +214,43 @@ xfs_ilock_iocb(
        return 0;
 }
 
+static int
+xfs_ilock_iocb_for_write(
+       struct kiocb            *iocb,
+       unsigned int            *lock_mode)
+{
+       ssize_t                 ret;
+       struct xfs_inode        *ip = XFS_I(file_inode(iocb->ki_filp));
+
+       ret = xfs_ilock_iocb(iocb, *lock_mode);
+       if (ret)
+               return ret;
+
+       if (*lock_mode == XFS_IOLOCK_EXCL)
+               return 0;
+       if (!xfs_iflags_test(ip, XFS_IREMAPPING))
+               return 0;
+
+       xfs_iunlock(ip, *lock_mode);
+       *lock_mode = XFS_IOLOCK_EXCL;
+       return xfs_ilock_iocb(iocb, *lock_mode);
+}
+
+static unsigned int
+xfs_ilock_for_write_fault(
+       struct xfs_inode        *ip)
+{
+       /* get a shared lock if no remapping in progress */
+       xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
+       if (!xfs_iflags_test(ip, XFS_IREMAPPING))
+               return XFS_MMAPLOCK_SHARED;
+
+       /* wait for remapping to complete */
+       xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
+       xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
+       return XFS_MMAPLOCK_EXCL;
+}
+
 STATIC ssize_t
 xfs_file_dio_read(
        struct kiocb            *iocb,
@@ -551,7 +588,7 @@ xfs_file_dio_write_aligned(
        unsigned int            iolock = XFS_IOLOCK_SHARED;
        ssize_t                 ret;
 
-       ret = xfs_ilock_iocb(iocb, iolock);
+       ret = xfs_ilock_iocb_for_write(iocb, &iolock);
        if (ret)
                return ret;
        ret = xfs_file_write_checks(iocb, from, &iolock);
@@ -618,7 +655,7 @@ retry_exclusive:
                flags = IOMAP_DIO_FORCE_WAIT;
        }
 
-       ret = xfs_ilock_iocb(iocb, iolock);
+       ret = xfs_ilock_iocb_for_write(iocb, &iolock);
        if (ret)
                return ret;
 
@@ -1180,7 +1217,7 @@ xfs_file_remap_range(
        if (xfs_file_sync_writes(file_in) || xfs_file_sync_writes(file_out))
                xfs_log_force_inode(dest);
 out_unlock:
-       xfs_iunlock2_io_mmap(src, dest);
+       xfs_iunlock2_remapping(src, dest);
        if (ret)
                trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
        return remapped > 0 ? remapped : ret;
@@ -1328,6 +1365,7 @@ __xfs_filemap_fault(
        struct inode            *inode = file_inode(vmf->vma->vm_file);
        struct xfs_inode        *ip = XFS_I(inode);
        vm_fault_t              ret;
+       unsigned int            lock_mode = 0;
 
        trace_xfs_filemap_fault(ip, order, write_fault);
 
@@ -1336,25 +1374,24 @@ __xfs_filemap_fault(
                file_update_time(vmf->vma->vm_file);
        }
 
+       if (IS_DAX(inode) || write_fault)
+               lock_mode = xfs_ilock_for_write_fault(XFS_I(inode));
+
        if (IS_DAX(inode)) {
                pfn_t pfn;
 
-               xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
                ret = xfs_dax_fault(vmf, order, write_fault, &pfn);
                if (ret & VM_FAULT_NEEDDSYNC)
                        ret = dax_finish_sync_fault(vmf, order, pfn);
-               xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
+       } else if (write_fault) {
+               ret = iomap_page_mkwrite(vmf, &xfs_page_mkwrite_iomap_ops);
        } else {
-               if (write_fault) {
-                       xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-                       ret = iomap_page_mkwrite(vmf,
-                                       &xfs_page_mkwrite_iomap_ops);
-                       xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
-               } else {
-                       ret = filemap_fault(vmf);
-               }
+               ret = filemap_fault(vmf);
        }
 
+       if (lock_mode)
+               xfs_iunlock(XFS_I(inode), lock_mode);
+
        if (write_fault)
                sb_end_pagefault(inode->i_sb);
        return ret;
index 736e5545f584025ec204dfc6059e83fc06e99bdb..5a72217f5feb9b990ff3a41d1cb79519cf2b3091 100644 (file)
@@ -23,7 +23,7 @@
 #include "xfs_refcount.h"
 #include "xfs_refcount_btree.h"
 #include "xfs_alloc_btree.h"
-#include "xfs_rtalloc.h"
+#include "xfs_rtbitmap.h"
 #include "xfs_ag.h"
 
 /* Convert an xfs_fsmap to an fsmap. */
@@ -483,11 +483,11 @@ xfs_getfsmap_rtdev_rtbitmap_helper(
        xfs_rtblock_t                   rtbno;
        xfs_daddr_t                     rec_daddr, len_daddr;
 
-       rtbno = rec->ar_startext * mp->m_sb.sb_rextsize;
+       rtbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
        rec_daddr = XFS_FSB_TO_BB(mp, rtbno);
        irec.rm_startblock = rtbno;
 
-       rtbno = rec->ar_extcount * mp->m_sb.sb_rextsize;
+       rtbno = xfs_rtx_to_rtb(mp, rec->ar_extcount);
        len_daddr = XFS_FSB_TO_BB(mp, rtbno);
        irec.rm_blockcount = rtbno;
 
@@ -514,7 +514,7 @@ xfs_getfsmap_rtdev_rtbitmap(
        uint64_t                        eofs;
        int                             error;
 
-       eofs = XFS_FSB_TO_BB(mp, mp->m_sb.sb_rextents * mp->m_sb.sb_rextsize);
+       eofs = XFS_FSB_TO_BB(mp, xfs_rtx_to_rtb(mp, mp->m_sb.sb_rextents));
        if (keys[0].fmr_physical >= eofs)
                return 0;
        start_rtb = XFS_BB_TO_FSBT(mp,
@@ -539,11 +539,8 @@ xfs_getfsmap_rtdev_rtbitmap(
         * Set up query parameters to return free rtextents covering the range
         * we want.
         */
-       alow.ar_startext = start_rtb;
-       ahigh.ar_startext = end_rtb;
-       do_div(alow.ar_startext, mp->m_sb.sb_rextsize);
-       if (do_div(ahigh.ar_startext, mp->m_sb.sb_rextsize))
-               ahigh.ar_startext++;
+       alow.ar_startext = xfs_rtb_to_rtx(mp, start_rtb);
+       ahigh.ar_startext = xfs_rtb_to_rtxup(mp, end_rtb);
        error = xfs_rtalloc_query_range(mp, tp, &alow, &ahigh,
                        xfs_getfsmap_rtdev_rtbitmap_helper, info);
        if (error)
index 36f5cf802c07b65ad74e9d158a79d36b36ad63a3..c0f1c89786c2ac083482ece0bb312692ab8255a3 100644 (file)
@@ -918,6 +918,13 @@ xfs_droplink(
        xfs_trans_t *tp,
        xfs_inode_t *ip)
 {
+       if (VFS_I(ip)->i_nlink == 0) {
+               xfs_alert(ip->i_mount,
+                         "%s: Attempt to drop inode (%llu) with nlink zero.",
+                         __func__, ip->i_ino);
+               return -EFSCORRUPTED;
+       }
+
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 
        drop_nlink(VFS_I(ip));
@@ -3621,6 +3628,23 @@ xfs_iunlock2_io_mmap(
                inode_unlock(VFS_I(ip1));
 }
 
+/* Drop the MMAPLOCK and the IOLOCK after a remap completes. */
+void
+xfs_iunlock2_remapping(
+       struct xfs_inode        *ip1,
+       struct xfs_inode        *ip2)
+{
+       xfs_iflags_clear(ip1, XFS_IREMAPPING);
+
+       if (ip1 != ip2)
+               xfs_iunlock(ip1, XFS_MMAPLOCK_SHARED);
+       xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
+
+       if (ip1 != ip2)
+               inode_unlock_shared(VFS_I(ip1));
+       inode_unlock(VFS_I(ip2));
+}
+
 /*
  * Reload the incore inode list for this inode.  Caller should ensure that
  * the link count cannot change, either by taking ILOCK_SHARED or otherwise
index 0c5bdb91152e1cf920cfb5cb76268d0b025640fc..3dc47937da5d17d81e46fd590435f574d11558dc 100644 (file)
@@ -347,6 +347,14 @@ static inline bool xfs_inode_has_large_extent_counts(struct xfs_inode *ip)
 /* Quotacheck is running but inode has not been added to quota counts. */
 #define XFS_IQUOTAUNCHECKED    (1 << 14)
 
+/*
+ * Remap in progress. Callers that wish to update file data while
+ * holding a shared IOLOCK or MMAPLOCK must drop the lock and retake
+ * the lock in exclusive mode. Relocking the file will block until
+ * IREMAPPING is cleared.
+ */
+#define XFS_IREMAPPING         (1U << 15)
+
 /* All inode state flags related to inode reclaim. */
 #define XFS_ALL_IRECLAIM_FLAGS (XFS_IRECLAIMABLE | \
                                 XFS_IRECLAIM | \
@@ -595,6 +603,7 @@ void xfs_end_io(struct work_struct *work);
 
 int xfs_ilock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
 void xfs_iunlock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
+void xfs_iunlock2_remapping(struct xfs_inode *ip1, struct xfs_inode *ip2);
 
 static inline bool
 xfs_inode_unlinked_incomplete(
index 17c51804f9c6aed2d3f4043172b67cf3754cd140..cd7803fda8b1be501af6d19d900d9d80bef5a391 100644 (file)
@@ -19,6 +19,7 @@
 #include "xfs_log.h"
 #include "xfs_log_priv.h"
 #include "xfs_error.h"
+#include "xfs_rtbitmap.h"
 
 #include <linux/iversion.h>
 
@@ -107,7 +108,7 @@ xfs_inode_item_precommit(
         */
        if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
            (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) &&
-           (ip->i_extsize % ip->i_mount->m_sb.sb_rextsize) > 0) {
+           xfs_extlen_to_rtxmod(ip->i_mount, ip->i_extsize) > 0) {
                ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
                                   XFS_DIFLAG_EXTSZINHERIT);
                ip->i_extsize = 0;
index 55bb01173cde8c06099a79b2bcd631fea25bf771..a82470e027f7278ac4b3271cd67a27ed97235c77 100644 (file)
@@ -38,6 +38,7 @@
 #include "xfs_reflink.h"
 #include "xfs_ioctl.h"
 #include "xfs_xattr.h"
+#include "xfs_rtbitmap.h"
 
 #include <linux/mount.h>
 #include <linux/namei.h>
@@ -1004,7 +1005,7 @@ xfs_fill_fsxattr(
                 * later.
                 */
                if ((ip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
-                   ip->i_extsize % mp->m_sb.sb_rextsize > 0) {
+                   xfs_extlen_to_rtxmod(mp, ip->i_extsize) > 0) {
                        fa->fsx_xflags &= ~(FS_XFLAG_EXTSIZE |
                                            FS_XFLAG_EXTSZINHERIT);
                        fa->fsx_extsize = 0;
@@ -1130,7 +1131,7 @@ xfs_ioctl_setattr_xflags(
        /* If realtime flag is set then must have realtime device */
        if (fa->fsx_xflags & FS_XFLAG_REALTIME) {
                if (mp->m_sb.sb_rblocks == 0 || mp->m_sb.sb_rextsize == 0 ||
-                   (ip->i_extsize % mp->m_sb.sb_rextsize))
+                   xfs_extlen_to_rtxmod(mp, ip->i_extsize))
                        return -EINVAL;
        }
 
index e9d317a3dafe4e673c439e504bc7fc24ca503dbe..d7873e0360f0b9f170fc8cdaffa0ce49b1c5ed37 100644 (file)
@@ -198,6 +198,18 @@ static inline uint64_t howmany_64(uint64_t x, uint32_t y)
        return x;
 }
 
+/* If @b is a power of 2, return log2(b).  Else return -1. */
+static inline int8_t log2_if_power2(unsigned long b)
+{
+       return is_power_of_2(b) ? ilog2(b) : -1;
+}
+
+/* If @b is a power of 2, return a mask of the lower bits, else return zero. */
+static inline unsigned long long mask64_if_power2(unsigned long b)
+{
+       return is_power_of_2(b) ? b - 1 : 0;
+}
+
 int xfs_rw_bdev(struct block_device *bdev, sector_t sector, unsigned int count,
                char *data, enum req_op op);
 
index 219681d29fbc496cd594c21ddb921b03918e3755..503fe3c7edbf82cd1f194b3fd3d33b27bb06167d 100644 (file)
@@ -101,9 +101,9 @@ typedef struct xfs_mount {
 
        /*
         * Optional cache of rt summary level per bitmap block with the
-        * invariant that m_rsum_cache[bbno] <= the minimum i for which
-        * rsum[i][bbno] != 0. Reads and writes are serialized by the rsumip
-        * inode lock.
+        * invariant that m_rsum_cache[bbno] > the maximum i for which
+        * rsum[i][bbno] != 0, or 0 if rsum[i][bbno] == 0 for all i.
+        * Reads and writes are serialized by the rsumip inode lock.
         */
        uint8_t                 *m_rsum_cache;
        struct xfs_mru_cache    *m_filestream;  /* per-mount filestream data */
@@ -119,6 +119,7 @@ typedef struct xfs_mount {
        uint8_t                 m_blkbb_log;    /* blocklog - BBSHIFT */
        uint8_t                 m_agno_log;     /* log #ag's */
        uint8_t                 m_sectbb_log;   /* sectlog - BBSHIFT */
+       int8_t                  m_rtxblklog;    /* log2 of rextsize, if possible */
        uint                    m_blockmask;    /* sb_blocksize-1 */
        uint                    m_blockwsize;   /* sb_blocksize in words */
        uint                    m_blockwmask;   /* blockwsize-1 */
@@ -152,6 +153,7 @@ typedef struct xfs_mount {
        uint64_t                m_features;     /* active filesystem features */
        uint64_t                m_low_space[XFS_LOWSP_MAX];
        uint64_t                m_low_rtexts[XFS_LOWSP_MAX];
+       uint64_t                m_rtxblkmask;   /* rt extent block mask */
        struct xfs_ino_geometry m_ino_geo;      /* inode geometry */
        struct xfs_trans_resv   m_resv;         /* precomputed res values */
                                                /* low free space thresholds */
index c4cc99b70dd303e84c6684220f1085ed22c343f3..21a7e350b4c58ee9b9e1cd164a73ee9a23cb7b7d 100644 (file)
@@ -72,6 +72,10 @@ xfs_check_ondisk_structs(void)
        XFS_CHECK_STRUCT_SIZE(xfs_attr_leaf_map_t,              4);
        XFS_CHECK_STRUCT_SIZE(xfs_attr_leaf_name_local_t,       4);
 
+       /* realtime structures */
+       XFS_CHECK_STRUCT_SIZE(union xfs_rtword_raw,             4);
+       XFS_CHECK_STRUCT_SIZE(union xfs_suminfo_raw,            4);
+
        /*
         * m68k has problems with xfs_attr_leaf_name_remote_t, but we pad it to
         * 4 bytes anyway so it's not obviously a problem.  Hence for the moment
index eb9102453affbf34c9378a7b354de22e16f6d7a3..658edee8381dcdca656135a74a9b5a1b6e13a8cb 100644 (file)
@@ -1540,6 +1540,10 @@ xfs_reflink_remap_prep(
        if (ret)
                goto out_unlock;
 
+       xfs_iflags_set(src, XFS_IREMAPPING);
+       if (inode_in != inode_out)
+               xfs_ilock_demote(src, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
+
        return 0;
 out_unlock:
        xfs_iunlock2_io_mmap(src, dest);
index 2e1a4e5cd03def7495310d7213d2b99dbe617a51..88c48de5c9c810c97e38170ecb4bf84fe42c7b35 100644 (file)
@@ -19,6 +19,7 @@
 #include "xfs_icache.h"
 #include "xfs_rtalloc.h"
 #include "xfs_sb.h"
+#include "xfs_rtbitmap.h"
 
 /*
  * Read and return the summary information for a given extent size,
  */
 static int
 xfs_rtget_summary(
-       xfs_mount_t     *mp,            /* file system mount structure */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       int             log,            /* log2 of extent size */
-       xfs_rtblock_t   bbno,           /* bitmap block number */
-       struct xfs_buf  **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       xfs_suminfo_t   *sum)           /* out: summary info for this block */
+       struct xfs_rtalloc_args *args,
+       int                     log,    /* log2 of extent size */
+       xfs_fileoff_t           bbno,   /* bitmap block number */
+       xfs_suminfo_t           *sum)   /* out: summary info for this block */
 {
-       return xfs_rtmodify_summary_int(mp, tp, log, bbno, 0, rbpp, rsb, sum);
+       return xfs_rtmodify_summary_int(args, log, bbno, 0, sum);
 }
 
 /*
  * Return whether there are any free extents in the size range given
  * by low and high, for the bitmap block bbno.
  */
-STATIC int                             /* error */
+STATIC int
 xfs_rtany_summary(
-       xfs_mount_t     *mp,            /* file system mount structure */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       int             low,            /* low log2 extent size */
-       int             high,           /* high log2 extent size */
-       xfs_rtblock_t   bbno,           /* bitmap block number */
-       struct xfs_buf  **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       int             *stat)          /* out: any good extents here? */
+       struct xfs_rtalloc_args *args,
+       int                     low,    /* low log2 extent size */
+       int                     high,   /* high log2 extent size */
+       xfs_fileoff_t           bbno,   /* bitmap block number */
+       int                     *maxlog) /* out: max log2 extent size free */
 {
-       int             error;          /* error value */
-       int             log;            /* loop counter, log2 of ext. size */
-       xfs_suminfo_t   sum;            /* summary data */
-
-       /* There are no extents at levels < m_rsum_cache[bbno]. */
-       if (mp->m_rsum_cache && low < mp->m_rsum_cache[bbno])
-               low = mp->m_rsum_cache[bbno];
+       struct xfs_mount        *mp = args->mp;
+       int                     error;
+       int                     log;    /* loop counter, log2 of ext. size */
+       xfs_suminfo_t           sum;    /* summary data */
+
+       /* There are no extents at levels >= m_rsum_cache[bbno]. */
+       if (mp->m_rsum_cache) {
+               high = min(high, mp->m_rsum_cache[bbno] - 1);
+               if (low > high) {
+                       *maxlog = -1;
+                       return 0;
+               }
+       }
 
        /*
         * Loop over logs of extent sizes.
         */
-       for (log = low; log <= high; log++) {
+       for (log = high; log >= low; log--) {
                /*
                 * Get one summary datum.
                 */
-               error = xfs_rtget_summary(mp, tp, log, bbno, rbpp, rsb, &sum);
+               error = xfs_rtget_summary(args, log, bbno, &sum);
                if (error) {
                        return error;
                }
@@ -77,18 +78,18 @@ xfs_rtany_summary(
                 * If there are any, return success.
                 */
                if (sum) {
-                       *stat = 1;
+                       *maxlog = log;
                        goto out;
                }
        }
        /*
         * Found nothing, return failure.
         */
-       *stat = 0;
+       *maxlog = -1;
 out:
-       /* There were no extents at levels < log. */
-       if (mp->m_rsum_cache && log > mp->m_rsum_cache[bbno])
-               mp->m_rsum_cache[bbno] = log;
+       /* There were no extents at levels > log. */
+       if (mp->m_rsum_cache && log + 1 < mp->m_rsum_cache[bbno])
+               mp->m_rsum_cache[bbno] = log + 1;
        return 0;
 }
 
@@ -97,60 +98,54 @@ out:
  * Copy and transform the summary file, given the old and new
  * parameters in the mount structures.
  */
-STATIC int                             /* error */
+STATIC int
 xfs_rtcopy_summary(
-       xfs_mount_t     *omp,           /* old file system mount point */
-       xfs_mount_t     *nmp,           /* new file system mount point */
-       xfs_trans_t     *tp)            /* transaction pointer */
+       struct xfs_rtalloc_args *oargs,
+       struct xfs_rtalloc_args *nargs)
 {
-       xfs_rtblock_t   bbno;           /* bitmap block number */
-       struct xfs_buf  *bp;            /* summary buffer */
-       int             error;          /* error return value */
-       int             log;            /* summary level number (log length) */
-       xfs_suminfo_t   sum;            /* summary data */
-       xfs_fsblock_t   sumbno;         /* summary block number */
+       xfs_fileoff_t           bbno;   /* bitmap block number */
+       int                     error;
+       int                     log;    /* summary level number (log length) */
+       xfs_suminfo_t           sum;    /* summary data */
 
-       bp = NULL;
-       for (log = omp->m_rsumlevels - 1; log >= 0; log--) {
-               for (bbno = omp->m_sb.sb_rbmblocks - 1;
+       for (log = oargs->mp->m_rsumlevels - 1; log >= 0; log--) {
+               for (bbno = oargs->mp->m_sb.sb_rbmblocks - 1;
                     (xfs_srtblock_t)bbno >= 0;
                     bbno--) {
-                       error = xfs_rtget_summary(omp, tp, log, bbno, &bp,
-                               &sumbno, &sum);
+                       error = xfs_rtget_summary(oargs, log, bbno, &sum);
                        if (error)
-                               return error;
+                               goto out;
                        if (sum == 0)
                                continue;
-                       error = xfs_rtmodify_summary(omp, tp, log, bbno, -sum,
-                               &bp, &sumbno);
+                       error = xfs_rtmodify_summary(oargs, log, bbno, -sum);
                        if (error)
-                               return error;
-                       error = xfs_rtmodify_summary(nmp, tp, log, bbno, sum,
-                               &bp, &sumbno);
+                               goto out;
+                       error = xfs_rtmodify_summary(nargs, log, bbno, sum);
                        if (error)
-                               return error;
+                               goto out;
                        ASSERT(sum > 0);
                }
        }
+       error = 0;
+out:
+       xfs_rtbuf_cache_relse(oargs);
-       return error;
+       return error;
 }
 /*
  * Mark an extent specified by start and len allocated.
  * Updates all the summary information as well as the bitmap.
  */
-STATIC int                             /* error */
+STATIC int
 xfs_rtallocate_range(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   start,          /* start block to allocate */
-       xfs_extlen_t    len,            /* length to allocate */
-       struct xfs_buf  **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb)           /* in/out: summary block number */
+       struct xfs_rtalloc_args *args,
+       xfs_rtxnum_t            start,  /* start rtext to allocate */
+       xfs_rtxlen_t            len)    /* length to allocate */
 {
-       xfs_rtblock_t   end;            /* end of the allocated extent */
-       int             error;          /* error value */
-       xfs_rtblock_t   postblock = 0;  /* first block allocated > end */
-       xfs_rtblock_t   preblock = 0;   /* first block allocated < start */
+       struct xfs_mount        *mp = args->mp;
+       xfs_rtxnum_t            end;    /* end of the allocated rtext */
+       int                     error;
+       xfs_rtxnum_t            postblock = 0; /* first rtext allocated > end */
+       xfs_rtxnum_t            preblock = 0; /* first rtext allocated < start */
 
        end = start + len - 1;
        /*
@@ -158,15 +153,15 @@ xfs_rtallocate_range(
         * We need to find the beginning and end of the extent so we can
         * properly update the summary.
         */
-       error = xfs_rtfind_back(mp, tp, start, 0, &preblock);
+       error = xfs_rtfind_back(args, start, 0, &preblock);
        if (error) {
                return error;
        }
        /*
         * Find the next allocated block (end of free extent).
         */
-       error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
-               &postblock);
+       error = xfs_rtfind_forw(args, end, mp->m_sb.sb_rextents - 1,
+                       &postblock);
        if (error) {
                return error;
        }
@@ -174,9 +169,9 @@ xfs_rtallocate_range(
         * Decrement the summary information corresponding to the entire
         * (old) free extent.
         */
-       error = xfs_rtmodify_summary(mp, tp,
-               XFS_RTBLOCKLOG(postblock + 1 - preblock),
-               XFS_BITTOBLOCK(mp, preblock), -1, rbpp, rsb);
+       error = xfs_rtmodify_summary(args,
+                       XFS_RTBLOCKLOG(postblock + 1 - preblock),
+                       xfs_rtx_to_rbmblock(mp, preblock), -1);
        if (error) {
                return error;
        }
@@ -185,9 +180,9 @@ xfs_rtallocate_range(
         * old extent, add summary data for them to be free.
         */
        if (preblock < start) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(start - preblock),
-                       XFS_BITTOBLOCK(mp, preblock), 1, rbpp, rsb);
+               error = xfs_rtmodify_summary(args,
+                               XFS_RTBLOCKLOG(start - preblock),
+                               xfs_rtx_to_rbmblock(mp, preblock), 1);
                if (error) {
                        return error;
                }
@@ -197,9 +192,9 @@ xfs_rtallocate_range(
         * old extent, add summary data for them to be free.
         */
        if (postblock > end) {
-               error = xfs_rtmodify_summary(mp, tp,
-                       XFS_RTBLOCKLOG(postblock - end),
-                       XFS_BITTOBLOCK(mp, end + 1), 1, rbpp, rsb);
+               error = xfs_rtmodify_summary(args,
+                               XFS_RTBLOCKLOG(postblock - end),
+                               xfs_rtx_to_rbmblock(mp, end + 1), 1);
                if (error) {
                        return error;
                }
@@ -207,54 +202,69 @@ xfs_rtallocate_range(
        /*
         * Modify the bitmap to mark this extent allocated.
         */
-       error = xfs_rtmodify_range(mp, tp, start, len, 0);
+       error = xfs_rtmodify_range(args, start, len, 0);
        return error;
 }
 
+/*
+ * Make sure we don't run off the end of the rt volume.  Be careful that
+ * adjusting maxlen downwards doesn't cause us to fail the alignment checks.
+ */
+static inline xfs_rtxlen_t
+xfs_rtallocate_clamp_len(
+       struct xfs_mount        *mp,
+       xfs_rtxnum_t            startrtx,
+       xfs_rtxlen_t            rtxlen,
+       xfs_rtxlen_t            prod)
+{
+       xfs_rtxlen_t            ret;
+
+       ret = min(mp->m_sb.sb_rextents, startrtx + rtxlen) - startrtx;
+       return rounddown(ret, prod);
+}
+
 /*
  * Attempt to allocate an extent minlen<=len<=maxlen starting from
  * bitmap block bbno.  If we don't get maxlen then use prod to trim
- * the length, if given.  Returns error; returns starting block in *rtblock.
+ * the length, if given.  Returns error; returns starting block in *rtx.
  * The lengths are all in rtextents.
  */
-STATIC int                             /* error */
+STATIC int
 xfs_rtallocate_extent_block(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   bbno,           /* bitmap block number */
-       xfs_extlen_t    minlen,         /* minimum length to allocate */
-       xfs_extlen_t    maxlen,         /* maximum length to allocate */
-       xfs_extlen_t    *len,           /* out: actual length allocated */
-       xfs_rtblock_t   *nextp,         /* out: next block to try */
-       struct xfs_buf  **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       xfs_extlen_t    prod,           /* extent product factor */
-       xfs_rtblock_t   *rtblock)       /* out: start block allocated */
+       struct xfs_rtalloc_args *args,
+       xfs_fileoff_t           bbno,   /* bitmap block number */
+       xfs_rtxlen_t            minlen, /* minimum length to allocate */
+       xfs_rtxlen_t            maxlen, /* maximum length to allocate */
+       xfs_rtxlen_t            *len,   /* out: actual length allocated */
+       xfs_rtxnum_t            *nextp, /* out: next rtext to try */
+       xfs_rtxlen_t            prod,   /* extent product factor */
+       xfs_rtxnum_t            *rtx)   /* out: start rtext allocated */
 {
-       xfs_rtblock_t   besti;          /* best rtblock found so far */
-       xfs_rtblock_t   bestlen;        /* best length found so far */
-       xfs_rtblock_t   end;            /* last rtblock in chunk */
-       int             error;          /* error value */
-       xfs_rtblock_t   i;              /* current rtblock trying */
-       xfs_rtblock_t   next;           /* next rtblock to try */
-       int             stat;           /* status from internal calls */
+       struct xfs_mount        *mp = args->mp;
+       xfs_rtxnum_t            besti;  /* best rtext found so far */
+       xfs_rtxnum_t            bestlen;/* best length found so far */
+       xfs_rtxnum_t            end;    /* last rtext in chunk */
+       int                     error;
+       xfs_rtxnum_t            i;      /* current rtext trying */
+       xfs_rtxnum_t            next;   /* next rtext to try */
+       int                     stat;   /* status from internal calls */
 
        /*
         * Loop over all the extents starting in this bitmap block,
         * looking for one that's long enough.
         */
-       for (i = XFS_BLOCKTOBIT(mp, bbno), besti = -1, bestlen = 0,
-               end = XFS_BLOCKTOBIT(mp, bbno + 1) - 1;
+       for (i = xfs_rbmblock_to_rtx(mp, bbno), besti = -1, bestlen = 0,
+               end = xfs_rbmblock_to_rtx(mp, bbno + 1) - 1;
             i <= end;
             i++) {
                /* Make sure we don't scan off the end of the rt volume. */
-               maxlen = min(mp->m_sb.sb_rextents, i + maxlen) - i;
+               maxlen = xfs_rtallocate_clamp_len(mp, i, maxlen, prod);
 
                /*
                 * See if there's a free extent of maxlen starting at i.
                 * If it's not so then next will contain the first non-free.
                 */
-               error = xfs_rtcheck_range(mp, tp, i, maxlen, 1, &next, &stat);
+               error = xfs_rtcheck_range(args, i, maxlen, 1, &next, &stat);
                if (error) {
                        return error;
                }
@@ -262,13 +272,12 @@ xfs_rtallocate_extent_block(
                        /*
                         * i for maxlen is all free, allocate and return that.
                         */
-                       error = xfs_rtallocate_range(mp, tp, i, maxlen, rbpp,
-                               rsb);
+                       error = xfs_rtallocate_range(args, i, maxlen);
                        if (error) {
                                return error;
                        }
                        *len = maxlen;
-                       *rtblock = i;
+                       *rtx = i;
                        return 0;
                }
                /*
@@ -278,7 +287,7 @@ xfs_rtallocate_extent_block(
                 * so far, remember it.
                 */
                if (minlen < maxlen) {
-                       xfs_rtblock_t   thislen;        /* this extent size */
+                       xfs_rtxnum_t    thislen;        /* this extent size */
 
                        thislen = next - i;
                        if (thislen >= minlen && thislen > bestlen) {
@@ -290,7 +299,7 @@ xfs_rtallocate_extent_block(
                 * If not done yet, find the start of the next free space.
                 */
                if (next < end) {
-                       error = xfs_rtfind_forw(mp, tp, next, end, &i);
+                       error = xfs_rtfind_forw(args, next, end, &i);
                        if (error) {
                                return error;
                        }
@@ -301,7 +310,7 @@ xfs_rtallocate_extent_block(
         * Searched the whole thing & didn't find a maxlen free extent.
         */
        if (minlen < maxlen && besti != -1) {
-               xfs_extlen_t    p;      /* amount to trim length by */
+               xfs_rtxlen_t    p;      /* amount to trim length by */
 
                /*
                 * If size should be a multiple of prod, make that so.
@@ -315,51 +324,49 @@ xfs_rtallocate_extent_block(
                /*
                 * Allocate besti for bestlen & return that.
                 */
-               error = xfs_rtallocate_range(mp, tp, besti, bestlen, rbpp, rsb);
+               error = xfs_rtallocate_range(args, besti, bestlen);
                if (error) {
                        return error;
                }
                *len = bestlen;
-               *rtblock = besti;
+               *rtx = besti;
                return 0;
        }
        /*
         * Allocation failed.  Set *nextp to the next block to try.
         */
        *nextp = next;
-       *rtblock = NULLRTBLOCK;
+       *rtx = NULLRTEXTNO;
        return 0;
 }
 
 /*
  * Allocate an extent of length minlen<=len<=maxlen, starting at block
  * bno.  If we don't get maxlen then use prod to trim the length, if given.
- * Returns error; returns starting block in *rtblock.
+ * Returns error; returns starting block in *rtx.
  * The lengths are all in rtextents.
  */
-STATIC int                             /* error */
+STATIC int
 xfs_rtallocate_extent_exact(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   bno,            /* starting block number to allocate */
-       xfs_extlen_t    minlen,         /* minimum length to allocate */
-       xfs_extlen_t    maxlen,         /* maximum length to allocate */
-       xfs_extlen_t    *len,           /* out: actual length allocated */
-       struct xfs_buf  **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       xfs_extlen_t    prod,           /* extent product factor */
-       xfs_rtblock_t   *rtblock)       /* out: start block allocated */
+       struct xfs_rtalloc_args *args,
+       xfs_rtxnum_t            start,  /* starting rtext number to allocate */
+       xfs_rtxlen_t            minlen, /* minimum length to allocate */
+       xfs_rtxlen_t            maxlen, /* maximum length to allocate */
+       xfs_rtxlen_t            *len,   /* out: actual length allocated */
+       xfs_rtxlen_t            prod,   /* extent product factor */
+       xfs_rtxnum_t            *rtx)   /* out: start rtext allocated */
 {
-       int             error;          /* error value */
-       xfs_extlen_t    i;              /* extent length trimmed due to prod */
-       int             isfree;         /* extent is free */
-       xfs_rtblock_t   next;           /* next block to try (dummy) */
+       int                     error;
+       xfs_rtxlen_t            i;      /* extent length trimmed due to prod */
+       int                     isfree; /* extent is free */
+       xfs_rtxnum_t            next;   /* next rtext to try (dummy) */
 
-       ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+       ASSERT(minlen % prod == 0);
+       ASSERT(maxlen % prod == 0);
        /*
         * Check if the range in question (for maxlen) is free.
         */
-       error = xfs_rtcheck_range(mp, tp, bno, maxlen, 1, &next, &isfree);
+       error = xfs_rtcheck_range(args, start, maxlen, 1, &next, &isfree);
        if (error) {
                return error;
        }
@@ -367,23 +374,23 @@ xfs_rtallocate_extent_exact(
                /*
                 * If it is, allocate it and return success.
                 */
-               error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb);
+               error = xfs_rtallocate_range(args, start, maxlen);
                if (error) {
                        return error;
                }
                *len = maxlen;
-               *rtblock = bno;
+               *rtx = start;
                return 0;
        }
        /*
         * If not, allocate what there is, if it's at least minlen.
         */
-       maxlen = next - bno;
+       maxlen = next - start;
        if (maxlen < minlen) {
                /*
                 * Failed, return failure status.
                 */
-               *rtblock = NULLRTBLOCK;
+               *rtx = NULLRTEXTNO;
                return 0;
        }
        /*
@@ -395,81 +402,82 @@ xfs_rtallocate_extent_exact(
                        /*
                         * Now we can't do it, return failure status.
                         */
-                       *rtblock = NULLRTBLOCK;
+                       *rtx = NULLRTEXTNO;
                        return 0;
                }
        }
        /*
         * Allocate what we can and return it.
         */
-       error = xfs_rtallocate_range(mp, tp, bno, maxlen, rbpp, rsb);
+       error = xfs_rtallocate_range(args, start, maxlen);
        if (error) {
                return error;
        }
        *len = maxlen;
-       *rtblock = bno;
+       *rtx = start;
        return 0;
 }
 
 /*
  * Allocate an extent of length minlen<=len<=maxlen, starting as near
- * to bno as possible.  If we don't get maxlen then use prod to trim
+ * to start as possible.  If we don't get maxlen then use prod to trim
  * the length, if given.  The lengths are all in rtextents.
  */
-STATIC int                             /* error */
+STATIC int
 xfs_rtallocate_extent_near(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   bno,            /* starting block number to allocate */
-       xfs_extlen_t    minlen,         /* minimum length to allocate */
-       xfs_extlen_t    maxlen,         /* maximum length to allocate */
-       xfs_extlen_t    *len,           /* out: actual length allocated */
-       struct xfs_buf  **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       xfs_extlen_t    prod,           /* extent product factor */
-       xfs_rtblock_t   *rtblock)       /* out: start block allocated */
+       struct xfs_rtalloc_args *args,
+       xfs_rtxnum_t            start,  /* starting rtext number to allocate */
+       xfs_rtxlen_t            minlen, /* minimum length to allocate */
+       xfs_rtxlen_t            maxlen, /* maximum length to allocate */
+       xfs_rtxlen_t            *len,   /* out: actual length allocated */
+       xfs_rtxlen_t            prod,   /* extent product factor */
+       xfs_rtxnum_t            *rtx)   /* out: start rtext allocated */
 {
-       int             any;            /* any useful extents from summary */
-       xfs_rtblock_t   bbno;           /* bitmap block number */
-       int             error;          /* error value */
-       int             i;              /* bitmap block offset (loop control) */
-       int             j;              /* secondary loop control */
-       int             log2len;        /* log2 of minlen */
-       xfs_rtblock_t   n;              /* next block to try */
-       xfs_rtblock_t   r;              /* result block */
-
-       ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+       struct xfs_mount        *mp = args->mp;
+       int                     maxlog; /* log2 of max free extent from summary, <0 if none */
+       xfs_fileoff_t           bbno;   /* bitmap block number */
+       int                     error;
+       int                     i;      /* bitmap block offset (loop control) */
+       int                     j;      /* secondary loop control */
+       int                     log2len; /* log2 of minlen */
+       xfs_rtxnum_t            n;      /* next rtext to try */
+       xfs_rtxnum_t            r;      /* result rtext */
+
+       ASSERT(minlen % prod == 0);
+       ASSERT(maxlen % prod == 0);
+
        /*
         * If the block number given is off the end, silently set it to
         * the last block.
         */
-       if (bno >= mp->m_sb.sb_rextents)
-               bno = mp->m_sb.sb_rextents - 1;
+       if (start >= mp->m_sb.sb_rextents)
+               start = mp->m_sb.sb_rextents - 1;
 
        /* Make sure we don't run off the end of the rt volume. */
-       maxlen = min(mp->m_sb.sb_rextents, bno + maxlen) - bno;
+       maxlen = xfs_rtallocate_clamp_len(mp, start, maxlen, prod);
        if (maxlen < minlen) {
-               *rtblock = NULLRTBLOCK;
+               *rtx = NULLRTEXTNO;
                return 0;
        }
 
        /*
         * Try the exact allocation first.
         */
-       error = xfs_rtallocate_extent_exact(mp, tp, bno, minlen, maxlen, len,
-               rbpp, rsb, prod, &r);
+       error = xfs_rtallocate_extent_exact(args, start, minlen, maxlen, len,
+                       prod, &r);
        if (error) {
                return error;
        }
        /*
         * If the exact allocation worked, return that.
         */
-       if (r != NULLRTBLOCK) {
-               *rtblock = r;
+       if (r != NULLRTEXTNO) {
+               *rtx = r;
                return 0;
        }
-       bbno = XFS_BITTOBLOCK(mp, bno);
+       bbno = xfs_rtx_to_rbmblock(mp, start);
        i = 0;
+       j = -1;
        ASSERT(minlen != 0);
        log2len = xfs_highbit32(minlen);
        /*
@@ -480,8 +488,8 @@ xfs_rtallocate_extent_near(
                 * Get summary information of extents of all useful levels
                 * starting in this bitmap block.
                 */
-               error = xfs_rtany_summary(mp, tp, log2len, mp->m_rsumlevels - 1,
-                       bbno + i, rbpp, rsb, &any);
+               error = xfs_rtany_summary(args, log2len, mp->m_rsumlevels - 1,
+                               bbno + i, &maxlog);
                if (error) {
                        return error;
                }
@@ -489,7 +497,10 @@ xfs_rtallocate_extent_near(
                 * If there are any useful extents starting here, try
                 * allocating one.
                 */
-               if (any) {
+               if (maxlog >= 0) {
+                       xfs_extlen_t maxavail =
+                               min_t(xfs_rtblock_t, maxlen,
+                                     (1ULL << (maxlog + 1)) - 1);
                        /*
                         * On the positive side of the starting location.
                         */
@@ -498,17 +509,17 @@ xfs_rtallocate_extent_near(
                                 * Try to allocate an extent starting in
                                 * this block.
                                 */
-                               error = xfs_rtallocate_extent_block(mp, tp,
-                                       bbno + i, minlen, maxlen, len, &n, rbpp,
-                                       rsb, prod, &r);
+                               error = xfs_rtallocate_extent_block(args,
+                                               bbno + i, minlen, maxavail, len,
+                                               &n, prod, &r);
                                if (error) {
                                        return error;
                                }
                                /*
                                 * If it worked, return it.
                                 */
-                               if (r != NULLRTBLOCK) {
-                                       *rtblock = r;
+                               if (r != NULLRTEXTNO) {
+                                       *rtx = r;
                                        return 0;
                                }
                        }
@@ -516,68 +527,46 @@ xfs_rtallocate_extent_near(
                         * On the negative side of the starting location.
                         */
                        else {          /* i < 0 */
+                               int     maxblocks;
+
                                /*
-                                * Loop backwards through the bitmap blocks from
-                                * the starting point-1 up to where we are now.
-                                * There should be an extent which ends in this
-                                * bitmap block and is long enough.
+                                * Loop backwards to find the end of the extent
+                                * we found in the realtime summary.
+                                *
+                                * maxblocks is the maximum possible number of
+                                * bitmap blocks from the start of the extent
+                                * to the end of the extent.
                                 */
-                               for (j = -1; j > i; j--) {
-                                       /*
-                                        * Grab the summary information for
-                                        * this bitmap block.
-                                        */
-                                       error = xfs_rtany_summary(mp, tp,
-                                               log2len, mp->m_rsumlevels - 1,
-                                               bbno + j, rbpp, rsb, &any);
-                                       if (error) {
-                                               return error;
-                                       }
-                                       /*
-                                        * If there's no extent given in the
-                                        * summary that means the extent we
-                                        * found must carry over from an
-                                        * earlier block.  If there is an
-                                        * extent given, we've already tried
-                                        * that allocation, don't do it again.
-                                        */
-                                       if (any)
-                                               continue;
-                                       error = xfs_rtallocate_extent_block(mp,
-                                               tp, bbno + j, minlen, maxlen,
-                                               len, &n, rbpp, rsb, prod, &r);
+                               if (maxlog == 0)
+                                       maxblocks = 0;
+                               else if (maxlog < mp->m_blkbit_log)
+                                       maxblocks = 1;
+                               else
+                                       maxblocks = 2 << (maxlog - mp->m_blkbit_log);
+
+                               /*
+                                * We need to check bbno + i + maxblocks down to
+                                * bbno + i. We already checked bbno down to
+                                * bbno + j + 1, so we don't need to check those
+                                * again.
+                                */
+                               j = min(i + maxblocks, j);
+                               for (; j >= i; j--) {
+                                       error = xfs_rtallocate_extent_block(args,
+                                                       bbno + j, minlen,
+                                                       maxavail, len, &n, prod,
+                                                       &r);
                                        if (error) {
                                                return error;
                                        }
                                        /*
                                         * If it works, return the extent.
                                         */
-                                       if (r != NULLRTBLOCK) {
-                                               *rtblock = r;
+                                       if (r != NULLRTEXTNO) {
+                                               *rtx = r;
                                                return 0;
                                        }
                                }
-                               /*
-                                * There weren't intervening bitmap blocks
-                                * with a long enough extent, or the
-                                * allocation didn't work for some reason
-                                * (i.e. it's a little * too short).
-                                * Try to allocate from the summary block
-                                * that we found.
-                                */
-                               error = xfs_rtallocate_extent_block(mp, tp,
-                                       bbno + i, minlen, maxlen, len, &n, rbpp,
-                                       rsb, prod, &r);
-                               if (error) {
-                                       return error;
-                               }
-                               /*
-                                * If it works, return the extent.
-                                */
-                               if (r != NULLRTBLOCK) {
-                                       *rtblock = r;
-                                       return 0;
-                               }
                        }
                }
                /*
@@ -610,7 +599,7 @@ xfs_rtallocate_extent_near(
                else
                        break;
        }
-       *rtblock = NULLRTBLOCK;
+       *rtx = NULLRTEXTNO;
        return 0;
 }
 
@@ -619,26 +608,25 @@ xfs_rtallocate_extent_near(
  * specified.  If we don't get maxlen then use prod to trim
  * the length, if given.  The lengths are all in rtextents.
  */
-STATIC int                             /* error */
+STATIC int
 xfs_rtallocate_extent_size(
-       xfs_mount_t     *mp,            /* file system mount point */
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_extlen_t    minlen,         /* minimum length to allocate */
-       xfs_extlen_t    maxlen,         /* maximum length to allocate */
-       xfs_extlen_t    *len,           /* out: actual length allocated */
-       struct xfs_buf  **rbpp,         /* in/out: summary block buffer */
-       xfs_fsblock_t   *rsb,           /* in/out: summary block number */
-       xfs_extlen_t    prod,           /* extent product factor */
-       xfs_rtblock_t   *rtblock)       /* out: start block allocated */
+       struct xfs_rtalloc_args *args,
+       xfs_rtxlen_t            minlen, /* minimum length to allocate */
+       xfs_rtxlen_t            maxlen, /* maximum length to allocate */
+       xfs_rtxlen_t            *len,   /* out: actual length allocated */
+       xfs_rtxlen_t            prod,   /* extent product factor */
+       xfs_rtxnum_t            *rtx)   /* out: start rtext allocated */
 {
-       int             error;          /* error value */
-       int             i;              /* bitmap block number */
-       int             l;              /* level number (loop control) */
-       xfs_rtblock_t   n;              /* next block to be tried */
-       xfs_rtblock_t   r;              /* result block number */
-       xfs_suminfo_t   sum;            /* summary information for extents */
-
-       ASSERT(minlen % prod == 0 && maxlen % prod == 0);
+       struct xfs_mount        *mp = args->mp;
+       int                     error;
+       xfs_fileoff_t           i;      /* bitmap block number */
+       int                     l;      /* level number (loop control) */
+       xfs_rtxnum_t            n;      /* next rtext to be tried */
+       xfs_rtxnum_t            r;      /* result rtext number */
+       xfs_suminfo_t           sum;    /* summary information for extents */
+
+       ASSERT(minlen % prod == 0);
+       ASSERT(maxlen % prod == 0);
        ASSERT(maxlen != 0);
 
        /*
@@ -656,8 +644,7 @@ xfs_rtallocate_extent_size(
                        /*
                         * Get the summary for this level/block.
                         */
-                       error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb,
-                               &sum);
+                       error = xfs_rtget_summary(args, l, i, &sum);
                        if (error) {
                                return error;
                        }
@@ -669,16 +656,16 @@ xfs_rtallocate_extent_size(
                        /*
                         * Try allocating the extent.
                         */
-                       error = xfs_rtallocate_extent_block(mp, tp, i, maxlen,
-                               maxlen, len, &n, rbpp, rsb, prod, &r);
+                       error = xfs_rtallocate_extent_block(args, i, maxlen,
+                                       maxlen, len, &n, prod, &r);
                        if (error) {
                                return error;
                        }
                        /*
                         * If it worked, return that.
                         */
-                       if (r != NULLRTBLOCK) {
-                               *rtblock = r;
+                       if (r != NULLRTEXTNO) {
+                               *rtx = r;
                                return 0;
                        }
                        /*
@@ -686,8 +673,8 @@ xfs_rtallocate_extent_size(
                         * allocator is beyond the next bitmap block,
                         * skip to that bitmap block.
                         */
-                       if (XFS_BITTOBLOCK(mp, n) > i + 1)
-                               i = XFS_BITTOBLOCK(mp, n) - 1;
+                       if (xfs_rtx_to_rbmblock(mp, n) > i + 1)
+                               i = xfs_rtx_to_rbmblock(mp, n) - 1;
                }
        }
        /*
@@ -695,7 +682,7 @@ xfs_rtallocate_extent_size(
         * we're asking for a fixed size extent.
         */
        if (minlen > --maxlen) {
-               *rtblock = NULLRTBLOCK;
+               *rtx = NULLRTEXTNO;
                return 0;
        }
        ASSERT(minlen != 0);
@@ -715,8 +702,7 @@ xfs_rtallocate_extent_size(
                        /*
                         * Get the summary information for this level/block.
                         */
-                       error = xfs_rtget_summary(mp, tp, l, i, rbpp, rsb,
-                                                 &sum);
+                       error = xfs_rtget_summary(args, l, i, &sum);
                        if (error) {
                                return error;
                        }
@@ -730,18 +716,18 @@ xfs_rtallocate_extent_size(
                         * minlen/maxlen are in the possible range for
                         * this summary level.
                         */
-                       error = xfs_rtallocate_extent_block(mp, tp, i,
+                       error = xfs_rtallocate_extent_block(args, i,
                                        XFS_RTMAX(minlen, 1 << l),
                                        XFS_RTMIN(maxlen, (1 << (l + 1)) - 1),
-                                       len, &n, rbpp, rsb, prod, &r);
+                                       len, &n, prod, &r);
                        if (error) {
                                return error;
                        }
                        /*
                         * If it worked, return that extent.
                         */
-                       if (r != NULLRTBLOCK) {
-                               *rtblock = r;
+                       if (r != NULLRTEXTNO) {
+                               *rtx = r;
                                return 0;
                        }
                        /*
@@ -749,14 +735,14 @@ xfs_rtallocate_extent_size(
                         * allocator is beyond the next bitmap block,
                         * skip to that bitmap block.
                         */
-                       if (XFS_BITTOBLOCK(mp, n) > i + 1)
-                               i = XFS_BITTOBLOCK(mp, n) - 1;
+                       if (xfs_rtx_to_rbmblock(mp, n) > i + 1)
+                               i = xfs_rtx_to_rbmblock(mp, n) - 1;
                }
        }
        /*
         * Got nothing, return failure.
         */
-       *rtblock = NULLRTBLOCK;
+       *rtx = NULLRTEXTNO;
        return 0;
 }
 
@@ -886,12 +872,14 @@ xfs_alloc_rsum_cache(
        xfs_extlen_t    rbmblocks)      /* number of rt bitmap blocks */
 {
        /*
-        * The rsum cache is initialized to all zeroes, which is trivially a
-        * lower bound on the minimum level with any free extents. We can
-        * continue without the cache if it couldn't be allocated.
+        * The rsum cache is initialized to the maximum value, which is
+        * trivially an upper bound on the maximum level with any free extents.
+        * We can continue without the cache if it couldn't be allocated.
         */
-       mp->m_rsum_cache = kvzalloc(rbmblocks, GFP_KERNEL);
-       if (!mp->m_rsum_cache)
+       mp->m_rsum_cache = kvmalloc(rbmblocks, GFP_KERNEL);
+       if (mp->m_rsum_cache)
+               memset(mp->m_rsum_cache, -1, rbmblocks);
+       else
                xfs_warn(mp, "could not allocate realtime summary cache");
 }
 
@@ -907,13 +895,13 @@ xfs_growfs_rt(
        xfs_mount_t     *mp,            /* mount point for filesystem */
        xfs_growfs_rt_t *in)            /* growfs rt input struct */
 {
-       xfs_rtblock_t   bmbno;          /* bitmap block number */
+       xfs_fileoff_t   bmbno;          /* bitmap block number */
        struct xfs_buf  *bp;            /* temporary buffer */
        int             error;          /* error return value */
        xfs_mount_t     *nmp;           /* new (fake) mount structure */
        xfs_rfsblock_t  nrblocks;       /* new number of realtime blocks */
        xfs_extlen_t    nrbmblocks;     /* new number of rt bitmap blocks */
-       xfs_rtblock_t   nrextents;      /* new number of realtime extents */
+       xfs_rtxnum_t    nrextents;      /* new number of realtime extents */
        uint8_t         nrextslog;      /* new log2 of sb_rextents */
        xfs_extlen_t    nrsumblocks;    /* new number of summary blocks */
        uint            nrsumlevels;    /* new rt summary levels */
@@ -922,7 +910,6 @@ xfs_growfs_rt(
        xfs_extlen_t    rbmblocks;      /* current number of rt bitmap blocks */
        xfs_extlen_t    rsumblocks;     /* current number of rt summary blks */
        xfs_sb_t        *sbp;           /* old superblock */
-       xfs_fsblock_t   sumbno;         /* summary block number */
        uint8_t         *rsum_cache;    /* old summary cache */
 
        sbp = &mp->m_sb;
@@ -954,7 +941,7 @@ xfs_growfs_rt(
                return -EINVAL;
 
        /* Unsupported realtime features. */
-       if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp))
+       if (xfs_has_rmapbt(mp) || xfs_has_reflink(mp) || xfs_has_quota(mp))
                return -EOPNOTSUPP;
 
        nrblocks = in->newblocks;
@@ -976,11 +963,10 @@ xfs_growfs_rt(
         */
        nrextents = nrblocks;
        do_div(nrextents, in->extsize);
-       nrbmblocks = howmany_64(nrextents, NBBY * sbp->sb_blocksize);
+       nrbmblocks = xfs_rtbitmap_blockcount(mp, nrextents);
        nrextslog = xfs_highbit32(nrextents);
        nrsumlevels = nrextslog + 1;
-       nrsumsize = (uint)sizeof(xfs_suminfo_t) * nrsumlevels * nrbmblocks;
-       nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize);
+       nrsumblocks = xfs_rtsummary_blockcount(mp, nrsumlevels, nrbmblocks);
        nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks);
        /*
         * New summary size can't be more than half the size of
@@ -1023,6 +1009,12 @@ xfs_growfs_rt(
                     ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0);
             bmbno < nrbmblocks;
             bmbno++) {
+               struct xfs_rtalloc_args args = {
+                       .mp             = mp,
+               };
+               struct xfs_rtalloc_args nargs = {
+                       .mp             = nmp,
+               };
                struct xfs_trans        *tp;
                xfs_rfsblock_t          nrblocks_step;
 
@@ -1032,19 +1024,17 @@ xfs_growfs_rt(
                 * Calculate new sb and mount fields for this round.
                 */
                nsbp->sb_rextsize = in->extsize;
+               nmp->m_rtxblklog = -1; /* don't use shift or masking */
                nsbp->sb_rbmblocks = bmbno + 1;
                nrblocks_step = (bmbno + 1) * NBBY * nsbp->sb_blocksize *
                                nsbp->sb_rextsize;
                nsbp->sb_rblocks = min(nrblocks, nrblocks_step);
-               nsbp->sb_rextents = nsbp->sb_rblocks;
-               do_div(nsbp->sb_rextents, nsbp->sb_rextsize);
+               nsbp->sb_rextents = xfs_rtb_to_rtx(nmp, nsbp->sb_rblocks);
                ASSERT(nsbp->sb_rextents != 0);
                nsbp->sb_rextslog = xfs_highbit32(nsbp->sb_rextents);
                nrsumlevels = nmp->m_rsumlevels = nsbp->sb_rextslog + 1;
-               nrsumsize =
-                       (uint)sizeof(xfs_suminfo_t) * nrsumlevels *
-                       nsbp->sb_rbmblocks;
-               nrsumblocks = XFS_B_TO_FSB(mp, nrsumsize);
+               nrsumblocks = xfs_rtsummary_blockcount(mp, nrsumlevels,
+                               nsbp->sb_rbmblocks);
                nmp->m_rsumsize = nrsumsize = XFS_FSB_TO_B(mp, nrsumblocks);
                /*
                 * Start a transaction, get the log reservation.
@@ -1053,6 +1043,9 @@ xfs_growfs_rt(
                                &tp);
                if (error)
                        break;
+               args.tp = tp;
+               nargs.tp = tp;
+
                /*
                 * Lock out other callers by grabbing the bitmap inode lock.
                 */
@@ -1086,7 +1079,7 @@ xfs_growfs_rt(
                 */
                if (sbp->sb_rbmblocks != nsbp->sb_rbmblocks ||
                    mp->m_rsumlevels != nmp->m_rsumlevels) {
-                       error = xfs_rtcopy_summary(mp, nmp, tp);
+                       error = xfs_rtcopy_summary(&args, &nargs);
                        if (error)
                                goto error_cancel;
                }
@@ -1111,9 +1104,9 @@ xfs_growfs_rt(
                /*
                 * Free new extent.
                 */
-               bp = NULL;
-               error = xfs_rtfree_range(nmp, tp, sbp->sb_rextents,
-                       nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno);
+               error = xfs_rtfree_range(&nargs, sbp->sb_rextents,
+                               nsbp->sb_rextents - sbp->sb_rextents);
+               xfs_rtbuf_cache_relse(&nargs);
                if (error) {
 error_cancel:
                        xfs_trans_cancel(tp);
@@ -1171,59 +1164,60 @@ out_free:
  * parameters.  The length units are all in realtime extents, as is the
  * result block number.
  */
-int                                    /* error */
+int
 xfs_rtallocate_extent(
-       xfs_trans_t     *tp,            /* transaction pointer */
-       xfs_rtblock_t   bno,            /* starting block number to allocate */
-       xfs_extlen_t    minlen,         /* minimum length to allocate */
-       xfs_extlen_t    maxlen,         /* maximum length to allocate */
-       xfs_extlen_t    *len,           /* out: actual length allocated */
-       int             wasdel,         /* was a delayed allocation extent */
-       xfs_extlen_t    prod,           /* extent product factor */
-       xfs_rtblock_t   *rtblock)       /* out: start block allocated */
+       struct xfs_trans        *tp,
+       xfs_rtxnum_t            start,  /* starting rtext number to allocate */
+       xfs_rtxlen_t            minlen, /* minimum length to allocate */
+       xfs_rtxlen_t            maxlen, /* maximum length to allocate */
+       xfs_rtxlen_t            *len,   /* out: actual length allocated */
+       int                     wasdel, /* was a delayed allocation extent */
+       xfs_rtxlen_t            prod,   /* extent product factor */
+       xfs_rtxnum_t            *rtblock) /* out: start rtext allocated */
 {
-       xfs_mount_t     *mp = tp->t_mountp;
-       int             error;          /* error value */
-       xfs_rtblock_t   r;              /* result allocated block */
-       xfs_fsblock_t   sb;             /* summary file block number */
-       struct xfs_buf  *sumbp;         /* summary file block buffer */
-
-       ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
+       struct xfs_rtalloc_args args = {
+               .mp             = tp->t_mountp,
+               .tp             = tp,
+       };
+       int                     error;  /* error value */
+       xfs_rtxnum_t            r;      /* result allocated rtext */
+
+       ASSERT(xfs_isilocked(args.mp->m_rbmip, XFS_ILOCK_EXCL));
        ASSERT(minlen > 0 && minlen <= maxlen);
 
        /*
         * If prod is set then figure out what to do to minlen and maxlen.
         */
        if (prod > 1) {
-               xfs_extlen_t    i;
+               xfs_rtxlen_t    i;
 
                if ((i = maxlen % prod))
                        maxlen -= i;
                if ((i = minlen % prod))
                        minlen += prod - i;
                if (maxlen < minlen) {
-                       *rtblock = NULLRTBLOCK;
+                       *rtblock = NULLRTEXTNO;
                        return 0;
                }
        }
 
 retry:
-       sumbp = NULL;
-       if (bno == 0) {
-               error = xfs_rtallocate_extent_size(mp, tp, minlen, maxlen, len,
-                               &sumbp, &sb, prod, &r);
+       if (start == 0) {
+               error = xfs_rtallocate_extent_size(&args, minlen,
+                               maxlen, len, prod, &r);
        } else {
-               error = xfs_rtallocate_extent_near(mp, tp, bno, minlen, maxlen,
-                               len, &sumbp, &sb, prod, &r);
+               error = xfs_rtallocate_extent_near(&args, start, minlen,
+                               maxlen, len, prod, &r);
        }
 
+       xfs_rtbuf_cache_relse(&args);
        if (error)
                return error;
 
        /*
         * If it worked, update the superblock.
         */
-       if (r != NULLRTBLOCK) {
+       if (r != NULLRTEXTNO) {
                long    slen = (long)*len;
 
                ASSERT(*len >= minlen && *len <= maxlen);
@@ -1250,6 +1244,7 @@ xfs_rtmount_init(
        struct xfs_buf          *bp;    /* buffer for last block of subvolume */
        struct xfs_sb           *sbp;   /* filesystem superblock copy in mount */
        xfs_daddr_t             d;      /* address of last block of subvolume */
+       unsigned int            rsumblocks;
        int                     error;
 
        sbp = &mp->m_sb;
@@ -1261,10 +1256,9 @@ xfs_rtmount_init(
                return -ENODEV;
        }
        mp->m_rsumlevels = sbp->sb_rextslog + 1;
-       mp->m_rsumsize =
-               (uint)sizeof(xfs_suminfo_t) * mp->m_rsumlevels *
-               sbp->sb_rbmblocks;
-       mp->m_rsumsize = roundup(mp->m_rsumsize, sbp->sb_blocksize);
+       rsumblocks = xfs_rtsummary_blockcount(mp, mp->m_rsumlevels,
+                       mp->m_sb.sb_rbmblocks);
+       mp->m_rsumsize = XFS_FSB_TO_B(mp, rsumblocks);
        mp->m_rbmip = mp->m_rsumip = NULL;
        /*
         * Check that the realtime section is an ok size.
@@ -1418,27 +1412,27 @@ xfs_rtunmount_inodes(
  * of rtextents and the fraction.
  * The fraction sequence is 0, 1/2, 1/4, 3/4, 1/8, ..., 7/8, 1/16, ...
  */
-int                                    /* error */
+int                                            /* error */
 xfs_rtpick_extent(
        xfs_mount_t             *mp,            /* file system mount point */
        xfs_trans_t             *tp,            /* transaction pointer */
-       xfs_extlen_t            len,            /* allocation length (rtextents) */
-       xfs_rtblock_t           *pick)          /* result rt extent */
-       {
-       xfs_rtblock_t           b;              /* result block */
+       xfs_rtxlen_t            len,            /* allocation length (rtextents) */
+       xfs_rtxnum_t            *pick)          /* result rt extent */
+{
+       xfs_rtxnum_t            b;              /* result rtext */
        int                     log2;           /* log of sequence number */
        uint64_t                resid;          /* residual after log removed */
        uint64_t                seq;            /* sequence number of file creation */
-       struct timespec64       ts;             /* temporary timespec64 storage */
+       struct timespec64       ts;             /* timespec in inode */
 
        ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
 
+       ts = inode_get_atime(VFS_I(mp->m_rbmip));
        if (!(mp->m_rbmip->i_diflags & XFS_DIFLAG_NEWRTBM)) {
                mp->m_rbmip->i_diflags |= XFS_DIFLAG_NEWRTBM;
                seq = 0;
        } else {
-               ts = inode_get_atime(VFS_I(mp->m_rbmip));
-               seq = (uint64_t)ts.tv_sec;
+               seq = ts.tv_sec;
        }
        if ((log2 = xfs_highbit64(seq)) == -1)
                b = 0;
@@ -1451,7 +1445,7 @@ xfs_rtpick_extent(
                if (b + len > mp->m_sb.sb_rextents)
                        b = mp->m_sb.sb_rextents - len;
        }
-       ts.tv_sec = (time64_t)seq + 1;
+       ts.tv_sec = seq + 1;
        inode_set_atime_to_ts(VFS_I(mp->m_rbmip), ts);
        xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
        *pick = b;
index 62c7ad79cbb618af46c8b21a325b038b6c5cc205..f7cb9ffe51ca68fbf8a87e6f64be68dc399c9d5f 100644 (file)
 struct xfs_mount;
 struct xfs_trans;
 
-/*
- * XXX: Most of the realtime allocation functions deal in units of realtime
- * extents, not realtime blocks.  This looks funny when paired with the type
- * name and screams for a larger cleanup.
- */
-struct xfs_rtalloc_rec {
-       xfs_rtblock_t           ar_startext;
-       xfs_rtblock_t           ar_extcount;
-};
-
-typedef int (*xfs_rtalloc_query_range_fn)(
-       struct xfs_mount                *mp,
-       struct xfs_trans                *tp,
-       const struct xfs_rtalloc_rec    *rec,
-       void                            *priv);
-
 #ifdef CONFIG_XFS_RT
 /*
  * Function prototypes for exported functions.
@@ -40,23 +24,14 @@ typedef int (*xfs_rtalloc_query_range_fn)(
 int                                    /* error */
 xfs_rtallocate_extent(
        struct xfs_trans        *tp,    /* transaction pointer */
-       xfs_rtblock_t           bno,    /* starting block number to allocate */
-       xfs_extlen_t            minlen, /* minimum length to allocate */
-       xfs_extlen_t            maxlen, /* maximum length to allocate */
-       xfs_extlen_t            *len,   /* out: actual length allocated */
+       xfs_rtxnum_t            start,  /* starting rtext number to allocate */
+       xfs_rtxlen_t            minlen, /* minimum length to allocate */
+       xfs_rtxlen_t            maxlen, /* maximum length to allocate */
+       xfs_rtxlen_t            *len,   /* out: actual length allocated */
        int                     wasdel, /* was a delayed allocation extent */
-       xfs_extlen_t            prod,   /* extent product factor */
-       xfs_rtblock_t           *rtblock); /* out: start block allocated */
+       xfs_rtxlen_t            prod,   /* extent product factor */
+       xfs_rtxnum_t            *rtblock); /* out: start rtext allocated */
 
-/*
- * Free an extent in the realtime subvolume.  Length is expressed in
- * realtime extents, as is the block number.
- */
-int                                    /* error */
-xfs_rtfree_extent(
-       struct xfs_trans        *tp,    /* transaction pointer */
-       xfs_rtblock_t           bno,    /* starting block number to free */
-       xfs_extlen_t            len);   /* length of extent freed */
 
 /*
  * Initialize realtime fields in the mount structure.
@@ -87,8 +62,8 @@ int                                   /* error */
 xfs_rtpick_extent(
        struct xfs_mount        *mp,    /* file system mount point */
        struct xfs_trans        *tp,    /* transaction pointer */
-       xfs_extlen_t            len,    /* allocation length (rtextents) */
-       xfs_rtblock_t           *pick); /* result rt extent */
+       xfs_rtxlen_t            len,    /* allocation length (rtextents) */
+       xfs_rtxnum_t            *pick); /* result rt extent */
 
 /*
  * Grow the realtime area of the filesystem.
@@ -98,55 +73,12 @@ xfs_growfs_rt(
        struct xfs_mount        *mp,    /* file system mount structure */
        xfs_growfs_rt_t         *in);   /* user supplied growfs struct */
 
-/*
- * From xfs_rtbitmap.c
- */
-int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp,
-                 xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
-int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp,
-                     xfs_rtblock_t start, xfs_extlen_t len, int val,
-                     xfs_rtblock_t *new, int *stat);
-int xfs_rtfind_back(struct xfs_mount *mp, struct xfs_trans *tp,
-                   xfs_rtblock_t start, xfs_rtblock_t limit,
-                   xfs_rtblock_t *rtblock);
-int xfs_rtfind_forw(struct xfs_mount *mp, struct xfs_trans *tp,
-                   xfs_rtblock_t start, xfs_rtblock_t limit,
-                   xfs_rtblock_t *rtblock);
-int xfs_rtmodify_range(struct xfs_mount *mp, struct xfs_trans *tp,
-                      xfs_rtblock_t start, xfs_extlen_t len, int val);
-int xfs_rtmodify_summary_int(struct xfs_mount *mp, struct xfs_trans *tp,
-                            int log, xfs_rtblock_t bbno, int delta,
-                            struct xfs_buf **rbpp, xfs_fsblock_t *rsb,
-                            xfs_suminfo_t *sum);
-int xfs_rtmodify_summary(struct xfs_mount *mp, struct xfs_trans *tp, int log,
-                        xfs_rtblock_t bbno, int delta, struct xfs_buf **rbpp,
-                        xfs_fsblock_t *rsb);
-int xfs_rtfree_range(struct xfs_mount *mp, struct xfs_trans *tp,
-                    xfs_rtblock_t start, xfs_extlen_t len,
-                    struct xfs_buf **rbpp, xfs_fsblock_t *rsb);
-int xfs_rtalloc_query_range(struct xfs_mount *mp, struct xfs_trans *tp,
-               const struct xfs_rtalloc_rec *low_rec,
-               const struct xfs_rtalloc_rec *high_rec,
-               xfs_rtalloc_query_range_fn fn, void *priv);
-int xfs_rtalloc_query_all(struct xfs_mount *mp, struct xfs_trans *tp,
-                         xfs_rtalloc_query_range_fn fn,
-                         void *priv);
-bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno);
-int xfs_rtalloc_extent_is_free(struct xfs_mount *mp, struct xfs_trans *tp,
-                              xfs_rtblock_t start, xfs_extlen_t len,
-                              bool *is_free);
 int xfs_rtalloc_reinit_frextents(struct xfs_mount *mp);
 #else
-# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb)    (ENOSYS)
-# define xfs_rtfree_extent(t,b,l)                       (ENOSYS)
-# define xfs_rtpick_extent(m,t,l,rb)                    (ENOSYS)
-# define xfs_growfs_rt(mp,in)                           (ENOSYS)
-# define xfs_rtalloc_query_range(t,l,h,f,p)             (ENOSYS)
-# define xfs_rtalloc_query_all(m,t,f,p)                 (ENOSYS)
-# define xfs_rtbuf_get(m,t,b,i,p)                       (ENOSYS)
-# define xfs_verify_rtbno(m, r)                        (false)
-# define xfs_rtalloc_extent_is_free(m,t,s,l,i)          (ENOSYS)
-# define xfs_rtalloc_reinit_frextents(m)                (0)
+# define xfs_rtallocate_extent(t,b,min,max,l,f,p,rb)   (-ENOSYS)
+# define xfs_rtpick_extent(m,t,l,rb)                   (-ENOSYS)
+# define xfs_growfs_rt(mp,in)                          (-ENOSYS)
+# define xfs_rtalloc_reinit_frextents(m)               (0)
 static inline int              /* error */
 xfs_rtmount_init(
        xfs_mount_t     *mp)    /* file system mount structure */
@@ -157,7 +89,7 @@ xfs_rtmount_init(
        xfs_warn(mp, "Not built with CONFIG_XFS_RT");
        return -ENOSYS;
 }
-# define xfs_rtmount_inodes(m)  (((mp)->m_sb.sb_rblocks == 0)? 0 : (ENOSYS))
+# define xfs_rtmount_inodes(m)  (((mp)->m_sb.sb_rblocks == 0)? 0 : (-ENOSYS))
 # define xfs_rtunmount_inodes(m)
 #endif /* CONFIG_XFS_RT */
 
index f0ae07828153130004b3fb8fef4a12f19520a9fb..764304595e8b00b38c5a38e0673d88152a532e93 100644 (file)
@@ -42,6 +42,7 @@
 #include "xfs_xattr.h"
 #include "xfs_iunlink_item.h"
 #include "xfs_dahash_test.h"
+#include "xfs_rtbitmap.h"
 #include "scrub/stats.h"
 
 #include <linux/magic.h>
@@ -896,7 +897,7 @@ xfs_fs_statfs(
 
                statp->f_blocks = sbp->sb_rblocks;
                freertx = percpu_counter_sum_positive(&mp->m_frextents);
-               statp->f_bavail = statp->f_bfree = freertx * sbp->sb_rextsize;
+               statp->f_bavail = statp->f_bfree = xfs_rtx_to_rtb(mp, freertx);
        }
 
        return 0;
index 8c0bfc9a33b116cd74606daa2d6de4fc8362f004..305c9d07bf1b27f7c7289d746fa0f84170e60b46 100644 (file)
@@ -24,6 +24,7 @@
 #include "xfs_dquot_item.h"
 #include "xfs_dquot.h"
 #include "xfs_icache.h"
+#include "xfs_rtbitmap.h"
 
 struct kmem_cache      *xfs_trans_cache;
 
@@ -655,6 +656,10 @@ xfs_trans_unreserve_and_mod_sb(
        mp->m_sb.sb_agcount += tp->t_agcount_delta;
        mp->m_sb.sb_imax_pct += tp->t_imaxpct_delta;
        mp->m_sb.sb_rextsize += tp->t_rextsize_delta;
+       if (tp->t_rextsize_delta) {
+               mp->m_rtxblklog = log2_if_power2(mp->m_sb.sb_rextsize);
+               mp->m_rtxblkmask = mask64_if_power2(mp->m_sb.sb_rextsize);
+       }
        mp->m_sb.sb_rbmblocks += tp->t_rbmblocks_delta;
        mp->m_sb.sb_rblocks += tp->t_rblocks_delta;
        mp->m_sb.sb_rextents += tp->t_rextents_delta;
@@ -1196,7 +1201,7 @@ xfs_trans_alloc_inode(
 
 retry:
        error = xfs_trans_alloc(mp, resv, dblocks,
-                       rblocks / mp->m_sb.sb_rextsize,
+                       xfs_extlen_to_rtxlen(mp, rblocks),
                        force ? XFS_TRANS_RESERVE : 0, &tp);
        if (error)
                return error;
index 90b69270f2fafd0d77f5c2f1f6e4a23fe0fed4c4..724c45e3e9a78d8b72d2722146aef13d5c169e9d 100644 (file)
@@ -68,4 +68,9 @@ enum amd_asic_type {
 
 extern const char *amdgpu_asic_name[];
 
+struct amdgpu_asic_type_quirk {
+       unsigned short device;  /* PCI device ID */
+       u8 revision;    /* revision ID */
+       unsigned short type;    /* real ASIC type */
+};
 #endif /*__AMD_ASIC_TYPE_H__ */
diff --git a/include/dt-bindings/watchdog/aspeed-wdt.h b/include/dt-bindings/watchdog/aspeed-wdt.h
new file mode 100644 (file)
index 0000000..7ae6d84
--- /dev/null
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef DT_BINDINGS_ASPEED_WDT_H
+#define DT_BINDINGS_ASPEED_WDT_H
+
+#define AST2500_WDT_RESET_CPU          (1 << 0)
+#define AST2500_WDT_RESET_COPROC       (1 << 1)
+#define AST2500_WDT_RESET_SDRAM                (1 << 2)
+#define AST2500_WDT_RESET_AHB          (1 << 3)
+#define AST2500_WDT_RESET_I2C          (1 << 4)
+#define AST2500_WDT_RESET_MAC0         (1 << 5)
+#define AST2500_WDT_RESET_MAC1         (1 << 6)
+#define AST2500_WDT_RESET_GRAPHICS     (1 << 7)
+#define AST2500_WDT_RESET_USB2_HOST_HUB        (1 << 8)
+#define AST2500_WDT_RESET_USB_HOST     (1 << 9)
+#define AST2500_WDT_RESET_HID_EHCI     (1 << 10)
+#define AST2500_WDT_RESET_VIDEO                (1 << 11)
+#define AST2500_WDT_RESET_HAC          (1 << 12)
+#define AST2500_WDT_RESET_LPC          (1 << 13)
+#define AST2500_WDT_RESET_SDIO         (1 << 14)
+#define AST2500_WDT_RESET_MIC          (1 << 15)
+#define AST2500_WDT_RESET_CRT          (1 << 16)
+#define AST2500_WDT_RESET_PWM          (1 << 17)
+#define AST2500_WDT_RESET_PECI         (1 << 18)
+#define AST2500_WDT_RESET_JTAG         (1 << 19)
+#define AST2500_WDT_RESET_ADC          (1 << 20)
+#define AST2500_WDT_RESET_GPIO         (1 << 21)
+#define AST2500_WDT_RESET_MCTP         (1 << 22)
+#define AST2500_WDT_RESET_XDMA         (1 << 23)
+#define AST2500_WDT_RESET_SPI          (1 << 24)
+#define AST2500_WDT_RESET_SOC_MISC     (1 << 25)
+
+#define AST2500_WDT_RESET_DEFAULT 0x023ffff3
+
+#define AST2600_WDT_RESET1_CPU         (1 << 0)
+#define AST2600_WDT_RESET1_SDRAM       (1 << 1)
+#define AST2600_WDT_RESET1_AHB         (1 << 2)
+#define AST2600_WDT_RESET1_SLI         (1 << 3)
+#define AST2600_WDT_RESET1_SOC_MISC0   (1 << 4)
+#define AST2600_WDT_RESET1_COPROC      (1 << 5)
+#define AST2600_WDT_RESET1_USB_A       (1 << 6)
+#define AST2600_WDT_RESET1_USB_B       (1 << 7)
+#define AST2600_WDT_RESET1_UHCI                (1 << 8)
+#define AST2600_WDT_RESET1_GRAPHICS    (1 << 9)
+#define AST2600_WDT_RESET1_CRT         (1 << 10)
+#define AST2600_WDT_RESET1_VIDEO       (1 << 11)
+#define AST2600_WDT_RESET1_HAC         (1 << 12)
+#define AST2600_WDT_RESET1_DP          (1 << 13)
+#define AST2600_WDT_RESET1_DP_MCU      (1 << 14)
+#define AST2600_WDT_RESET1_GP_MCU      (1 << 15)
+#define AST2600_WDT_RESET1_MAC0                (1 << 16)
+#define AST2600_WDT_RESET1_MAC1                (1 << 17)
+#define AST2600_WDT_RESET1_SDIO0       (1 << 18)
+#define AST2600_WDT_RESET1_JTAG0       (1 << 19)
+#define AST2600_WDT_RESET1_MCTP0       (1 << 20)
+#define AST2600_WDT_RESET1_MCTP1       (1 << 21)
+#define AST2600_WDT_RESET1_XDMA0       (1 << 22)
+#define AST2600_WDT_RESET1_XDMA1       (1 << 23)
+#define AST2600_WDT_RESET1_GPIO0       (1 << 24)
+#define AST2600_WDT_RESET1_RVAS                (1 << 25)
+
+#define AST2600_WDT_RESET1_DEFAULT 0x030f1ff1
+
+#define AST2600_WDT_RESET2_CPU         (1 << 0)
+#define AST2600_WDT_RESET2_SPI         (1 << 1)
+#define AST2600_WDT_RESET2_AHB2                (1 << 2)
+#define AST2600_WDT_RESET2_SLI2                (1 << 3)
+#define AST2600_WDT_RESET2_SOC_MISC1   (1 << 4)
+#define AST2600_WDT_RESET2_MAC2                (1 << 5)
+#define AST2600_WDT_RESET2_MAC3                (1 << 6)
+#define AST2600_WDT_RESET2_SDIO1       (1 << 7)
+#define AST2600_WDT_RESET2_JTAG1       (1 << 8)
+#define AST2600_WDT_RESET2_GPIO1       (1 << 9)
+#define AST2600_WDT_RESET2_MDIO                (1 << 10)
+#define AST2600_WDT_RESET2_LPC         (1 << 11)
+#define AST2600_WDT_RESET2_PECI                (1 << 12)
+#define AST2600_WDT_RESET2_PWM         (1 << 13)
+#define AST2600_WDT_RESET2_ADC         (1 << 14)
+#define AST2600_WDT_RESET2_FSI         (1 << 15)
+#define AST2600_WDT_RESET2_I2C         (1 << 16)
+#define AST2600_WDT_RESET2_I3C_GLOBAL  (1 << 17)
+#define AST2600_WDT_RESET2_I3C0                (1 << 18)
+#define AST2600_WDT_RESET2_I3C1                (1 << 19)
+#define AST2600_WDT_RESET2_I3C2                (1 << 20)
+#define AST2600_WDT_RESET2_I3C3                (1 << 21)
+#define AST2600_WDT_RESET2_I3C4                (1 << 22)
+#define AST2600_WDT_RESET2_I3C5                (1 << 23)
+#define AST2600_WDT_RESET2_ESPI                (1 << 26)
+
+#define AST2600_WDT_RESET2_DEFAULT 0x03fffff1
+
+#endif
index 24e014760328eb12abec1b9529da699424537f40..54189e0e5f419a36f822d34f8b6021f6e7f50318 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mod_devicetable.h>
 #include <linux/property.h>
 #include <linux/uuid.h>
+#include <linux/fw_table.h>
 
 struct irq_domain;
 struct irq_domain_ops;
@@ -24,6 +25,16 @@ struct irq_domain_ops;
 #endif
 #include <acpi/acpi.h>
 
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
+
 #ifdef CONFIG_ACPI
 
 #include <linux/list.h>
@@ -119,21 +130,8 @@ enum acpi_address_range_id {
 
 
 /* Table Handlers */
-union acpi_subtable_headers {
-       struct acpi_subtable_header common;
-       struct acpi_hmat_structure hmat;
-       struct acpi_prmt_module_header prmt;
-       struct acpi_cedt_header cedt;
-};
-
 typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
 
-typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
-                                     const unsigned long end);
-
-typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
-                                         void *arg, const unsigned long end);
-
 /* Debugger support */
 
 struct acpi_debugger_ops {
@@ -207,14 +205,6 @@ static inline int acpi_debugger_notify_command_complete(void)
                (!entry) || (unsigned long)entry + sizeof(*entry) > end ||  \
                ((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
 
-struct acpi_subtable_proc {
-       int id;
-       acpi_tbl_entry_handler handler;
-       acpi_tbl_entry_handler_arg handler_arg;
-       void *arg;
-       int count;
-};
-
 void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
 void __acpi_unmap_table(void __iomem *map, unsigned long size);
 int early_acpi_boot_init(void);
@@ -229,16 +219,6 @@ void acpi_reserve_initial_tables (void);
 void acpi_table_init_complete (void);
 int acpi_table_init (void);
 
-#ifdef CONFIG_ACPI_TABLE_LIB
-#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
-#define __init_or_acpilib
-#define __initdata_or_acpilib
-#else
-#define EXPORT_SYMBOL_ACPI_LIB(x)
-#define __init_or_acpilib __init
-#define __initdata_or_acpilib __initdata
-#endif
-
 int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
 int __init_or_acpilib acpi_table_parse_entries(char *id,
                unsigned long table_size, int entry_id,
@@ -1551,4 +1531,9 @@ static inline void acpi_device_notify(struct device *dev) { }
 static inline void acpi_device_notify_remove(struct device *dev) { }
 #endif
 
+static inline void acpi_use_parent_companion(struct device *dev)
+{
+       ACPI_COMPANION_SET(dev, ACPI_COMPANION(dev->parent));
+}
+
 #endif /*_LINUX_ACPI_H*/
index 29cc10220952dbe05497875b64cd19f00a81362b..f6ea2f57d8089688870aed9a962cc742ff44da58 100644 (file)
@@ -51,7 +51,7 @@ static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
 static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
 #endif
 
-void cper_print_aer(struct pci_dev *dev, int aer_severity,
+void pci_print_aer(struct pci_dev *dev, int aer_severity,
                    struct aer_capability_regs *aer);
 int cper_severity_to_aer(int cper_severity);
 void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
index 99a5201d9e625796f762b35beb32b1b523497019..dc7ed2f4688614a992eac65d11e25d841943ae45 100644 (file)
@@ -33,126 +33,6 @@ struct pci_dev;
 
 extern int amd_iommu_detect(void);
 
-/**
- * amd_iommu_init_device() - Init device for use with IOMMUv2 driver
- * @pdev: The PCI device to initialize
- * @pasids: Number of PASIDs to support for this device
- *
- * This function does all setup for the device pdev so that it can be
- * used with IOMMUv2.
- * Returns 0 on success or negative value on error.
- */
-extern int amd_iommu_init_device(struct pci_dev *pdev, int pasids);
-
-/**
- * amd_iommu_free_device() - Free all IOMMUv2 related device resources
- *                          and disable IOMMUv2 usage for this device
- * @pdev: The PCI device to disable IOMMUv2 usage for'
- */
-extern void amd_iommu_free_device(struct pci_dev *pdev);
-
-/**
- * amd_iommu_bind_pasid() - Bind a given task to a PASID on a device
- * @pdev: The PCI device to bind the task to
- * @pasid: The PASID on the device the task should be bound to
- * @task: the task to bind
- *
- * The function returns 0 on success or a negative value on error.
- */
-extern int amd_iommu_bind_pasid(struct pci_dev *pdev, u32 pasid,
-                               struct task_struct *task);
-
-/**
- * amd_iommu_unbind_pasid() - Unbind a PASID from its task on
- *                           a device
- * @pdev: The device of the PASID
- * @pasid: The PASID to unbind
- *
- * When this function returns the device is no longer using the PASID
- * and the PASID is no longer bound to its task.
- */
-extern void amd_iommu_unbind_pasid(struct pci_dev *pdev, u32 pasid);
-
-/**
- * amd_iommu_set_invalid_ppr_cb() - Register a call-back for failed
- *                                 PRI requests
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- *
- * The IOMMUv2 driver invokes this call-back when it is unable to
- * successfully handle a PRI request. The device driver can then decide
- * which PRI response the device should see. Possible return values for
- * the call-back are:
- *
- * - AMD_IOMMU_INV_PRI_RSP_SUCCESS - Send SUCCESS back to the device
- * - AMD_IOMMU_INV_PRI_RSP_INVALID - Send INVALID back to the device
- * - AMD_IOMMU_INV_PRI_RSP_FAIL    - Send Failure back to the device,
- *                                  the device is required to disable
- *                                  PRI when it receives this response
- *
- * The function returns 0 on success or negative value on error.
- */
-#define AMD_IOMMU_INV_PRI_RSP_SUCCESS  0
-#define AMD_IOMMU_INV_PRI_RSP_INVALID  1
-#define AMD_IOMMU_INV_PRI_RSP_FAIL     2
-
-typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev,
-                                       u32 pasid,
-                                       unsigned long address,
-                                       u16);
-
-extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
-                                       amd_iommu_invalid_ppr_cb cb);
-
-#define PPR_FAULT_EXEC (1 << 1)
-#define PPR_FAULT_READ  (1 << 2)
-#define PPR_FAULT_WRITE (1 << 5)
-#define PPR_FAULT_USER  (1 << 6)
-#define PPR_FAULT_RSVD  (1 << 7)
-#define PPR_FAULT_GN    (1 << 8)
-
-/**
- * amd_iommu_device_info() - Get information about IOMMUv2 support of a
- *                          PCI device
- * @pdev: PCI device to query information from
- * @info: A pointer to an amd_iommu_device_info structure which will contain
- *       the information about the PCI device
- *
- * Returns 0 on success, negative value on error
- */
-
-#define AMD_IOMMU_DEVICE_FLAG_ATS_SUP     0x1    /* ATS feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PRI_SUP     0x2    /* PRI feature supported */
-#define AMD_IOMMU_DEVICE_FLAG_PASID_SUP   0x4    /* PASID context supported */
-#define AMD_IOMMU_DEVICE_FLAG_EXEC_SUP    0x8    /* Device may request execution
-                                                   on memory pages */
-#define AMD_IOMMU_DEVICE_FLAG_PRIV_SUP   0x10    /* Device may request
-                                                   super-user privileges */
-
-struct amd_iommu_device_info {
-       int max_pasids;
-       u32 flags;
-};
-
-extern int amd_iommu_device_info(struct pci_dev *pdev,
-                                struct amd_iommu_device_info *info);
-
-/**
- * amd_iommu_set_invalidate_ctx_cb() - Register a call-back for invalidating
- *                                    a pasid context. This call-back is
- *                                    invoked when the IOMMUv2 driver needs to
- *                                    invalidate a PASID context, for example
- *                                    because the task that is bound to that
- *                                    context is about to exit.
- *
- * @pdev: The PCI device the call-back should be registered for
- * @cb: The call-back function
- */
-
-typedef void (*amd_iommu_invalidate_ctx)(struct pci_dev *pdev, u32 pasid);
-
-extern int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
-                                          amd_iommu_invalidate_ctx cb);
 #else /* CONFIG_AMD_IOMMU */
 
 static inline int amd_iommu_detect(void) { return -ENODEV; }
index c2231c64d60b92a7f7e4ea52e479cbdbf2611693..59d404e22814e885ae8429a361f2f43bed3b8f30 100644 (file)
  */
 #define __bpf_kfunc __used noinline
 
+#define __bpf_kfunc_start_defs()                                              \
+       __diag_push();                                                         \
+       __diag_ignore_all("-Wmissing-declarations",                            \
+                         "Global kfuncs as their definitions will be in BTF");\
+       __diag_ignore_all("-Wmissing-prototypes",                              \
+                         "Global kfuncs as their definitions will be in BTF")
+
+#define __bpf_kfunc_end_defs() __diag_pop()
+#define __bpf_hook_start() __bpf_kfunc_start_defs()
+#define __bpf_hook_end() __bpf_kfunc_end_defs()
+
 /*
  * Return the name of the passed struct, if exists, or halt the build if for
  * example the structure gets renamed. In this way, developers have to revisit
index d5a5da838cafa4c6c31cc58445e2e6f3b3ba0f99..11a92a946016eab597d5fe3b9e023705b127b25a 100644 (file)
        pr_debug("%.*s %12.12s:%-4d : " fmt,                            \
                 8 - (int)sizeof(KBUILD_MODNAME), "    ",               \
                 kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
+#  define doutc(client, fmt, ...)                                      \
+       pr_debug("%.*s %12.12s:%-4d : [%pU %llu] " fmt,                 \
+                8 - (int)sizeof(KBUILD_MODNAME), "    ",               \
+                kbasename(__FILE__), __LINE__,                         \
+                &client->fsid, client->monc.auth->global_id,           \
+                ##__VA_ARGS__)
 # else
 /* faux printk call just to see any compiler warnings. */
 #  define dout(fmt, ...)       do {                            \
                if (0)                                          \
                        printk(KERN_DEBUG fmt, ##__VA_ARGS__);  \
        } while (0)
+#  define doutc(client, fmt, ...)      do {                    \
+               if (0)                                          \
+                       printk(KERN_DEBUG "[%pU %llu] " fmt,    \
+                       &client->fsid,                          \
+                       client->monc.auth->global_id,           \
+                       ##__VA_ARGS__);                         \
+               } while (0)
 # endif
 
 #else
  * or, just wrap pr_debug
  */
 # define dout(fmt, ...)        pr_debug(" " fmt, ##__VA_ARGS__)
+# define doutc(client, fmt, ...)                                       \
+       pr_debug(" [%pU %llu] %s: " fmt, &client->fsid,                 \
+                client->monc.auth->global_id, __func__, ##__VA_ARGS__)
 
 #endif
 
+#define pr_notice_client(client, fmt, ...)                             \
+       pr_notice("[%pU %llu]: " fmt, &client->fsid,                    \
+                 client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_info_client(client, fmt, ...)                               \
+       pr_info("[%pU %llu]: " fmt, &client->fsid,                      \
+               client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_client(client, fmt, ...)                               \
+       pr_warn("[%pU %llu]: " fmt, &client->fsid,                      \
+               client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_once_client(client, fmt, ...)                          \
+       pr_warn_once("[%pU %llu]: " fmt, &client->fsid,                 \
+                    client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_client(client, fmt, ...)                                        \
+       pr_err("[%pU %llu]: " fmt, &client->fsid,                       \
+              client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_warn_ratelimited_client(client, fmt, ...)                   \
+       pr_warn_ratelimited("[%pU %llu]: " fmt, &client->fsid,          \
+                           client->monc.auth->global_id, ##__VA_ARGS__)
+#define pr_err_ratelimited_client(client, fmt, ...)                    \
+       pr_err_ratelimited("[%pU %llu]: " fmt, &client->fsid,           \
+                          client->monc.auth->global_id, ##__VA_ARGS__)
+
 #endif
index f3b3593254b975004409b9dec8a8c88eec8bb0b0..ee1d0e5f9789f2bd1672721c304cd6cae56245b7 100644 (file)
@@ -357,6 +357,11 @@ enum {
        CEPH_MDS_OP_RENAMESNAP = 0x01403,
 };
 
+#define IS_CEPH_MDS_OP_NEWINODE(op) (op == CEPH_MDS_OP_CREATE     || \
+                                    op == CEPH_MDS_OP_MKNOD      || \
+                                    op == CEPH_MDS_OP_MKDIR      || \
+                                    op == CEPH_MDS_OP_SYMLINK)
+
 extern const char *ceph_mds_op_name(int op);
 
 #define CEPH_SETATTR_MODE              (1 << 0)
@@ -497,7 +502,7 @@ struct ceph_mds_request_head_legacy {
        union ceph_mds_request_args args;
 } __attribute__ ((packed));
 
-#define CEPH_MDS_REQUEST_HEAD_VERSION  2
+#define CEPH_MDS_REQUEST_HEAD_VERSION  3
 
 struct ceph_mds_request_head_old {
        __le16 version;                /* struct version */
@@ -528,6 +533,9 @@ struct ceph_mds_request_head {
 
        __le32 ext_num_retry;          /* new count retry attempts */
        __le32 ext_num_fwd;            /* new count fwd attempts */
+
+       __le32 struct_len;             /* to store size of struct ceph_mds_request_head */
+       __le32 owner_uid, owner_gid;   /* used for OPs which create inodes */
 } __attribute__ ((packed));
 
 /* cap/lease release record */
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
deleted file mode 100644 (file)
index 4c3e064..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _FS_CEPH_MDSMAP_H
-#define _FS_CEPH_MDSMAP_H
-
-#include <linux/bug.h>
-#include <linux/ceph/types.h>
-
-/*
- * mds map - describe servers in the mds cluster.
- *
- * we limit fields to those the client actually xcares about
- */
-struct ceph_mds_info {
-       u64 global_id;
-       struct ceph_entity_addr addr;
-       s32 state;
-       int num_export_targets;
-       bool laggy;
-       u32 *export_targets;
-};
-
-struct ceph_mdsmap {
-       u32 m_epoch, m_client_epoch, m_last_failure;
-       u32 m_root;
-       u32 m_session_timeout;          /* seconds */
-       u32 m_session_autoclose;        /* seconds */
-       u64 m_max_file_size;
-       u64 m_max_xattr_size;           /* maximum size for xattrs blob */
-       u32 m_max_mds;                  /* expected up:active mds number */
-       u32 m_num_active_mds;           /* actual up:active mds number */
-       u32 possible_max_rank;          /* possible max rank index */
-       struct ceph_mds_info *m_info;
-
-       /* which object pools file data can be stored in */
-       int m_num_data_pg_pools;
-       u64 *m_data_pg_pools;
-       u64 m_cas_pg_pool;
-
-       bool m_enabled;
-       bool m_damaged;
-       int m_num_laggy;
-};
-
-static inline struct ceph_entity_addr *
-ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
-{
-       if (w >= m->possible_max_rank)
-               return NULL;
-       return &m->m_info[w].addr;
-}
-
-static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
-{
-       BUG_ON(w < 0);
-       if (w >= m->possible_max_rank)
-               return CEPH_MDS_STATE_DNE;
-       return m->m_info[w].state;
-}
-
-static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
-{
-       if (w >= 0 && w < m->possible_max_rank)
-               return m->m_info[w].laggy;
-       return false;
-}
-
-extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
-struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2);
-extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
-extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);
-
-#endif
index 722a586bb22444418d31eb80d9d25e89de5d72a2..de7bb47d8a46ace38d95a81ed6df231d91ac725b 100644 (file)
@@ -154,6 +154,7 @@ struct closure {
        struct closure          *parent;
 
        atomic_t                remaining;
+       bool                    closure_get_happened;
 
 #ifdef CONFIG_DEBUG_CLOSURES
 #define CLOSURE_MAGIC_DEAD     0xc054dead
@@ -185,7 +186,11 @@ static inline unsigned closure_nr_remaining(struct closure *cl)
  */
 static inline void closure_sync(struct closure *cl)
 {
-       if (closure_nr_remaining(cl) != 1)
+#ifdef CONFIG_DEBUG_CLOSURES
+       BUG_ON(closure_nr_remaining(cl) != 1 && !cl->closure_get_happened);
+#endif
+
+       if (cl->closure_get_happened)
                __closure_sync(cl);
 }
 
@@ -233,8 +238,6 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
        closure_set_ip(cl);
        cl->fn = fn;
        cl->wq = wq;
-       /* between atomic_dec() in closure_put() */
-       smp_mb__before_atomic();
 }
 
 static inline void closure_queue(struct closure *cl)
@@ -259,6 +262,8 @@ static inline void closure_queue(struct closure *cl)
  */
 static inline void closure_get(struct closure *cl)
 {
+       cl->closure_get_happened = true;
+
 #ifdef CONFIG_DEBUG_CLOSURES
        BUG_ON((atomic_inc_return(&cl->remaining) &
                CLOSURE_REMAINING_MASK) <= 1);
@@ -281,6 +286,7 @@ static inline void closure_init(struct closure *cl, struct closure *parent)
                closure_get(parent);
 
        atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+       cl->closure_get_happened = false;
 
        closure_debug_create(cl);
        closure_set_ip(cl);
index f0ccca16a0aca19115f0f8f9c8b65234f257b700..4a658de44ee92e8e090e6bb08a6cc45590f82bfe 100644 (file)
@@ -144,6 +144,7 @@ bool dma_pci_p2pdma_supported(struct device *dev);
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
+bool dma_addressing_limited(struct device *dev);
 size_t dma_max_mapping_size(struct device *dev);
 size_t dma_opt_mapping_size(struct device *dev);
 bool dma_need_sync(struct device *dev, dma_addr_t dma_addr);
@@ -264,6 +265,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
 {
        return 0;
 }
+static inline bool dma_addressing_limited(struct device *dev)
+{
+       return false;
+}
 static inline size_t dma_max_mapping_size(struct device *dev)
 {
        return 0;
@@ -465,20 +470,6 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
        return dma_set_mask_and_coherent(dev, mask);
 }
 
-/**
- * dma_addressing_limited - return if the device is addressing limited
- * @dev:       device to check
- *
- * Return %true if the devices DMA mask is too small to address all memory in
- * the system, else %false.  Lack of addressing bits is the prime reason for
- * bounce buffering, but might not be the only one.
- */
-static inline bool dma_addressing_limited(struct device *dev)
-{
-       return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
-                           dma_get_required_mask(dev);
-}
-
 static inline unsigned int dma_get_max_seg_size(struct device *dev)
 {
        if (dev->dma_parms && dev->dma_parms->max_segment_size)
index 226a36ed5aa1fc3901fafdb239bac1d974fa0770..689028257fccbd1f317d6ab324c393f55626ddc2 100644 (file)
@@ -1045,10 +1045,10 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
 
 /**
  * ethtool_sprintf - Write formatted string to ethtool string data
- * @data: Pointer to start of string to update
+ * @data: Pointer to a pointer to the start of string to update
  * @fmt: Format of string to write
  *
- * Write formatted string to data. Update data to point at start of
+ * Write formatted string to *data. Update *data to point at start of
  * next string.
  */
 extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
index 0388e8c20f5284f2adafad0611157e5f40354ca2..bb37ad5cc954a4ce7d5aaf002ff506aff8862136 100644 (file)
@@ -98,6 +98,17 @@ enum fid_type {
         */
        FILEID_FAT_WITH_PARENT = 0x72,
 
+       /*
+        * 64 bit inode number, 32 bit generation number.
+        */
+       FILEID_INO64_GEN = 0x81,
+
+       /*
+        * 64 bit inode number, 32 bit generation number,
+        * 64 bit parent inode number, 32 bit parent generation.
+        */
+       FILEID_INO64_GEN_PARENT = 0x82,
+
        /*
         * 128 bit child FID (struct lu_fid)
         * 128 bit parent FID (struct lu_fid)
@@ -129,7 +140,11 @@ struct fid {
                        u32 parent_ino;
                        u32 parent_gen;
                } i32;
-               struct {
+               struct {
+                       u64 ino;
+                       u32 gen;
+               } __packed i64;
+               struct {
                        u32 block;
                        u16 partref;
                        u16 parent_partref;
@@ -253,6 +268,33 @@ extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
 extern int exportfs_encode_fh(struct dentry *dentry, struct fid *fid,
                              int *max_len, int flags);
 
+static inline bool exportfs_can_encode_fid(const struct export_operations *nop)
+{
+       return !nop || nop->encode_fh;
+}
+
+static inline bool exportfs_can_decode_fh(const struct export_operations *nop)
+{
+       return nop && nop->fh_to_dentry;
+}
+
+static inline bool exportfs_can_encode_fh(const struct export_operations *nop,
+                                         int fh_flags)
+{
+       /*
+        * If a non-decodeable file handle was requested, we only need to make
+        * sure that filesystem did not opt-out of encoding fid.
+        */
+       if (fh_flags & EXPORT_FH_FID)
+               return exportfs_can_encode_fid(nop);
+
+       /*
+        * If a decodeable file handle was requested, we need to make sure that
+        * filesystem can also decode file handles.
+        */
+       return exportfs_can_decode_fh(nop);
+}
+
 static inline int exportfs_encode_fid(struct inode *inode, struct fid *fid,
                                      int *max_len)
 {
@@ -272,10 +314,12 @@ extern struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
 /*
  * Generic helpers for filesystems.
  */
-extern struct dentry *generic_fh_to_dentry(struct super_block *sb,
+int generic_encode_ino32_fh(struct inode *inode, __u32 *fh, int *max_len,
+                           struct inode *parent);
+struct dentry *generic_fh_to_dentry(struct super_block *sb,
        struct fid *fid, int fh_len, int fh_type,
        struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
-extern struct dentry *generic_fh_to_parent(struct super_block *sb,
+struct dentry *generic_fh_to_parent(struct super_block *sb,
        struct fid *fid, int fh_len, int fh_type,
        struct inode *(*get_inode) (struct super_block *sb, u64 ino, u32 gen));
 
index a82a4bb6ce68bf9c8cee4a6ed81676406383c65f..039fe0ce8d83d1d7ca80c306f4008f2a56713736 100644 (file)
 
 #define F2FS_SUPER_OFFSET              1024    /* byte-size offset */
 #define F2FS_MIN_LOG_SECTOR_SIZE       9       /* 9 bits for 512 bytes */
-#define F2FS_MAX_LOG_SECTOR_SIZE       12      /* 12 bits for 4096 bytes */
-#define F2FS_LOG_SECTORS_PER_BLOCK     3       /* log number for sector/blk */
-#define F2FS_BLKSIZE                   4096    /* support only 4KB block */
-#define F2FS_BLKSIZE_BITS              12      /* bits for F2FS_BLKSIZE */
+#define F2FS_MAX_LOG_SECTOR_SIZE       PAGE_SHIFT      /* Max is Block Size */
+#define F2FS_LOG_SECTORS_PER_BLOCK     (PAGE_SHIFT - 9) /* log number for sector/blk */
+#define F2FS_BLKSIZE                   PAGE_SIZE /* support only block == page */
+#define F2FS_BLKSIZE_BITS              PAGE_SHIFT /* bits for F2FS_BLKSIZE */
 #define F2FS_MAX_EXTENSION             64      /* # of extension entries */
 #define F2FS_EXTENSION_LEN             8       /* max size of extension */
 #define F2FS_BLK_ALIGN(x)      (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
@@ -104,6 +104,7 @@ enum f2fs_error {
        ERROR_CORRUPTED_VERITY_XATTR,
        ERROR_CORRUPTED_XATTR,
        ERROR_INVALID_NODE_REFERENCE,
+       ERROR_INCONSISTENT_NAT,
        ERROR_MAX,
 };
 
@@ -210,14 +211,14 @@ struct f2fs_checkpoint {
        unsigned char sit_nat_version_bitmap[];
 } __packed;
 
-#define CP_CHKSUM_OFFSET       4092    /* default chksum offset in checkpoint */
+#define CP_CHKSUM_OFFSET       (F2FS_BLKSIZE - sizeof(__le32)) /* default chksum offset in checkpoint */
 #define CP_MIN_CHKSUM_OFFSET                                           \
        (offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
 
 /*
  * For orphan inode management
  */
-#define F2FS_ORPHANS_PER_BLOCK 1020
+#define F2FS_ORPHANS_PER_BLOCK ((F2FS_BLKSIZE - 4 * sizeof(__le32)) / sizeof(__le32))
 
 #define GET_ORPHAN_BLOCKS(n)   (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
                                        F2FS_ORPHANS_PER_BLOCK)
@@ -243,14 +244,31 @@ struct f2fs_extent {
 #define F2FS_NAME_LEN          255
 /* 200 bytes for inline xattrs by default */
 #define DEFAULT_INLINE_XATTR_ADDRS     50
-#define DEF_ADDRS_PER_INODE    923     /* Address Pointers in an Inode */
+
+#define OFFSET_OF_END_OF_I_EXT         360
+#define SIZE_OF_I_NID                  20
+
+struct node_footer {
+       __le32 nid;             /* node id */
+       __le32 ino;             /* inode number */
+       __le32 flag;            /* include cold/fsync/dentry marks and offset */
+       __le64 cp_ver;          /* checkpoint version */
+       __le32 next_blkaddr;    /* next node page block address */
+} __packed;
+
+/* Address Pointers in an Inode */
+#define DEF_ADDRS_PER_INODE    ((F2FS_BLKSIZE - OFFSET_OF_END_OF_I_EXT \
+                                       - SIZE_OF_I_NID \
+                                       - sizeof(struct node_footer)) / sizeof(__le32))
 #define CUR_ADDRS_PER_INODE(inode)     (DEF_ADDRS_PER_INODE - \
                                        get_extra_isize(inode))
 #define DEF_NIDS_PER_INODE     5       /* Node IDs in an Inode */
 #define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
-#define DEF_ADDRS_PER_BLOCK    1018    /* Address Pointers in a Direct Block */
+/* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK    ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
 #define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
-#define NIDS_PER_BLOCK         1018    /* Node IDs in an Indirect Block */
+/* Node IDs in an Indirect Block */
+#define NIDS_PER_BLOCK         ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
 
 #define ADDRS_PER_PAGE(page, inode)    \
        (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
@@ -342,14 +360,6 @@ enum {
 
 #define OFFSET_BIT_MASK                GENMASK(OFFSET_BIT_SHIFT - 1, 0)
 
-struct node_footer {
-       __le32 nid;             /* node id */
-       __le32 ino;             /* inode number */
-       __le32 flag;            /* include cold/fsync/dentry marks and offset */
-       __le64 cp_ver;          /* checkpoint version */
-       __le32 next_blkaddr;    /* next node page block address */
-} __packed;
-
 struct f2fs_node {
        /* can be one of three types: inode, direct, and indirect types */
        union {
@@ -363,7 +373,7 @@ struct f2fs_node {
 /*
  * For NAT entries
  */
-#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_nat_entry))
 
 struct f2fs_nat_entry {
        __u8 version;           /* latest version of cached nat entry */
@@ -378,12 +388,13 @@ struct f2fs_nat_block {
 /*
  * For SIT entries
  *
- * Each segment is 2MB in size by default so that a bitmap for validity of
- * there-in blocks should occupy 64 bytes, 512 bits.
+ * A validity bitmap of 64 bytes covers 512 blocks of area. For a 4K page size,
+ * this results in a segment size of 2MB. For 16k pages, the default segment size
+ * is 8MB.
  * Not allow to change this.
  */
 #define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_sit_entry))
 
 /*
  * F2FS uses 4 bytes to represent block address. As a result, supported size of
@@ -418,7 +429,7 @@ struct f2fs_sit_block {
  * For segment summary
  *
  * One summary block contains exactly 512 summary entries, which represents
- * exactly 2MB segment by default. Not allow to change the basic units.
+ * exactly one segment by default. Not allow to change the basic units.
  *
  * NOTE: For initializing fields, you must use set_summary
  *
@@ -429,12 +440,12 @@ struct f2fs_sit_block {
  * from node's page's beginning to get a data block address.
  * ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
  */
-#define ENTRIES_IN_SUM         512
+#define ENTRIES_IN_SUM         (F2FS_BLKSIZE / 8)
 #define        SUMMARY_SIZE            (7)     /* sizeof(struct summary) */
 #define        SUM_FOOTER_SIZE         (5)     /* sizeof(struct summary_footer) */
 #define SUM_ENTRY_SIZE         (SUMMARY_SIZE * ENTRIES_IN_SUM)
 
-/* a summary entry for a 4KB-sized block in a segment */
+/* a summary entry for a block in a segment */
 struct f2fs_summary {
        __le32 nid;             /* parent node id */
        union {
@@ -518,7 +529,7 @@ struct f2fs_journal {
        };
 } __packed;
 
-/* 4KB-sized summary block structure */
+/* Block-sized summary block structure */
 struct f2fs_summary_block {
        struct f2fs_summary entries[ENTRIES_IN_SUM];
        struct f2fs_journal journal;
@@ -559,11 +570,14 @@ typedef __le32    f2fs_hash_t;
  * Note: there are more reserved space in inline dentry than in regular
  * dentry, when converting inline dentry we should handle this carefully.
  */
-#define NR_DENTRY_IN_BLOCK     214     /* the number of dentry in a block */
+
+/* the number of dentry in a block */
+#define NR_DENTRY_IN_BLOCK     ((BITS_PER_BYTE * F2FS_BLKSIZE) / \
+                                       ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1))
 #define SIZE_OF_DIR_ENTRY      11      /* by byte */
 #define SIZE_OF_DENTRY_BITMAP  ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
                                        BITS_PER_BYTE)
-#define SIZE_OF_RESERVED       (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
+#define SIZE_OF_RESERVED       (F2FS_BLKSIZE - ((SIZE_OF_DIR_ENTRY + \
                                F2FS_SLOT_LEN) * \
                                NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
 #define MIN_INLINE_DENTRY_SIZE         40      /* just include '.' and '..' entries */
@@ -576,7 +590,7 @@ struct f2fs_dir_entry {
        __u8 file_type;         /* file type */
 } __packed;
 
-/* 4KB-sized directory entry block */
+/* Block-sized directory entry block */
 struct f2fs_dentry_block {
        /* validity bitmap for directory entries in each block */
        __u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h
new file mode 100644 (file)
index 0000000..ff8fa58
--- /dev/null
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *  fw_tables.h - Parsing support for ACPI and ACPI-like tables provided by
+ *                platform or device firmware
+ *
+ *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *  Copyright (C) 2023 Intel Corp.
+ */
+#ifndef _FW_TABLE_H_
+#define _FW_TABLE_H_
+
+union acpi_subtable_headers;
+
+typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
+                                     const unsigned long end);
+
+typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
+                                         void *arg, const unsigned long end);
+
+struct acpi_subtable_proc {
+       int id;
+       acpi_tbl_entry_handler handler;
+       acpi_tbl_entry_handler_arg handler_arg;
+       void *arg;
+       int count;
+};
+
+#include <linux/acpi.h>
+#include <acpi/acpi.h>
+
+union acpi_subtable_headers {
+       struct acpi_subtable_header common;
+       struct acpi_hmat_structure hmat;
+       struct acpi_prmt_module_header prmt;
+       struct acpi_cedt_header cedt;
+};
+
+int acpi_parse_entries_array(char *id, unsigned long table_size,
+                            struct acpi_table_header *table_header,
+                            struct acpi_subtable_proc *proc,
+                            int proc_num, unsigned int max_entries);
+
+#endif
index 1d454dc944b37fa379c3a1dd7af266281cd2e4cb..0aed62f0c63309d8b4962275c8a9b32f1cd9f659 100644 (file)
@@ -605,9 +605,6 @@ int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc,
                                    void *data, struct lock_class_key *lock_key,
                                    struct lock_class_key *request_key);
 
-struct gpio_chip *gpiochip_find(void *data,
-                               int (*match)(struct gpio_chip *gc, void *data));
-
 struct gpio_device *gpio_device_find(void *data,
                                int (*match)(struct gpio_chip *gc, void *data));
 struct gpio_device *gpio_device_find_by_label(const char *label);
index 90fa83464f0039e4e71a2b1b3f76a58602d7ed32..84ed77c049400546e968016af548bce081805cca 100644 (file)
@@ -96,7 +96,7 @@ enum i3c_dcr {
 
 /**
  * struct i3c_device_info - I3C device information
- * @pid: Provisional ID
+ * @pid: Provisioned ID
  * @bcr: Bus Characteristic Register
  * @dcr: Device Characteristic Register
  * @static_addr: static/I2C address
index db909ef79be47dbbbd19fac1e6d4ff20de8ed1c0..24c1863b86e2b6940d263555515039fa0b317203 100644 (file)
@@ -135,6 +135,7 @@ struct i3c_ibi_slot {
  *                  rejected by the master
  * @num_slots: number of IBI slots reserved for this device
  * @enabled: reflect the IBI status
+ * @wq: workqueue used to execute IBI handlers.
  * @handler: IBI handler specified at i3c_device_request_ibi() call time. This
  *          handler will be called from the controller workqueue, and as such
  *          is allowed to sleep (though it is recommended to process the IBI
@@ -157,6 +158,7 @@ struct i3c_device_ibi_info {
        unsigned int max_payload_len;
        unsigned int num_slots;
        unsigned int enabled;
+       struct workqueue_struct *wq;
        void (*handler)(struct i3c_device *dev,
                        const struct i3c_ibi_payload *payload);
 };
@@ -172,7 +174,7 @@ struct i3c_device_ibi_info {
  *              assigned a dynamic address by the master. Will be used during
  *              bus initialization to assign it a specific dynamic address
  *              before starting DAA (Dynamic Address Assignment)
- * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * @pid: I3C Provisioned ID exposed by the device. This is a unique identifier
  *      that may be used to attach boardinfo to i3c_dev_desc when the device
  *      does not have a static address
  * @of_node: optional DT node in case the device has been described in the DT
@@ -475,7 +477,7 @@ struct i3c_master_controller_ops {
  * @boardinfo.i2c: list of I2C boardinfo objects
  * @boardinfo: board-level information attached to devices connected on the bus
  * @bus: I3C bus exposed by this master
- * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * @wq: workqueue which can be used by master
  *     drivers if they need to postpone operations that need to take place
  *     in a thread context. Typical examples are Hot Join processing which
  *     requires taking the bus lock in maintenance, which in turn, can only
index a0dce14090a9e1d239500897caaea9c1b05ea9f8..da5f5fa4a3a6ae2be38dec9bb7c5ac73ff8df6e2 100644 (file)
@@ -200,7 +200,7 @@ static inline void idr_preload_end(void)
  */
 #define idr_for_each_entry_ul(idr, entry, tmp, id)                     \
        for (tmp = 0, id = 0;                                           \
-            tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+            ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
             tmp = id, ++id)
 
 /**
@@ -224,10 +224,12 @@ static inline void idr_preload_end(void)
  * @id: Entry ID.
  *
  * Continue to iterate over entries, continuing after the current position.
+ * After normal termination @entry is left with the value NULL.  This
+ * is convenient for a "not found" value.
  */
 #define idr_for_each_entry_continue_ul(idr, entry, tmp, id)            \
        for (tmp = id;                                                  \
-            tmp <= id && ((entry) = idr_get_next_ul(idr, &(id))) != NULL; \
+            ((entry) = tmp <= id ? idr_get_next_ul(idr, &(id)) : NULL) != NULL; \
             tmp = id, ++id)
 
 /*
index 49790c1bd2c43e21615fda95cbf3febb22ba6da0..de6503c0edb8efcf86e0565a52b81e7a4e378014 100644 (file)
@@ -562,7 +562,7 @@ struct ff_device {
 
        int max_effects;
        struct ff_effect *effects;
-       struct file *effect_owners[];
+       struct file *effect_owners[] __counted_by(max_effects);
 };
 
 int input_ff_create(struct input_dev *dev, unsigned int max_effects);
index 3b8580bd33c146badadfd0bde7101cba99d17c96..2cf89a538b18bbc7c99c8705c2d22bdc95065238 100644 (file)
@@ -47,7 +47,7 @@ struct input_mt {
        unsigned int flags;
        unsigned int frame;
        int *red;
-       struct input_mt_slot slots[];
+       struct input_mt_slot slots[] __counted_by(num_slots);
 };
 
 static inline void input_mt_set_value(struct input_mt_slot *slot,
index 8fb1b41b4d1580a4c5c3f45c4fb7fb557dca464d..ec289c1016f5f24884bfc27a812898efe14cac60 100644 (file)
@@ -66,6 +66,7 @@ struct iommu_domain_geometry {
 #define __IOMMU_DOMAIN_DMA_FQ  (1U << 3)  /* DMA-API uses flush queue    */
 
 #define __IOMMU_DOMAIN_SVA     (1U << 4)  /* Shared process address space */
+#define __IOMMU_DOMAIN_PLATFORM        (1U << 5)
 
 #define __IOMMU_DOMAIN_NESTED  (1U << 6)  /* User-managed address space nested
                                              on a stage-2 translation        */
@@ -86,6 +87,8 @@ struct iommu_domain_geometry {
  *                               invalidation.
  *     IOMMU_DOMAIN_SVA        - DMA addresses are shared process addresses
  *                               represented by mm_struct's.
+ *     IOMMU_DOMAIN_PLATFORM   - Legacy domain for drivers that do their own
+ *                               dma_api stuff. Do not use in new drivers.
  */
 #define IOMMU_DOMAIN_BLOCKED   (0U)
 #define IOMMU_DOMAIN_IDENTITY  (__IOMMU_DOMAIN_PT)
@@ -96,6 +99,7 @@ struct iommu_domain_geometry {
                                 __IOMMU_DOMAIN_DMA_API |       \
                                 __IOMMU_DOMAIN_DMA_FQ)
 #define IOMMU_DOMAIN_SVA       (__IOMMU_DOMAIN_SVA)
+#define IOMMU_DOMAIN_PLATFORM  (__IOMMU_DOMAIN_PLATFORM)
 #define IOMMU_DOMAIN_NESTED    (__IOMMU_DOMAIN_NESTED)
 
 struct iommu_domain {
@@ -340,13 +344,12 @@ static inline int __iommu_copy_struct_from_user(
  *                     NULL while the @user_data can be optionally provided, the
  *                     new domain must support __IOMMU_DOMAIN_PAGING.
  *                     Upon failure, ERR_PTR must be returned.
+ * @domain_alloc_paging: Allocate an iommu_domain that can be used for
+ *                       UNMANAGED, DMA, and DMA_FQ domain types.
  * @probe_device: Add device to iommu driver handling
  * @release_device: Remove device from iommu driver handling
  * @probe_finalize: Do final setup work after the device is added to an IOMMU
  *                  group and attached to the groups domain
- * @set_platform_dma_ops: Returning control back to the platform DMA ops. This op
- *                        is to support old IOMMU drivers, new drivers should use
- *                        default domains, and the common IOMMU DMA ops.
  * @device_group: find iommu group for a particular device
  * @get_resv_regions: Request list of reserved regions for a device
  * @of_xlate: add OF master IDs to iommu grouping
@@ -365,6 +368,13 @@ static inline int __iommu_copy_struct_from_user(
  *                    will be blocked by the hardware.
  * @pgsize_bitmap: bitmap of all possible supported page sizes
  * @owner: Driver module providing these ops
+ * @identity_domain: An always available, always attachable identity
+ *                   translation.
+ * @blocked_domain: An always available, always attachable blocking
+ *                  translation.
+ * @default_domain: If not NULL this will always be set as the default domain.
+ *                  This should be an IDENTITY/BLOCKED/PLATFORM domain.
+ *                  Do not use in new drivers.
  */
 struct iommu_ops {
        bool (*capable)(struct device *dev, enum iommu_cap);
@@ -375,11 +385,11 @@ struct iommu_ops {
        struct iommu_domain *(*domain_alloc_user)(
                struct device *dev, u32 flags, struct iommu_domain *parent,
                const struct iommu_user_data *user_data);
+       struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
 
        struct iommu_device *(*probe_device)(struct device *dev);
        void (*release_device)(struct device *dev);
        void (*probe_finalize)(struct device *dev);
-       void (*set_platform_dma_ops)(struct device *dev);
        struct iommu_group *(*device_group)(struct device *dev);
 
        /* Request/Free a list of reserved regions for a device */
@@ -402,6 +412,9 @@ struct iommu_ops {
        const struct iommu_domain_ops *default_domain_ops;
        unsigned long pgsize_bitmap;
        struct module *owner;
+       struct iommu_domain *identity_domain;
+       struct iommu_domain *blocked_domain;
+       struct iommu_domain *default_domain;
 };
 
 /**
@@ -420,10 +433,8 @@ struct iommu_ops {
  * * ENODEV    - device specific errors, not able to be attached
  * * <others>  - treated as ENODEV by the caller. Use is discouraged
  * @set_dev_pasid: set an iommu domain to a pasid of device
- * @map: map a physically contiguous memory region to an iommu domain
  * @map_pages: map a physically contiguous set of pages of the same size to
  *             an iommu domain.
- * @unmap: unmap a physically contiguous memory region from an iommu domain
  * @unmap_pages: unmap a number of pages of the same size from an iommu domain
  * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
  * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
@@ -442,20 +453,16 @@ struct iommu_domain_ops {
        int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
                             ioasid_t pasid);
 
-       int (*map)(struct iommu_domain *domain, unsigned long iova,
-                  phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
        int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t pgsize, size_t pgcount,
                         int prot, gfp_t gfp, size_t *mapped);
-       size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
-                       size_t size, struct iommu_iotlb_gather *iotlb_gather);
        size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
                              size_t pgsize, size_t pgcount,
                              struct iommu_iotlb_gather *iotlb_gather);
 
        void (*flush_iotlb_all)(struct iommu_domain *domain);
-       void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
-                              size_t size);
+       int (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
+                             size_t size);
        void (*iotlb_sync)(struct iommu_domain *domain,
                           struct iommu_iotlb_gather *iotlb_gather);
 
@@ -476,6 +483,7 @@ struct iommu_domain_ops {
  * @list: Used by the iommu-core to keep a list of registered iommus
  * @ops: iommu-ops for talking to this iommu
  * @dev: struct device for sysfs handling
+ * @singleton_group: Used internally for drivers that have only one group
  * @max_pasids: number of supported PASIDs
  */
 struct iommu_device {
@@ -483,6 +491,7 @@ struct iommu_device {
        const struct iommu_ops *ops;
        struct fwnode_handle *fwnode;
        struct device *dev;
+       struct iommu_group *singleton_group;
        u32 max_pasids;
 };
 
@@ -526,6 +535,7 @@ struct iommu_fault_param {
  * @attach_deferred: the dma domain attachment is deferred
  * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
  * @require_direct: device requires IOMMU_RESV_DIRECT regions
+ * @shadow_on_flush: IOTLB flushes are used to sync shadow tables
  *
  * TODO: migrate other per device data pointers under iommu_dev_data, e.g.
  *     struct iommu_group      *iommu_group;
@@ -541,6 +551,7 @@ struct dev_iommu {
        u32                             attach_deferred:1;
        u32                             pci_32bit_workaround:1;
        u32                             require_direct:1;
+       u32                             shadow_on_flush:1;
 };
 
 int iommu_device_register(struct iommu_device *iommu,
@@ -768,6 +779,7 @@ extern struct iommu_group *pci_device_group(struct device *dev);
 extern struct iommu_group *generic_device_group(struct device *dev);
 /* FSL-MC device grouping function */
 struct iommu_group *fsl_mc_device_group(struct device *dev);
+extern struct iommu_group *generic_single_device_group(struct device *dev);
 
 /**
  * struct iommu_fwspec - per-device IOMMU instance data
@@ -1253,7 +1265,7 @@ static inline void iommu_free_global_pasid(ioasid_t pasid) {}
  * Creates a mapping at @iova for the buffer described by a scatterlist
  * stored in the given sg_table object in the provided IOMMU domain.
  */
-static inline size_t iommu_map_sgtable(struct iommu_domain *domain,
+static inline ssize_t iommu_map_sgtable(struct iommu_domain *domain,
                        unsigned long iova, struct sg_table *sgt, int prot)
 {
        return iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents, prot,
index 365eb092e9c4e9ff7b74cdc051078e7cec0aaca4..ab1da3142b06a94e71f977d5d7715f2245e0f3ae 100644 (file)
@@ -445,6 +445,10 @@ int kprobe_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
 int arch_kprobe_get_kallsym(unsigned int *symnum, unsigned long *value,
                            char *type, char *sym);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+                            unsigned long val, void *data);
+
 #else /* !CONFIG_KPROBES: */
 
 static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
index 99b8176c3738d12a21b601789c5cac2e9bfd7893..ff217a5ce552143e39717acc4a25ed9c8109fe7b 100644 (file)
@@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
 LSM_HOOK(int, 0, syslog, int type)
 LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
         const struct timezone *tz)
-LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
+LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages)
 LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
 LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file)
 LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
@@ -273,7 +273,7 @@ LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen)
 LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode)
 LSM_HOOK(int, 0, inode_notifysecctx, struct inode *inode, void *ctx, u32 ctxlen)
 LSM_HOOK(int, 0, inode_setsecctx, struct dentry *dentry, void *ctx, u32 ctxlen)
-LSM_HOOK(int, 0, inode_getsecctx, struct inode *inode, void **ctx,
+LSM_HOOK(int, -EOPNOTSUPP, inode_getsecctx, struct inode *inode, void **ctx,
         u32 *ctxlen)
 
 #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE)
index 4df6d1c12437f1850751a8175a643bce939bfe63..6f3631425f386dad40e847aa592a31e12d1b5e9f 100644 (file)
@@ -1232,7 +1232,13 @@ struct mlx5_ifc_virtio_emulation_cap_bits {
        u8         max_emulated_devices[0x8];
        u8         max_num_virtio_queues[0x18];
 
-       u8         reserved_at_a0[0x60];
+       u8         reserved_at_a0[0x20];
+
+       u8         reserved_at_c0[0x13];
+       u8         desc_group_mkey_supported[0x1];
+       u8         reserved_at_d4[0xc];
+
+       u8         reserved_at_e0[0x20];
 
        u8         umem_1_buffer_param_a[0x20];
 
index 9becdc3fa5034b360110cd8e0eb0a30fa8098daa..b86d51a855f6709762e2da6862e387eaf56a99e1 100644 (file)
@@ -74,7 +74,11 @@ struct mlx5_ifc_virtio_q_bits {
        u8    reserved_at_320[0x8];
        u8    pd[0x18];
 
-       u8    reserved_at_340[0xc0];
+       u8    reserved_at_340[0x20];
+
+       u8    desc_group_mkey[0x20];
+
+       u8    reserved_at_380[0x80];
 };
 
 struct mlx5_ifc_virtio_net_q_object_bits {
@@ -141,6 +145,7 @@ enum {
        MLX5_VIRTQ_MODIFY_MASK_STATE                    = (u64)1 << 0,
        MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS      = (u64)1 << 3,
        MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4,
+       MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY          = (u64)1 << 14,
 };
 
 enum {
index daa2f40d9ce65f6654676946a92b89ddbb594c19..7b12eebc5586dcc7cec786001fe7011f675ee9eb 100644 (file)
@@ -295,7 +295,9 @@ struct mmc_card {
 #define MMC_QUIRK_BROKEN_HPI   (1<<13)         /* Disable broken HPI support */
 #define MMC_QUIRK_BROKEN_SD_DISCARD    (1<<14) /* Disable broken SD discard support */
 #define MMC_QUIRK_BROKEN_SD_CACHE      (1<<15) /* Disable broken SD cache support */
+#define MMC_QUIRK_BROKEN_CACHE_FLUSH   (1<<16) /* Don't flush cache until the write has occurred */
 
+       bool                    written_flag;   /* Indicates eMMC has been written since power on */
        bool                    reenable_cmdq;  /* Re-enable Command Queue */
 
        unsigned int            erase_size;     /* erase size in sectors */
index 057c89867aa2f35947265f499556c81dc8fc7d73..b8da2db4ecd295672ec512800ffc3b50b363c63d 100644 (file)
@@ -115,6 +115,9 @@ static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid)
 
 int vfsgid_in_group_p(vfsgid_t vfsgid);
 
+struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap);
+void mnt_idmap_put(struct mnt_idmap *idmap);
+
 vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
                     struct user_namespace *fs_userns, kuid_t kuid);
 
index d88bb56c18e2e9d78308f85e4e843fee790422a3..947410faf9e2c9216b5aebfcb49dbb365799d44c 100644 (file)
@@ -287,7 +287,7 @@ struct cfi_private {
        unsigned long chipshift; /* Because they're of the same type */
        const char *im_name;     /* inter_module name for cmdset_setup */
        unsigned long quirks;
-       struct flchip chips[];  /* per-chip data structure for each chip */
+       struct flchip chips[] __counted_by(numchips);  /* per-chip data structure for each chip */
 };
 
 uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
index 2e3f43788d4839420ec565043802fb19b2d7398c..0421f12156b5c9cae86165cf20543e1b2ab0cf42 100644 (file)
@@ -24,7 +24,7 @@ struct lpddr_private {
        struct qinfo_chip *qinfo;
        int numchips;
        unsigned long chipshift;
-       struct flchip chips[];
+       struct flchip chips[] __counted_by(numchips);
 };
 
 /* qinfo_query_info structure contains request information for
index 3e285c09d16d95dba7271a53ee3e3d7cf73bc107..badb4c1ac079e87dd9c1cd439220698b02553e4f 100644 (file)
@@ -263,6 +263,7 @@ struct spinand_manufacturer {
 extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
 extern const struct spinand_manufacturer ato_spinand_manufacturer;
 extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
+extern const struct spinand_manufacturer foresee_spinand_manufacturer;
 extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
 extern const struct spinand_manufacturer macronix_spinand_manufacturer;
 extern const struct spinand_manufacturer micron_spinand_manufacturer;
index cd628c4b011e54fa807674daa8a3361c97c20a5f..cd797e00fe359a91b44b2e012309e87d5b446a7e 100644 (file)
@@ -239,6 +239,7 @@ struct nfs_server {
        struct list_head        delegations;
        struct list_head        ss_copies;
 
+       unsigned long           delegation_gen;
        unsigned long           mig_gen;
        unsigned long           mig_status;
 #define NFS_MIG_IN_TRANSITION          (1)
index 12bbb5c636646e7a9318613669385400749c94d2..539b57fbf3ce397ae80bbe186547f77100830d1b 100644 (file)
@@ -1772,7 +1772,7 @@ struct nfs_rpc_ops {
        void    (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *);
        int     (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir);
        int     (*link)    (struct inode *, struct inode *, const struct qstr *);
-       int     (*symlink) (struct inode *, struct dentry *, struct page *,
+       int     (*symlink) (struct inode *, struct dentry *, struct folio *,
                            unsigned int, struct iattr *);
        int     (*mkdir)   (struct inode *, struct dentry *, struct iattr *);
        int     (*rmdir)   (struct inode *, const struct qstr *);
index 4efea9dd967c1bd7c58e7d84d073784159936bdb..e10333d78dbbe539f3011a85d5e221d102560c99 100644 (file)
@@ -6,14 +6,12 @@
 #ifndef _NVME_KEYRING_H
 #define _NVME_KEYRING_H
 
-#ifdef CONFIG_NVME_KEYRING
+#if IS_ENABLED(CONFIG_NVME_KEYRING)
 
 key_serial_t nvme_tls_psk_default(struct key *keyring,
                const char *hostnqn, const char *subnqn);
 
 key_serial_t nvme_keyring_id(void);
-int nvme_keyring_init(void);
-void nvme_keyring_exit(void);
 
 #else
 
@@ -26,11 +24,5 @@ static inline key_serial_t nvme_keyring_id(void)
 {
        return 0;
 }
-static inline int nvme_keyring_init(void)
-{
-       return 0;
-}
-static inline void nvme_keyring_exit(void) {}
-
 #endif /* !CONFIG_NVME_KEYRING */
 #endif /* _NVME_KEYRING_H */
index a7ba74babad73a5d4d6924cd096e210923b2e5c8..44325c068b6a01eb81274fa65767dd9298d35643 100644 (file)
@@ -1732,7 +1732,7 @@ struct nvmf_auth_dhchap_success1_data {
        __u8            rsvd2;
        __u8            rvalid;
        __u8            rsvd3[7];
-       /* 'hl' bytes of response value if 'rvalid' is set */
+       /* 'hl' bytes of response value */
        __u8            rval[];
 };
 
index 4729d54e8995359cd7f2a494ba99c012bf4c83cb..73de70362b9881569ff64800c195538935ffc939 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/pinctrl/pinctrl-state.h>
 
 struct device;
+struct gpio_chip;
 
 /* This struct is private to the core and should be regarded as a cookie */
 struct pinctrl;
@@ -25,27 +26,30 @@ struct pinctrl_state;
 #ifdef CONFIG_PINCTRL
 
 /* External interface to pin control */
-extern bool pinctrl_gpio_can_use_line(unsigned gpio);
-extern int pinctrl_gpio_request(unsigned gpio);
-extern void pinctrl_gpio_free(unsigned gpio);
-extern int pinctrl_gpio_direction_input(unsigned gpio);
-extern int pinctrl_gpio_direction_output(unsigned gpio);
-extern int pinctrl_gpio_set_config(unsigned gpio, unsigned long config);
-
-extern struct pinctrl * __must_check pinctrl_get(struct device *dev);
-extern void pinctrl_put(struct pinctrl *p);
-extern struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
-                                                               const char *name);
-extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
-
-extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
-extern void devm_pinctrl_put(struct pinctrl *p);
-extern int pinctrl_select_default_state(struct device *dev);
+bool pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset);
+int pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset);
+void pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset);
+int pinctrl_gpio_direction_input(struct gpio_chip *gc,
+                                unsigned int offset);
+int pinctrl_gpio_direction_output(struct gpio_chip *gc,
+                                 unsigned int offset);
+int pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+                               unsigned long config);
+
+struct pinctrl * __must_check pinctrl_get(struct device *dev);
+void pinctrl_put(struct pinctrl *p);
+struct pinctrl_state * __must_check pinctrl_lookup_state(struct pinctrl *p,
+                                                        const char *name);
+int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
+
+struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
+void devm_pinctrl_put(struct pinctrl *p);
+int pinctrl_select_default_state(struct device *dev);
 
 #ifdef CONFIG_PM
-extern int pinctrl_pm_select_default_state(struct device *dev);
-extern int pinctrl_pm_select_sleep_state(struct device *dev);
-extern int pinctrl_pm_select_idle_state(struct device *dev);
+int pinctrl_pm_select_default_state(struct device *dev);
+int pinctrl_pm_select_sleep_state(struct device *dev);
+int pinctrl_pm_select_idle_state(struct device *dev);
 #else
 static inline int pinctrl_pm_select_default_state(struct device *dev)
 {
@@ -63,31 +67,38 @@ static inline int pinctrl_pm_select_idle_state(struct device *dev)
 
 #else /* !CONFIG_PINCTRL */
 
-static inline bool pinctrl_gpio_can_use_line(unsigned gpio)
+static inline bool
+pinctrl_gpio_can_use_line(struct gpio_chip *gc, unsigned int offset)
 {
        return true;
 }
 
-static inline int pinctrl_gpio_request(unsigned gpio)
+static inline int
+pinctrl_gpio_request(struct gpio_chip *gc, unsigned int offset)
 {
        return 0;
 }
 
-static inline void pinctrl_gpio_free(unsigned gpio)
+static inline void
+pinctrl_gpio_free(struct gpio_chip *gc, unsigned int offset)
 {
 }
 
-static inline int pinctrl_gpio_direction_input(unsigned gpio)
+static inline int
+pinctrl_gpio_direction_input(struct gpio_chip *gc, unsigned int offset)
 {
        return 0;
 }
 
-static inline int pinctrl_gpio_direction_output(unsigned gpio)
+static inline int
+pinctrl_gpio_direction_output(struct gpio_chip *gc, unsigned int offset)
 {
        return 0;
 }
 
-static inline int pinctrl_gpio_set_config(unsigned gpio, unsigned long config)
+static inline int
+pinctrl_gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+                           unsigned long config)
 {
        return 0;
 }
index ab721cf13a9864c9e4d1fa54218686d9cadcd0e7..7dae17b62a4d3b68fcfd9d54514c4e532b045e66 100644 (file)
@@ -4436,8 +4436,20 @@ struct ec_response_i2c_passthru_protect {
  * These commands are for sending and receiving message via HDMI CEC
  */
 
+#define EC_CEC_MAX_PORTS 16
+
 #define MAX_CEC_MSG_LEN 16
 
+/*
+ * Helper macros for packing/unpacking cec_events.
+ * bits[27:0] : bitmask of events from enum mkbp_cec_event
+ * bits[31:28]: port number
+ */
+#define EC_MKBP_EVENT_CEC_PACK(events, port) \
+               (((events) & GENMASK(27, 0)) | (((port) & 0xf) << 28))
+#define EC_MKBP_EVENT_CEC_GET_EVENTS(event) ((event) & GENMASK(27, 0))
+#define EC_MKBP_EVENT_CEC_GET_PORT(event) (((event) >> 28) & 0xf)
+
 /* CEC message from the AP to be written on the CEC bus */
 #define EC_CMD_CEC_WRITE_MSG 0x00B8
 
@@ -4449,19 +4461,54 @@ struct ec_params_cec_write {
        uint8_t msg[MAX_CEC_MSG_LEN];
 } __ec_align1;
 
+/**
+ * struct ec_params_cec_write_v1 - Message to write to the CEC bus
+ * @port: CEC port to write the message on
+ * @msg_len: length of msg in bytes
+ * @msg: message content to write to the CEC bus
+ */
+struct ec_params_cec_write_v1 {
+       uint8_t port;
+       uint8_t msg_len;
+       uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
+/* CEC message read from a CEC bus reported back to the AP */
+#define EC_CMD_CEC_READ_MSG 0x00B9
+
+/**
+ * struct ec_params_cec_read - Read a message from the CEC bus
+ * @port: CEC port to read a message on
+ */
+struct ec_params_cec_read {
+       uint8_t port;
+} __ec_align1;
+
+/**
+ * struct ec_response_cec_read - Message read from the CEC bus
+ * @msg_len: length of msg in bytes
+ * @msg: message content read from the CEC bus
+ */
+struct ec_response_cec_read {
+       uint8_t msg_len;
+       uint8_t msg[MAX_CEC_MSG_LEN];
+} __ec_align1;
+
 /* Set various CEC parameters */
 #define EC_CMD_CEC_SET 0x00BA
 
 /**
  * struct ec_params_cec_set - CEC parameters set
  * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @port: CEC port to set the parameter on
  * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC
  *     or 1 to enable CEC functionality, in case cmd is
  *     CEC_CMD_LOGICAL_ADDRESS, this field encodes the requested logical
  *     address between 0 and 15 or 0xff to unregister
  */
 struct ec_params_cec_set {
-       uint8_t cmd; /* enum cec_command */
+       uint8_t cmd : 4; /* enum cec_command */
+       uint8_t port : 4;
        uint8_t val;
 } __ec_align1;
 
@@ -4471,9 +4518,11 @@ struct ec_params_cec_set {
 /**
  * struct ec_params_cec_get - CEC parameters get
  * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @port: CEC port to get the parameter on
  */
 struct ec_params_cec_get {
-       uint8_t cmd; /* enum cec_command */
+       uint8_t cmd : 4; /* enum cec_command */
+       uint8_t port : 4;
 } __ec_align1;
 
 /**
@@ -4487,6 +4536,17 @@ struct ec_response_cec_get {
        uint8_t val;
 } __ec_align1;
 
+/* Get the number of CEC ports */
+#define EC_CMD_CEC_PORT_COUNT 0x00C1
+
+/**
+ * struct ec_response_cec_port_count - CEC port count response
+ * @port_count: number of CEC ports
+ */
+struct ec_response_cec_port_count {
+       uint8_t port_count;
+} __ec_align1;
+
 /* CEC parameters command */
 enum cec_command {
        /* CEC reading, writing and events enable */
@@ -4501,6 +4561,8 @@ enum mkbp_cec_event {
        EC_MKBP_CEC_SEND_OK                     = BIT(0),
        /* Outgoing message was not acknowledged */
        EC_MKBP_CEC_SEND_FAILED                 = BIT(1),
+       /* Incoming message can be read out by AP */
+       EC_MKBP_CEC_HAVE_DATA                   = BIT(2),
 };
 
 /*****************************************************************************/
index d661399b217dfc4b09eb3680a85ec6d5497bb196..6c19d4fbbe39586bc88dc47c64f9988570e2cdb7 100644 (file)
@@ -10,7 +10,7 @@
 #ifndef __SHMOB_DRM_H__
 #define __SHMOB_DRM_H__
 
-#include <drm/drm_mode.h>
+#include <video/videomode.h>
 
 enum shmob_drm_clk_source {
        SHMOB_DRM_CLK_BUS,
@@ -18,72 +18,21 @@ enum shmob_drm_clk_source {
        SHMOB_DRM_CLK_EXTERNAL,
 };
 
-enum shmob_drm_interface {
-       SHMOB_DRM_IFACE_RGB8,           /* 24bpp, 8:8:8 */
-       SHMOB_DRM_IFACE_RGB9,           /* 18bpp, 9:9 */
-       SHMOB_DRM_IFACE_RGB12A,         /* 24bpp, 12:12 */
-       SHMOB_DRM_IFACE_RGB12B,         /* 12bpp */
-       SHMOB_DRM_IFACE_RGB16,          /* 16bpp */
-       SHMOB_DRM_IFACE_RGB18,          /* 18bpp */
-       SHMOB_DRM_IFACE_RGB24,          /* 24bpp */
-       SHMOB_DRM_IFACE_YUV422,         /* 16bpp */
-       SHMOB_DRM_IFACE_SYS8A,          /* 24bpp, 8:8:8 */
-       SHMOB_DRM_IFACE_SYS8B,          /* 18bpp, 8:8:2 */
-       SHMOB_DRM_IFACE_SYS8C,          /* 18bpp, 2:8:8 */
-       SHMOB_DRM_IFACE_SYS8D,          /* 16bpp, 8:8 */
-       SHMOB_DRM_IFACE_SYS9,           /* 18bpp, 9:9 */
-       SHMOB_DRM_IFACE_SYS12,          /* 24bpp, 12:12 */
-       SHMOB_DRM_IFACE_SYS16A,         /* 16bpp */
-       SHMOB_DRM_IFACE_SYS16B,         /* 18bpp, 16:2 */
-       SHMOB_DRM_IFACE_SYS16C,         /* 18bpp, 2:16 */
-       SHMOB_DRM_IFACE_SYS18,          /* 18bpp */
-       SHMOB_DRM_IFACE_SYS24,          /* 24bpp */
-};
-
-struct shmob_drm_backlight_data {
-       const char *name;
-       int max_brightness;
-       int (*get_brightness)(void);
-       int (*set_brightness)(int brightness);
-};
-
 struct shmob_drm_panel_data {
        unsigned int width_mm;          /* Panel width in mm */
        unsigned int height_mm;         /* Panel height in mm */
-       struct drm_mode_modeinfo mode;
+       struct videomode mode;
 };
 
-struct shmob_drm_sys_interface_data {
-       unsigned int read_latch:6;
-       unsigned int read_setup:8;
-       unsigned int read_cycle:8;
-       unsigned int read_strobe:8;
-       unsigned int write_setup:8;
-       unsigned int write_cycle:8;
-       unsigned int write_strobe:8;
-       unsigned int cs_setup:3;
-       unsigned int vsync_active_high:1;
-       unsigned int vsync_dir_input:1;
-};
-
-#define SHMOB_DRM_IFACE_FL_DWPOL (1 << 0) /* Rising edge dot clock data latch */
-#define SHMOB_DRM_IFACE_FL_DIPOL (1 << 1) /* Active low display enable */
-#define SHMOB_DRM_IFACE_FL_DAPOL (1 << 2) /* Active low display data */
-#define SHMOB_DRM_IFACE_FL_HSCNT (1 << 3) /* Disable HSYNC during VBLANK */
-#define SHMOB_DRM_IFACE_FL_DWCNT (1 << 4) /* Disable dotclock during blanking */
-
 struct shmob_drm_interface_data {
-       enum shmob_drm_interface interface;
-       struct shmob_drm_sys_interface_data sys;
+       unsigned int bus_fmt;           /* MEDIA_BUS_FMT_* */
        unsigned int clk_div;
-       unsigned int flags;
 };
 
 struct shmob_drm_platform_data {
        enum shmob_drm_clk_source clk_source;
        struct shmob_drm_interface_data iface;
        struct shmob_drm_panel_data panel;
-       struct shmob_drm_backlight_data backlight;
 };
 
 #endif /* __SHMOB_DRM_H__ */
index d2f9f690a9c1452d189c5f638a61a02300d1200c..cda3597b84f2c32fbe567c04b3c4114311bed02a 100644 (file)
@@ -41,8 +41,8 @@ struct pwm_args {
 };
 
 enum {
-       PWMF_REQUESTED = 1 << 0,
-       PWMF_EXPORTED = 1 << 1,
+       PWMF_REQUESTED = 0,
+       PWMF_EXPORTED = 1,
 };
 
 /*
@@ -71,7 +71,6 @@ struct pwm_state {
  * @hwpwm: per-chip relative index of the PWM device
  * @pwm: global index of the PWM device
  * @chip: PWM chip providing this PWM device
- * @chip_data: chip-private data associated with the PWM device
  * @args: PWM arguments
  * @state: last applied state
  * @last: last implemented state (for PWM_DEBUG)
@@ -82,7 +81,6 @@ struct pwm_device {
        unsigned int hwpwm;
        unsigned int pwm;
        struct pwm_chip *chip;
-       void *chip_data;
 
        struct pwm_args args;
        struct pwm_state state;
@@ -267,7 +265,6 @@ struct pwm_capture {
  * @get_state: get the current PWM state. This function is only
  *            called once per PWM device when the PWM chip is
  *            registered.
- * @owner: helps prevent removal of modules exporting active PWMs
  */
 struct pwm_ops {
        int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
@@ -278,13 +275,13 @@ struct pwm_ops {
                     const struct pwm_state *state);
        int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
                         struct pwm_state *state);
-       struct module *owner;
 };
 
 /**
  * struct pwm_chip - abstract a PWM controller
  * @dev: device providing the PWMs
  * @ops: callbacks for this PWM controller
+ * @owner: module providing this chip
  * @base: number of first PWM controlled by this chip
  * @npwm: number of PWMs controlled by this chip
  * @of_xlate: request a PWM device given a device tree PWM specifier
@@ -295,6 +292,7 @@ struct pwm_ops {
 struct pwm_chip {
        struct device *dev;
        const struct pwm_ops *ops;
+       struct module *owner;
        int base;
        unsigned int npwm;
 
@@ -383,13 +381,13 @@ static inline void pwm_disable(struct pwm_device *pwm)
 /* PWM provider APIs */
 int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result,
                unsigned long timeout);
-int pwm_set_chip_data(struct pwm_device *pwm, void *data);
-void *pwm_get_chip_data(struct pwm_device *pwm);
 
-int pwmchip_add(struct pwm_chip *chip);
+int __pwmchip_add(struct pwm_chip *chip, struct module *owner);
+#define pwmchip_add(chip) __pwmchip_add(chip, THIS_MODULE)
 void pwmchip_remove(struct pwm_chip *chip);
 
-int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip);
+int __devm_pwmchip_add(struct device *dev, struct pwm_chip *chip, struct module *owner);
+#define devm_pwmchip_add(dev, chip) __devm_pwmchip_add(dev, chip, THIS_MODULE)
 
 struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
                                         unsigned int index,
@@ -445,16 +443,6 @@ static inline int pwm_capture(struct pwm_device *pwm,
        return -EINVAL;
 }
 
-static inline int pwm_set_chip_data(struct pwm_device *pwm, void *data)
-{
-       return -EINVAL;
-}
-
-static inline void *pwm_get_chip_data(struct pwm_device *pwm)
-{
-       return NULL;
-}
-
 static inline int pwmchip_add(struct pwm_chip *chip)
 {
        return -EINVAL;
index ff56ab804bf6dbedf9664696d9e25abcbcd3879d..d6d6ffeeb9a2af2f3ff497b48c08e2c19f8d0efb 100644 (file)
@@ -764,6 +764,8 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla
 extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
                      __realloc_size(3);
 extern void kvfree(const void *addr);
+DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
+
 extern void kvfree_sensitive(const void *addr, size_t len);
 
 unsigned int kmem_cache_size(struct kmem_cache *s);
index 86825c88b5767a5e6960d89f2a1129189f26d0fd..255a0562aea5a868b4e3bd78ecd2497e8f3f105f 100644 (file)
@@ -566,6 +566,7 @@ struct spi_controller {
 #define SPI_CONTROLLER_MUST_RX         BIT(3)  /* Requires rx */
 #define SPI_CONTROLLER_MUST_TX         BIT(4)  /* Requires tx */
 #define SPI_CONTROLLER_GPIO_SS         BIT(5)  /* GPIO CS must select slave */
+#define SPI_CONTROLLER_SUSPENDED       BIT(6)  /* Currently suspended */
 
        /* Flag indicating if the allocation of this struct is devres-managed */
        bool                    devm_allocated;
index af7358277f1c340ccb5c911fa567b5a813988437..e9d4377d03c6e78f07a8688bb0bd6a53e2bb8fe8 100644 (file)
@@ -92,6 +92,7 @@ struct rpc_clnt {
        };
        const struct cred       *cl_cred;
        unsigned int            cl_max_connect; /* max number of transports not to the same IP */
+       struct super_block *pipefs_sb;
 };
 
 /*
index ec4e9367f5b03be610f5f88621855f3a512604eb..68f3d315d2e18d93a356b0738e4ed855fac94591 100644 (file)
@@ -152,7 +152,7 @@ struct tcp_request_sock {
        u64                             snt_synack; /* first SYNACK sent time */
        bool                            tfo_listener;
        bool                            is_mptcp;
-       s8                              req_usec_ts;
+       bool                            req_usec_ts;
 #if IS_ENABLED(CONFIG_MPTCP)
        bool                            drop_req;
 #endif
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
new file mode 100644 (file)
index 0000000..de8324a
--- /dev/null
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TSM_H
+#define __TSM_H
+
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#define TSM_INBLOB_MAX 64
+#define TSM_OUTBLOB_MAX SZ_32K
+
+/*
+ * Privilege level is a nested permission concept to allow confidential
+ * guests to partition address space, 4-levels are supported.
+ */
+#define TSM_PRIVLEVEL_MAX 3
+
+/**
+ * struct tsm_desc - option descriptor for generating tsm report blobs
+ * @privlevel: optional privilege level to associate with @outblob
+ * @inblob_len: sizeof @inblob
+ * @inblob: arbitrary input data
+ */
+struct tsm_desc {
+       unsigned int privlevel;
+       size_t inblob_len;
+       u8 inblob[TSM_INBLOB_MAX];
+};
+
+/**
+ * struct tsm_report - track state of report generation relative to options
+ * @desc: input parameters to @report_new()
+ * @outblob_len: sizeof(@outblob)
+ * @outblob: generated evidence to provide to the attestation agent
+ * @auxblob_len: sizeof(@auxblob)
+ * @auxblob: (optional) auxiliary data to the report (e.g. certificate data)
+ */
+struct tsm_report {
+       struct tsm_desc desc;
+       size_t outblob_len;
+       u8 *outblob;
+       size_t auxblob_len;
+       u8 *auxblob;
+};
+
+/**
+ * struct tsm_ops - attributes and operations for tsm instances
+ * @name: tsm id reflected in /sys/kernel/config/tsm/report/$report/provider
+ * @privlevel_floor: convey base privlevel for nested scenarios
+ * @report_new: Populate @report with the report blob and auxblob
+ * (optional), return 0 on successful population, or -errno otherwise
+ *
+ * Implementation specific ops, only one is expected to be registered at
+ * a time i.e. only one of "sev-guest", "tdx-guest", etc.
+ */
+struct tsm_ops {
+       const char *name;
+       const unsigned int privlevel_floor;
+       int (*report_new)(struct tsm_report *report, void *data);
+};
+
+extern const struct config_item_type tsm_report_default_type;
+
+/* publish @privlevel, @privlevel_floor, and @auxblob attributes */
+extern const struct config_item_type tsm_report_extra_type;
+
+int tsm_register(const struct tsm_ops *ops, void *priv,
+                const struct config_item_type *type);
+int tsm_unregister(const struct tsm_ops *ops);
+#endif /* __TSM_H */
index 0e652026b776f81b697f7fa93c3183b67ee46203..db15ac07f8a6a453bfbe35df555eea6efc83e0d8 100644 (file)
@@ -204,6 +204,16 @@ struct vdpa_map_file {
  *                             @vdev: vdpa device
  *                             @idx: virtqueue index
  *                             Returns u32: group id for this virtqueue
+ * @get_vq_desc_group:         Get the group id for the descriptor table of
+ *                             a specific virtqueue (optional)
+ *                             @vdev: vdpa device
+ *                             @idx: virtqueue index
+ *                             Returns u32: group id for the descriptor table
+ *                             portion of this virtqueue. Could be different
+ *                             than the one from @get_vq_group, in which case
+ *                             the access to the descriptor table can be
+ *                             confined to a separate asid, isolating from
+ *                             the virtqueue's buffer address access.
  * @get_device_features:       Get virtio features supported by the device
  *                             @vdev: vdpa device
  *                             Returns the virtio features support by the
@@ -242,6 +252,17 @@ struct vdpa_map_file {
  * @reset:                     Reset device
  *                             @vdev: vdpa device
  *                             Returns integer: success (0) or error (< 0)
+ * @compat_reset:              Reset device with compatibility quirks to
+ *                             accommodate older userspace. Only needed by
+ *                             parent driver which used to have bogus reset
+ *                             behaviour, and has to maintain such behaviour
+ *                             for compatibility with older userspace.
+ *                             Historically compliant drivers only have to
+ *                             implement .reset; historically non-compliant
+ *                             drivers should implement both.
+ *                             @vdev: vdpa device
+ *                             @flags: compatibility quirks for reset
+ *                             Returns integer: success (0) or error (< 0)
  * @suspend:                   Suspend the device (optional)
  *                             @vdev: vdpa device
  *                             Returns integer: success (0) or error (< 0)
@@ -317,6 +338,15 @@ struct vdpa_map_file {
  *                             @iova: iova to be unmapped
  *                             @size: size of the area
  *                             Returns integer: success (0) or error (< 0)
+ * @reset_map:                 Reset device memory mapping to the default
+ *                             state (optional)
+ *                             Needed for devices that are using device
+ *                             specific DMA translation and prefer mapping
+ *                             to be decoupled from the virtio life cycle,
+ *                             i.e. device .reset op does not reset mapping
+ *                             @vdev: vdpa device
+ *                             @asid: address space identifier
+ *                             Returns integer: success (0) or error (< 0)
  * @get_vq_dma_dev:            Get the dma device for a specific
  *                             virtqueue (optional)
  *                             @vdev: vdpa device
@@ -360,6 +390,7 @@ struct vdpa_config_ops {
        /* Device ops */
        u32 (*get_vq_align)(struct vdpa_device *vdev);
        u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
+       u32 (*get_vq_desc_group)(struct vdpa_device *vdev, u16 idx);
        u64 (*get_device_features)(struct vdpa_device *vdev);
        u64 (*get_backend_features)(const struct vdpa_device *vdev);
        int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
@@ -373,6 +404,8 @@ struct vdpa_config_ops {
        u8 (*get_status)(struct vdpa_device *vdev);
        void (*set_status)(struct vdpa_device *vdev, u8 status);
        int (*reset)(struct vdpa_device *vdev);
+       int (*compat_reset)(struct vdpa_device *vdev, u32 flags);
+#define VDPA_RESET_F_CLEAN_MAP 1
        int (*suspend)(struct vdpa_device *vdev);
        int (*resume)(struct vdpa_device *vdev);
        size_t (*get_config_size)(struct vdpa_device *vdev);
@@ -394,6 +427,7 @@ struct vdpa_config_ops {
                       u64 iova, u64 size, u64 pa, u32 perm, void *opaque);
        int (*dma_unmap)(struct vdpa_device *vdev, unsigned int asid,
                         u64 iova, u64 size);
+       int (*reset_map)(struct vdpa_device *vdev, unsigned int asid);
        int (*set_group_asid)(struct vdpa_device *vdev, unsigned int group,
                              unsigned int asid);
        struct device *(*get_vq_dma_dev)(struct vdpa_device *vdev, u16 idx);
@@ -485,14 +519,17 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
        return vdev->dma_dev;
 }
 
-static inline int vdpa_reset(struct vdpa_device *vdev)
+static inline int vdpa_reset(struct vdpa_device *vdev, u32 flags)
 {
        const struct vdpa_config_ops *ops = vdev->config;
        int ret;
 
        down_write(&vdev->cf_lock);
        vdev->features_valid = false;
-       ret = ops->reset(vdev);
+       if (ops->compat_reset && flags)
+               ret = ops->compat_reset(vdev, flags);
+       else
+               ret = ops->reset(vdev);
        up_write(&vdev->cf_lock);
        return ret;
 }
index 067ac1d789bcb64917c5cbef40791f4a04b50380..d0f2797420f7044616c7c7ef9faccc956acf5a7c 100644 (file)
@@ -12,37 +12,48 @@ struct virtio_pci_modern_common_cfg {
        __le16 queue_reset;             /* read-write */
 };
 
+/**
+ * struct virtio_pci_modern_device - info for modern PCI virtio
+ * @pci_dev:       Ptr to the PCI device struct
+ * @common:        Position of the common capability in the PCI config
+ * @device:        Device-specific data (non-legacy mode)
+ * @notify_base:    Base of vq notifications (non-legacy mode)
+ * @notify_pa:     Physical base of vq notifications
+ * @isr:           Where to read and clear interrupt
+ * @notify_len:            So we can sanity-check accesses
+ * @device_len:            So we can sanity-check accesses
+ * @notify_map_cap: Capability for when we need to map notifications per-vq
+ * @notify_offset_multiplier: Multiply queue_notify_off by this value
+ *                            (non-legacy mode).
+ * @modern_bars:    Bitmask of BARs
+ * @id:                    Device and vendor id
+ * @device_id_check: Callback defined before vp_modern_probe() to be used to
+ *                 verify the PCI device is a vendor's expected device rather
+ *                 than the standard virtio PCI device
+ *                 Returns the found device id or -ERRNO
+ * @dma_mask:      Optional mask instead of the traditional DMA_BIT_MASK(64),
+ *                 for vendor devices with DMA space address limitations
+ */
 struct virtio_pci_modern_device {
        struct pci_dev *pci_dev;
 
        struct virtio_pci_common_cfg __iomem *common;
-       /* Device-specific data (non-legacy mode)  */
        void __iomem *device;
-       /* Base of vq notifications (non-legacy mode). */
        void __iomem *notify_base;
-       /* Physical base of vq notifications */
        resource_size_t notify_pa;
-       /* Where to read and clear interrupt */
        u8 __iomem *isr;
 
-       /* So we can sanity-check accesses. */
        size_t notify_len;
        size_t device_len;
+       size_t common_len;
 
-       /* Capability for when we need to map notifications per-vq. */
        int notify_map_cap;
 
-       /* Multiply queue_notify_off by this value. (non-legacy mode). */
        u32 notify_offset_multiplier;
-
        int modern_bars;
-
        struct virtio_device_id id;
 
-       /* optional check for vendor virtio device, returns dev_id or -ERRNO */
        int (*device_id_check)(struct pci_dev *pdev);
-
-       /* optional mask for devices with limited DMA space */
        u64 dma_mask;
 };
 
diff --git a/include/linux/vlynq.h b/include/linux/vlynq.h
deleted file mode 100644 (file)
index e9c0cd3..0000000
+++ /dev/null
@@ -1,149 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006, 2007 Eugene Konev <ejka@openwrt.org>
- */
-
-#ifndef __VLYNQ_H__
-#define __VLYNQ_H__
-
-#include <linux/device.h>
-#include <linux/types.h>
-
-struct module;
-
-#define VLYNQ_NUM_IRQS 32
-
-struct vlynq_mapping {
-       u32 size;
-       u32 offset;
-};
-
-enum vlynq_divisor {
-       vlynq_div_auto = 0,
-       vlynq_ldiv1,
-       vlynq_ldiv2,
-       vlynq_ldiv3,
-       vlynq_ldiv4,
-       vlynq_ldiv5,
-       vlynq_ldiv6,
-       vlynq_ldiv7,
-       vlynq_ldiv8,
-       vlynq_rdiv1,
-       vlynq_rdiv2,
-       vlynq_rdiv3,
-       vlynq_rdiv4,
-       vlynq_rdiv5,
-       vlynq_rdiv6,
-       vlynq_rdiv7,
-       vlynq_rdiv8,
-       vlynq_div_external
-};
-
-struct vlynq_device_id {
-       u32 id;
-       enum vlynq_divisor divisor;
-       unsigned long driver_data;
-};
-
-struct vlynq_regs;
-struct vlynq_device {
-       u32 id, dev_id;
-       int local_irq;
-       int remote_irq;
-       enum vlynq_divisor divisor;
-       u32 regs_start, regs_end;
-       u32 mem_start, mem_end;
-       u32 irq_start, irq_end;
-       int irq;
-       int enabled;
-       struct vlynq_regs *local;
-       struct vlynq_regs *remote;
-       struct device dev;
-};
-
-struct vlynq_driver {
-       char *name;
-       struct vlynq_device_id *id_table;
-       int (*probe)(struct vlynq_device *dev, struct vlynq_device_id *id);
-       void (*remove)(struct vlynq_device *dev);
-       struct device_driver driver;
-};
-
-struct plat_vlynq_ops {
-       int (*on)(struct vlynq_device *dev);
-       void (*off)(struct vlynq_device *dev);
-};
-
-static inline struct vlynq_driver *to_vlynq_driver(struct device_driver *drv)
-{
-       return container_of(drv, struct vlynq_driver, driver);
-}
-
-static inline struct vlynq_device *to_vlynq_device(struct device *device)
-{
-       return container_of(device, struct vlynq_device, dev);
-}
-
-extern struct bus_type vlynq_bus_type;
-
-extern int __vlynq_register_driver(struct vlynq_driver *driver,
-                                  struct module *owner);
-
-static inline int vlynq_register_driver(struct vlynq_driver *driver)
-{
-       return __vlynq_register_driver(driver, THIS_MODULE);
-}
-
-static inline void *vlynq_get_drvdata(struct vlynq_device *dev)
-{
-       return dev_get_drvdata(&dev->dev);
-}
-
-static inline void vlynq_set_drvdata(struct vlynq_device *dev, void *data)
-{
-       dev_set_drvdata(&dev->dev, data);
-}
-
-static inline u32 vlynq_mem_start(struct vlynq_device *dev)
-{
-       return dev->mem_start;
-}
-
-static inline u32 vlynq_mem_end(struct vlynq_device *dev)
-{
-       return dev->mem_end;
-}
-
-static inline u32 vlynq_mem_len(struct vlynq_device *dev)
-{
-       return dev->mem_end - dev->mem_start + 1;
-}
-
-static inline int vlynq_virq_to_irq(struct vlynq_device *dev, int virq)
-{
-       int irq = dev->irq_start + virq;
-       if ((irq < dev->irq_start) || (irq > dev->irq_end))
-               return -EINVAL;
-
-       return irq;
-}
-
-static inline int vlynq_irq_to_virq(struct vlynq_device *dev, int irq)
-{
-       if ((irq < dev->irq_start) || (irq > dev->irq_end))
-               return -EINVAL;
-
-       return irq - dev->irq_start;
-}
-
-extern void vlynq_unregister_driver(struct vlynq_driver *driver);
-extern int vlynq_enable_device(struct vlynq_device *dev);
-extern void vlynq_disable_device(struct vlynq_device *dev);
-extern int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset,
-                                  struct vlynq_mapping *mapping);
-extern int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset,
-                                   struct vlynq_mapping *mapping);
-extern int vlynq_set_local_irq(struct vlynq_device *dev, int virq);
-extern int vlynq_set_remote_irq(struct vlynq_device *dev, int virq);
-
-#endif /* __VLYNQ_H__ */
index 9c007f83569aafb504a97a7ab834525185307c9b..53e4b2eb2b260941a487ff8da208e57203ae234a 100644 (file)
@@ -275,7 +275,7 @@ struct cec_adapter {
 
        u32 sequence;
 
-       char input_phys[32];
+       char input_phys[40];
 };
 
 static inline void *cec_get_drvdata(const struct cec_adapter *adap)
index bdc654a455216b8ee367574d40624a115a9e782a..783bda6d5cc3fddec776b095431fcc3028677847 100644 (file)
@@ -108,7 +108,7 @@ struct ipu_node_names {
        char ivsc_sensor_port[7];
        char ivsc_ipu_port[7];
        char endpoint[11];
-       char remote_port[7];
+       char remote_port[9];
        char vcm[16];
 };
 
index c3d8f12234b1fb73ced5ac5c0adcc9f1a3328e0d..40fc0264250d779ab5dfa7d2fe16e6f1382c07d4 100644 (file)
@@ -19,6 +19,7 @@
 #define MIPI_CSI2_DT_NULL              0x10
 #define MIPI_CSI2_DT_BLANKING          0x11
 #define MIPI_CSI2_DT_EMBEDDED_8B       0x12
+#define MIPI_CSI2_DT_GENERIC_LONG(n)   (0x13 + (n) - 1)        /* 1..4 */
 #define MIPI_CSI2_DT_YUV420_8B         0x18
 #define MIPI_CSI2_DT_YUV420_10B                0x19
 #define MIPI_CSI2_DT_YUV420_8B_LEGACY  0x1a
index e0a13505f88da69770b5f514d56ff6810056fb07..d82dfdbf6e5838851637ad4be41ba0100905af8a 100644 (file)
@@ -284,7 +284,7 @@ struct video_device {
        struct v4l2_prio_state *prio;
 
        /* device info */
-       char name[32];
+       char name[64];
        enum vfl_devnode_type vfl_type;
        enum vfl_devnode_direction vfl_dir;
        int minor;
index 8a8977a33ec107399771b17600b9bc47bd9a9ddd..f6f111fae33c0d528e2154d48cdab7fb3836e2a3 100644 (file)
@@ -13,8 +13,6 @@
 #include <media/v4l2-subdev.h>
 #include <media/v4l2-dev.h>
 
-#define V4L2_DEVICE_NAME_SIZE (20 + 16)
-
 struct v4l2_ctrl_handler;
 
 /**
@@ -49,7 +47,7 @@ struct v4l2_device {
        struct media_device *mdev;
        struct list_head subdevs;
        spinlock_t lock;
-       char name[V4L2_DEVICE_NAME_SIZE];
+       char name[36];
        void (*notify)(struct v4l2_subdev *sd,
                        unsigned int notification, void *arg);
        struct v4l2_ctrl_handler *ctrl_handler;
index 4ffa914ade3a16b18bd3572b3461a4e1632bc90e..3a0e2588361cf451b3dbfc63dfab99550d13cf6a 100644 (file)
@@ -78,7 +78,7 @@ struct v4l2_subscribed_event {
        unsigned int            elems;
        unsigned int            first;
        unsigned int            in_use;
-       struct v4l2_kevent      events[];
+       struct v4l2_kevent      events[] __counted_by(elems);
 };
 
 /**
index b39586dfba358aab6a5558b31e0be185b55cda8f..ed0a44b6eadae8daf6c14f99bf0f6905152a6ec5 100644 (file)
@@ -143,6 +143,9 @@ int v4l2_create_fwnode_links(struct v4l2_subdev *src_sd,
  * v4l2_pipeline_pm_get - Increase the use count of a pipeline
  * @entity: The root entity of a pipeline
  *
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
  * Update the use count of all entities in the pipeline and power entities on.
  *
  * This function is intended to be called in video node open. It uses
@@ -157,6 +160,9 @@ int v4l2_pipeline_pm_get(struct media_entity *entity);
  * v4l2_pipeline_pm_put - Decrease the use count of a pipeline
  * @entity: The root entity of a pipeline
  *
+ * THIS FUNCTION IS DEPRECATED. DO NOT USE IN NEW DRIVERS. USE RUNTIME PM
+ * ON SUB-DEVICE DRIVERS INSTEAD.
+ *
  * Update the use count of all entities in the pipeline and power entities off.
  *
  * This function is intended to be called in video node release. It uses
index d9fca929c10b531768342860efa71bee4d5bf4dd..c1f90c1223a749ec8fa8959cd64d105b4566e129 100644 (file)
@@ -446,7 +446,9 @@ enum v4l2_subdev_pre_streamon_flags {
  * @s_stream: start (enabled == 1) or stop (enabled == 0) streaming on the
  *     sub-device. Failure on stop will remove any resources acquired in
  *     streaming start, while the error code is still returned by the driver.
- *     Also see call_s_stream wrapper in v4l2-subdev.c.
+ *     The caller shall track the subdev state, and shall not start or stop an
+ *     already started or stopped subdev. Also see call_s_stream wrapper in
+ *     v4l2-subdev.c.
  *
  * @g_pixelaspect: callback to return the pixelaspect ratio.
  *
@@ -822,8 +824,9 @@ struct v4l2_subdev_state {
  *                  operation shall fail if the pad index it has been called on
  *                  is not valid or in case of unrecoverable failures.
  *
- * @set_routing: enable or disable data connection routes described in the
- *              subdevice routing table.
+ * @set_routing: Enable or disable data connection routes described in the
+ *              subdevice routing table. Subdevs that implement this operation
+ *              must set the V4L2_SUBDEV_FL_STREAMS flag.
  *
  * @enable_streams: Enable the streams defined in streams_mask on the given
  *     source pad. Subdevs that implement this operation must use the active
@@ -948,8 +951,6 @@ struct v4l2_subdev_internal_ops {
        void (*release)(struct v4l2_subdev *sd);
 };
 
-#define V4L2_SUBDEV_NAME_SIZE 32
-
 /* Set this flag if this subdev is a i2c device. */
 #define V4L2_SUBDEV_FL_IS_I2C                  (1U << 0)
 /* Set this flag if this subdev is a spi device. */
@@ -1059,7 +1060,7 @@ struct v4l2_subdev {
        const struct v4l2_subdev_ops *ops;
        const struct v4l2_subdev_internal_ops *internal_ops;
        struct v4l2_ctrl_handler *ctrl_handler;
-       char name[V4L2_SUBDEV_NAME_SIZE];
+       char name[52];
        u32 grp_id;
        void *dev_priv;
        void *host_priv;
diff --git a/include/media/videobuf-core.h b/include/media/videobuf-core.h
deleted file mode 100644 (file)
index 2e01b2e..0000000
+++ /dev/null
@@ -1,233 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * generic helper functions for handling video4linux capture buffers
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-
-#ifndef _VIDEOBUF_CORE_H
-#define _VIDEOBUF_CORE_H
-
-#include <linux/poll.h>
-#include <linux/videodev2.h>
-
-#define UNSET (-1U)
-
-
-struct videobuf_buffer;
-struct videobuf_queue;
-
-/* --------------------------------------------------------------------- */
-
-/*
- * A small set of helper functions to manage video4linux buffers.
- *
- * struct videobuf_buffer holds the data structures used by the helper
- * functions, additionally some commonly used fields for v4l buffers
- * (width, height, lists, waitqueue) are in there.  That struct should
- * be used as first element in the drivers buffer struct.
- *
- * about the mmap helpers (videobuf_mmap_*):
- *
- * The mmaper function allows to map any subset of contiguous buffers.
- * This includes one mmap() call for all buffers (which the original
- * video4linux API uses) as well as one mmap() for every single buffer
- * (which v4l2 uses).
- *
- * If there is a valid mapping for a buffer, buffer->baddr/bsize holds
- * userspace address + size which can be fed into the
- * videobuf_dma_init_user function listed above.
- *
- */
-
-struct videobuf_mapping {
-       unsigned int count;
-       struct videobuf_queue *q;
-};
-
-enum videobuf_state {
-       VIDEOBUF_NEEDS_INIT = 0,
-       VIDEOBUF_PREPARED   = 1,
-       VIDEOBUF_QUEUED     = 2,
-       VIDEOBUF_ACTIVE     = 3,
-       VIDEOBUF_DONE       = 4,
-       VIDEOBUF_ERROR      = 5,
-       VIDEOBUF_IDLE       = 6,
-};
-
-struct videobuf_buffer {
-       unsigned int            i;
-       u32                     magic;
-
-       /* info about the buffer */
-       unsigned int            width;
-       unsigned int            height;
-       unsigned int            bytesperline; /* use only if != 0 */
-       unsigned long           size;
-       enum v4l2_field         field;
-       enum videobuf_state     state;
-       struct list_head        stream;  /* QBUF/DQBUF list */
-
-       /* touched by irq handler */
-       struct list_head        queue;
-       wait_queue_head_t       done;
-       unsigned int            field_count;
-       u64                     ts;
-
-       /* Memory type */
-       enum v4l2_memory        memory;
-
-       /* buffer size */
-       size_t                  bsize;
-
-       /* buffer offset (mmap + overlay) */
-       size_t                  boff;
-
-       /* buffer addr (userland ptr!) */
-       unsigned long           baddr;
-
-       /* for mmap'ed buffers */
-       struct videobuf_mapping *map;
-
-       /* Private pointer to allow specific methods to store their data */
-       int                     privsize;
-       void                    *priv;
-};
-
-struct videobuf_queue_ops {
-       int (*buf_setup)(struct videobuf_queue *q,
-                        unsigned int *count, unsigned int *size);
-       int (*buf_prepare)(struct videobuf_queue *q,
-                          struct videobuf_buffer *vb,
-                          enum v4l2_field field);
-       void (*buf_queue)(struct videobuf_queue *q,
-                         struct videobuf_buffer *vb);
-       void (*buf_release)(struct videobuf_queue *q,
-                           struct videobuf_buffer *vb);
-};
-
-#define MAGIC_QTYPE_OPS        0x12261003
-
-/* Helper operations - device type dependent */
-struct videobuf_qtype_ops {
-       u32                     magic;
-
-       struct videobuf_buffer *(*alloc_vb)(size_t size);
-       void *(*vaddr)          (struct videobuf_buffer *buf);
-       int (*iolock)           (struct videobuf_queue *q,
-                                struct videobuf_buffer *vb,
-                                struct v4l2_framebuffer *fbuf);
-       int (*sync)             (struct videobuf_queue *q,
-                                struct videobuf_buffer *buf);
-       int (*mmap_mapper)      (struct videobuf_queue *q,
-                                struct videobuf_buffer *buf,
-                                struct vm_area_struct *vma);
-};
-
-struct videobuf_queue {
-       struct mutex               vb_lock;
-       struct mutex               *ext_lock;
-       spinlock_t                 *irqlock;
-       struct device              *dev;
-
-       wait_queue_head_t          wait; /* wait if queue is empty */
-
-       enum v4l2_buf_type         type;
-       unsigned int               msize;
-       enum v4l2_field            field;
-       enum v4l2_field            last;   /* for field=V4L2_FIELD_ALTERNATE */
-       struct videobuf_buffer     *bufs[VIDEO_MAX_FRAME];
-       const struct videobuf_queue_ops  *ops;
-       struct videobuf_qtype_ops  *int_ops;
-
-       unsigned int               streaming:1;
-       unsigned int               reading:1;
-
-       /* capture via mmap() + ioctl(QBUF/DQBUF) */
-       struct list_head           stream;
-
-       /* capture via read() */
-       unsigned int               read_off;
-       struct videobuf_buffer     *read_buf;
-
-       /* driver private data */
-       void                       *priv_data;
-};
-
-static inline void videobuf_queue_lock(struct videobuf_queue *q)
-{
-       if (!q->ext_lock)
-               mutex_lock(&q->vb_lock);
-}
-
-static inline void videobuf_queue_unlock(struct videobuf_queue *q)
-{
-       if (!q->ext_lock)
-               mutex_unlock(&q->vb_lock);
-}
-
-int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
-               int non_blocking, int intr);
-int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
-               struct v4l2_framebuffer *fbuf);
-
-struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q);
-
-/* Used on videobuf-dvb */
-void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
-                             struct videobuf_buffer *buf);
-
-void videobuf_queue_core_init(struct videobuf_queue *q,
-                        const struct videobuf_queue_ops *ops,
-                        struct device *dev,
-                        spinlock_t *irqlock,
-                        enum v4l2_buf_type type,
-                        enum v4l2_field field,
-                        unsigned int msize,
-                        void *priv,
-                        struct videobuf_qtype_ops *int_ops,
-                        struct mutex *ext_lock);
-int  videobuf_queue_is_busy(struct videobuf_queue *q);
-void videobuf_queue_cancel(struct videobuf_queue *q);
-
-enum v4l2_field videobuf_next_field(struct videobuf_queue *q);
-int videobuf_reqbufs(struct videobuf_queue *q,
-                    struct v4l2_requestbuffers *req);
-int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b);
-int videobuf_qbuf(struct videobuf_queue *q,
-                 struct v4l2_buffer *b);
-int videobuf_dqbuf(struct videobuf_queue *q,
-                  struct v4l2_buffer *b, int nonblocking);
-int videobuf_streamon(struct videobuf_queue *q);
-int videobuf_streamoff(struct videobuf_queue *q);
-
-void videobuf_stop(struct videobuf_queue *q);
-
-int videobuf_read_start(struct videobuf_queue *q);
-void videobuf_read_stop(struct videobuf_queue *q);
-ssize_t videobuf_read_stream(struct videobuf_queue *q,
-                            char __user *data, size_t count, loff_t *ppos,
-                            int vbihack, int nonblocking);
-ssize_t videobuf_read_one(struct videobuf_queue *q,
-                         char __user *data, size_t count, loff_t *ppos,
-                         int nonblocking);
-__poll_t videobuf_poll_stream(struct file *file,
-                                 struct videobuf_queue *q,
-                                 poll_table *wait);
-
-int videobuf_mmap_setup(struct videobuf_queue *q,
-                       unsigned int bcount, unsigned int bsize,
-                       enum v4l2_memory memory);
-int __videobuf_mmap_setup(struct videobuf_queue *q,
-                       unsigned int bcount, unsigned int bsize,
-                       enum v4l2_memory memory);
-int videobuf_mmap_free(struct videobuf_queue *q);
-int videobuf_mmap_mapper(struct videobuf_queue *q,
-                        struct vm_area_struct *vma);
-
-#endif
diff --git a/include/media/videobuf-dma-contig.h b/include/media/videobuf-dma-contig.h
deleted file mode 100644 (file)
index 525883b..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for physically contiguous capture buffers
- *
- * The functions support hardware lacking scatter gather support
- * (i.e. the buffers must be linear in physical memory)
- *
- * Copyright (c) 2008 Magnus Damm
- */
-#ifndef _VIDEOBUF_DMA_CONTIG_H
-#define _VIDEOBUF_DMA_CONTIG_H
-
-#include <linux/dma-mapping.h>
-#include <media/videobuf-core.h>
-
-void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
-                                   const struct videobuf_queue_ops *ops,
-                                   struct device *dev,
-                                   spinlock_t *irqlock,
-                                   enum v4l2_buf_type type,
-                                   enum v4l2_field field,
-                                   unsigned int msize,
-                                   void *priv,
-                                   struct mutex *ext_lock);
-
-dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf);
-void videobuf_dma_contig_free(struct videobuf_queue *q,
-                             struct videobuf_buffer *buf);
-
-#endif /* _VIDEOBUF_DMA_CONTIG_H */
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h
deleted file mode 100644 (file)
index 930ff8d..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for SG DMA video4linux capture buffers
- *
- * The functions expect the hardware being able to scatter gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks).  They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- *
- * Highly based on video-buf written originally by:
- * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
- * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
- * (c) 2006 Ted Walther and John Sokol
- */
-#ifndef _VIDEOBUF_DMA_SG_H
-#define _VIDEOBUF_DMA_SG_H
-
-#include <media/videobuf-core.h>
-
-/* --------------------------------------------------------------------- */
-
-/*
- * A small set of helper functions to manage buffers (both userland
- * and kernel) for DMA.
- *
- * videobuf_dma_init_*()
- *     creates a buffer.  The userland version takes a userspace
- *     pointer + length.  The kernel version just wants the size and
- *     does memory allocation too using vmalloc_32().
- *
- * videobuf_dma_*()
- *     see Documentation/core-api/dma-api-howto.rst, these functions to
- *     basically the same.  The map function does also build a
- *     scatterlist for the buffer (and unmap frees it ...)
- *
- * videobuf_dma_free()
- *     no comment ...
- *
- */
-
-struct videobuf_dmabuf {
-       u32                 magic;
-
-       /* for userland buffer */
-       int                 offset;
-       size_t              size;
-       struct page         **pages;
-
-       /* for kernel buffers */
-       void                *vaddr;
-       struct page         **vaddr_pages;
-       dma_addr_t          *dma_addr;
-       struct device       *dev;
-
-       /* for overlay buffers (pci-pci dma) */
-       dma_addr_t          bus_addr;
-
-       /* common */
-       struct scatterlist  *sglist;
-       int                 sglen;
-       unsigned long       nr_pages;
-       int                 direction;
-};
-
-struct videobuf_dma_sg_memory {
-       u32                 magic;
-
-       /* for mmap'ed buffers */
-       struct videobuf_dmabuf  dma;
-};
-
-/*
- * Scatter-gather DMA buffer API.
- *
- * These functions provide a simple way to create a page list and a
- * scatter-gather list from a kernel, userspace of physical address and map the
- * memory for DMA operation.
- *
- * Despite the name, this is totally unrelated to videobuf, except that
- * videobuf-dma-sg uses the same API internally.
- */
-int videobuf_dma_free(struct videobuf_dmabuf *dma);
-
-int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma);
-struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf);
-
-void *videobuf_sg_alloc(size_t size);
-
-void videobuf_queue_sg_init(struct videobuf_queue *q,
-                        const struct videobuf_queue_ops *ops,
-                        struct device *dev,
-                        spinlock_t *irqlock,
-                        enum v4l2_buf_type type,
-                        enum v4l2_field field,
-                        unsigned int msize,
-                        void *priv,
-                        struct mutex *ext_lock);
-
-#endif /* _VIDEOBUF_DMA_SG_H */
-
diff --git a/include/media/videobuf-vmalloc.h b/include/media/videobuf-vmalloc.h
deleted file mode 100644 (file)
index e930dbb..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * helper functions for vmalloc capture buffers
- *
- * The functions expect the hardware being able to scatter gather
- * (i.e. the buffers are not linear in physical memory, but fragmented
- * into PAGE_SIZE chunks).  They also assume the driver does not need
- * to touch the video data.
- *
- * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
- */
-#ifndef _VIDEOBUF_VMALLOC_H
-#define _VIDEOBUF_VMALLOC_H
-
-#include <media/videobuf-core.h>
-
-/* --------------------------------------------------------------------- */
-
-struct videobuf_vmalloc_memory {
-       u32                 magic;
-
-       void                *vaddr;
-
-       /* remap_vmalloc_range seems to need to run
-        * after mmap() on some cases */
-       struct vm_area_struct *vma;
-};
-
-void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
-                        const struct videobuf_queue_ops *ops,
-                        struct device *dev,
-                        spinlock_t *irqlock,
-                        enum v4l2_buf_type type,
-                        enum v4l2_field field,
-                        unsigned int msize,
-                        void *priv,
-                        struct mutex *ext_lock);
-
-void *videobuf_to_vmalloc(struct videobuf_buffer *buf);
-
-void videobuf_vmalloc_free(struct videobuf_buffer *buf);
-
-#endif
index 7f0adda3bf2fed28c53352d9021574b5eade2738..335bbc52171c10eb4b4b7e03a18eb8147902c6ac 100644 (file)
@@ -40,8 +40,8 @@ struct flowi_common {
 #define FLOWI_FLAG_KNOWN_NH            0x02
        __u32   flowic_secid;
        kuid_t  flowic_uid;
-       struct flowi_tunnel flowic_tun_key;
        __u32           flowic_multipath_hash;
+       struct flowi_tunnel flowic_tun_key;
 };
 
 union flowi_uli {
index 078d3c52c03f982cb5d6c4469b3ac0a73656329f..e5f2f0b73a9a0dde838c59f6f84dff0cbdb97e79 100644 (file)
@@ -20,7 +20,22 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_find(const struct nf
 #endif
 }
 
-static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *ct)
+static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
+                                          enum ip_conntrack_info ctinfo)
+{
+#if IS_ENABLED(CONFIG_NET_ACT_CT)
+       struct nf_conn_act_ct_ext *act_ct_ext;
+
+       act_ct_ext = nf_conn_act_ct_ext_find(ct);
+       if (dev_net(skb->dev) == &init_net && act_ct_ext)
+               act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
+#endif
+}
+
+static inline struct
+nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct sk_buff *skb,
+                                          struct nf_conn *ct,
+                                          enum ip_conntrack_info ctinfo)
 {
 #if IS_ENABLED(CONFIG_NET_ACT_CT)
        struct nf_conn_act_ct_ext *act_ct = nf_ct_ext_find(ct, NF_CT_EXT_ACT_CT);
@@ -29,22 +44,11 @@ static inline struct nf_conn_act_ct_ext *nf_conn_act_ct_ext_add(struct nf_conn *
                return act_ct;
 
        act_ct = nf_ct_ext_add(ct, NF_CT_EXT_ACT_CT, GFP_ATOMIC);
+       nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
        return act_ct;
 #else
        return NULL;
 #endif
 }
 
-static inline void nf_conn_act_ct_ext_fill(struct sk_buff *skb, struct nf_conn *ct,
-                                          enum ip_conntrack_info ctinfo)
-{
-#if IS_ENABLED(CONFIG_NET_ACT_CT)
-       struct nf_conn_act_ct_ext *act_ct_ext;
-
-       act_ct_ext = nf_conn_act_ct_ext_find(ct);
-       if (dev_net(skb->dev) == &init_net && act_ct_ext)
-               act_ct_ext->ifindex[CTINFO2DIR(ctinfo)] = skb->dev->ifindex;
-#endif
-}
-
 #endif /* _NF_CONNTRACK_ACT_CT_H */
index a375a171ef3cb37ab1d8246c72c6a3e83f5c9184..b56be10838f09a2cb56ab511242d2b583eb4c33b 100644 (file)
@@ -124,7 +124,7 @@ struct tcp_ao_info {
 #define tcp_hash_fail(msg, family, skb, fmt, ...)                      \
 do {                                                                   \
        const struct tcphdr *th = tcp_hdr(skb);                         \
-       char hdr_flags[5] = {};                                         \
+       char hdr_flags[6];                                              \
        char *f = hdr_flags;                                            \
                                                                        \
        if (th->fin)                                                    \
@@ -133,17 +133,18 @@ do {                                                                      \
                *f++ = 'S';                                             \
        if (th->rst)                                                    \
                *f++ = 'R';                                             \
+       if (th->psh)                                                    \
+               *f++ = 'P';                                             \
        if (th->ack)                                                    \
-               *f++ = 'A';                                             \
-       if (f != hdr_flags)                                             \
-               *f = ' ';                                               \
+               *f++ = '.';                                             \
+       *f = 0;                                                         \
        if ((family) == AF_INET) {                                      \
-               net_info_ratelimited("%s for (%pI4, %d)->(%pI4, %d) %s" fmt "\n", \
+               net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \
                                msg, &ip_hdr(skb)->saddr, ntohs(th->source), \
                                &ip_hdr(skb)->daddr, ntohs(th->dest),   \
                                hdr_flags, ##__VA_ARGS__);              \
        } else {                                                        \
-               net_info_ratelimited("%s for [%pI6c]:%u->[%pI6c]:%u %s" fmt "\n", \
+               net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s]" fmt "\n", \
                                msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \
                                &ipv6_hdr(skb)->daddr, ntohs(th->dest), \
                                hdr_flags, ##__VA_ARGS__);              \
index a5ef84944a0680690755599ae14469d9111513fa..71ae37d3bedd76e38297be9bad03c26e896ddbba 100644 (file)
@@ -96,7 +96,6 @@ struct tegra_smmu_soc {
 
 struct tegra_mc;
 struct tegra_smmu;
-struct gart_device;
 
 #ifdef CONFIG_TEGRA_IOMMU_SMMU
 struct tegra_smmu *tegra_smmu_probe(struct device *dev,
@@ -116,28 +115,6 @@ static inline void tegra_smmu_remove(struct tegra_smmu *smmu)
 }
 #endif
 
-#ifdef CONFIG_TEGRA_IOMMU_GART
-struct gart_device *tegra_gart_probe(struct device *dev, struct tegra_mc *mc);
-int tegra_gart_suspend(struct gart_device *gart);
-int tegra_gart_resume(struct gart_device *gart);
-#else
-static inline struct gart_device *
-tegra_gart_probe(struct device *dev, struct tegra_mc *mc)
-{
-       return ERR_PTR(-ENODEV);
-}
-
-static inline int tegra_gart_suspend(struct gart_device *gart)
-{
-       return -ENODEV;
-}
-
-static inline int tegra_gart_resume(struct gart_device *gart)
-{
-       return -ENODEV;
-}
-#endif
-
 struct tegra_mc_reset {
        const char *name;
        unsigned long id;
@@ -185,8 +162,6 @@ struct tegra_mc_ops {
         */
        int (*probe)(struct tegra_mc *mc);
        void (*remove)(struct tegra_mc *mc);
-       int (*suspend)(struct tegra_mc *mc);
-       int (*resume)(struct tegra_mc *mc);
        irqreturn_t (*handle_irq)(int irq, void *data);
        int (*probe_device)(struct tegra_mc *mc, struct device *dev);
 };
@@ -225,7 +200,6 @@ struct tegra_mc {
        struct tegra_bpmp *bpmp;
        struct device *dev;
        struct tegra_smmu *smmu;
-       struct gart_device *gart;
        void __iomem *regs;
        void __iomem *bcast_ch_regs;
        void __iomem **ch_regs;
index a03c543cb072de303e88740679c3f0aa44a13017..f05f747e444d668600397651bd66105f6ed08718 100644 (file)
@@ -34,7 +34,7 @@
 
 #define MEDIA_BUS_FMT_FIXED                    0x0001
 
-/* RGB - next is       0x1025 */
+/* RGB - next is       0x1026 */
 #define MEDIA_BUS_FMT_RGB444_1X12              0x1016
 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE      0x1001
 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE      0x1002
@@ -46,6 +46,7 @@
 #define MEDIA_BUS_FMT_RGB565_2X8_BE            0x1007
 #define MEDIA_BUS_FMT_RGB565_2X8_LE            0x1008
 #define MEDIA_BUS_FMT_RGB666_1X18              0x1009
+#define MEDIA_BUS_FMT_RGB666_2X9_BE            0x1025
 #define MEDIA_BUS_FMT_BGR666_1X18              0x1023
 #define MEDIA_BUS_FMT_RBG888_1X24              0x100e
 #define MEDIA_BUS_FMT_RGB666_1X24_CPADHI       0x1015
index c8ae72466ee64482b4a0f461ddf0857a825a4ebb..3cd044edee5d894cad66c02a9a0970054ee9a884 100644 (file)
@@ -3,8 +3,8 @@
 /*     Documentation/netlink/specs/nfsd.yaml */
 /* YNL-GEN uapi header */
 
-#ifndef _UAPI_LINUX_NFSD_H
-#define _UAPI_LINUX_NFSD_H
+#ifndef _UAPI_LINUX_NFSD_NETLINK_H
+#define _UAPI_LINUX_NFSD_NETLINK_H
 
 #define NFSD_FAMILY_NAME       "nfsd"
 #define NFSD_FAMILY_VERSION    1
@@ -36,4 +36,4 @@ enum {
        NFSD_CMD_MAX = (__NFSD_CMD_MAX - 1)
 };
 
-#endif /* _UAPI_LINUX_NFSD_H */
+#endif /* _UAPI_LINUX_NFSD_NETLINK_H */
diff --git a/include/uapi/linux/npcm-video.h b/include/uapi/linux/npcm-video.h
new file mode 100644 (file)
index 0000000..1d39f6f
--- /dev/null
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Controls header for NPCM video driver
+ *
+ * Copyright (C) 2022 Nuvoton Technologies
+ */
+
+#ifndef _UAPI_LINUX_NPCM_VIDEO_H
+#define _UAPI_LINUX_NPCM_VIDEO_H
+
+#include <linux/v4l2-controls.h>
+
+/*
+ * Check Documentation/userspace-api/media/drivers/npcm-video.rst for control
+ * details.
+ */
+
+/*
+ * This control is meant to set the mode of NPCM Video Capture/Differentiation
+ * (VCD) engine.
+ *
+ * The VCD engine supports two modes:
+ * COMPLETE - Capture the next complete frame into memory.
+ * DIFF            - Compare the incoming frame with the frame stored in memory, and
+ *           updates the differentiated frame in memory.
+ */
+#define V4L2_CID_NPCM_CAPTURE_MODE     (V4L2_CID_USER_NPCM_BASE + 0)
+
+enum v4l2_npcm_capture_mode {
+       V4L2_NPCM_CAPTURE_MODE_COMPLETE = 0, /* COMPLETE mode */
+       V4L2_NPCM_CAPTURE_MODE_DIFF     = 1, /* DIFF mode */
+};
+
+/*
+ * This control is meant to get the count of compressed HEXTILE rectangles which
+ * is relevant to the number of differentiated frames if VCD is in DIFF mode.
+ * And the count will always be 1 if VCD is in COMPLETE mode.
+ */
+#define V4L2_CID_NPCM_RECT_COUNT       (V4L2_CID_USER_NPCM_BASE + 1)
+
+#endif /* _UAPI_LINUX_NPCM_VIDEO_H */
index 1c9da485318f93fe50e11a47f284d8fe81106066..b44ba7dcdefcadc62444e97721ea7ef00ade6a74 100644 (file)
@@ -68,6 +68,7 @@ typedef enum {
        SEV_RET_INVALID_PARAM,
        SEV_RET_RESOURCE_LIMIT,
        SEV_RET_SECURE_DATA_INVALID,
+       SEV_RET_INVALID_KEY = 0x27,
        SEV_RET_MAX,
 } sev_ret_code;
 
index 2aa39112cf8dd37e3ee265f6128973c8698145df..154a87a1eca978baf5fccf2280936d103a82ebbb 100644 (file)
 
 #include <linux/types.h>
 
+#define SNP_REPORT_USER_DATA_SIZE 64
+
 struct snp_report_req {
        /* user data that should be included in the report */
-       __u8 user_data[64];
+       __u8 user_data[SNP_REPORT_USER_DATA_SIZE];
 
        /* The vmpl level to be included in the report */
        __u32 vmpl;
index c3604a0a3e30ae44e2c1ff1e20e9745c92fc0d2d..68db66d4aae81ea4671766b946a12df8be6e1c8d 100644 (file)
@@ -203,6 +203,12 @@ enum v4l2_colorfx {
  */
 #define V4L2_CID_USER_ASPEED_BASE              (V4L2_CID_USER_BASE + 0x11a0)
 
+/*
+ * The base for Nuvoton NPCM driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_NPCM_BASE                        (V4L2_CID_USER_BASE + 0x11b0)
+
 /* MPEG-class control IDs */
 /* The MPEG controls are applicable to all codec controls
  * and the 'MPEG' part of the define is historical */
index f5c48b61ab62244104bbf1b2100d3db7286f8c82..649560c685f13b73feaafb96b64c351b6eec2c25 100644 (file)
  */
 #define VHOST_VDPA_RESUME              _IO(VHOST_VIRTIO, 0x7E)
 
+/* Get the group for the descriptor table including driver & device areas
+ * of a virtqueue: read index, write group in num.
+ * The virtqueue index is stored in the index field of vhost_vring_state.
+ * The group ID of the descriptor table for this specific virtqueue
+ * is returned via num field of vhost_vring_state.
+ */
+#define VHOST_VDPA_GET_VRING_DESC_GROUP        _IOWR(VHOST_VIRTIO, 0x7F,       \
+                                             struct vhost_vring_state)
 #endif
index 2d827d22cd99d2e507acaebcb3d502ac15417770..d7656908f7305fb7fc0977115f129924af01beff 100644 (file)
@@ -185,5 +185,12 @@ struct vhost_vdpa_iova_range {
  * DRIVER_OK
  */
 #define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK  0x6
+/* Device may expose the virtqueue's descriptor area, driver area and
+ * device area to a different group for ASID binding than where its
+ * buffers may reside. Requires VHOST_BACKEND_F_IOTLB_ASID.
+ */
+#define VHOST_BACKEND_F_DESC_ASID    0x7
+/* IOTLB don't flush memory mapping across device reset */
+#define VHOST_BACKEND_F_IOTLB_PERSIST  0x8
 
 #endif
index 78260e5d9985f8fde0a0ed5133b50c861a59ada1..c3d4e490ce7ca3b0ed4498f3096af64699e826f5 100644 (file)
@@ -804,6 +804,7 @@ struct v4l2_pix_format {
 #define V4L2_PIX_FMT_QC08C    v4l2_fourcc('Q', '0', '8', 'C') /* Qualcomm 8-bit compressed */
 #define V4L2_PIX_FMT_QC10C    v4l2_fourcc('Q', '1', '0', 'C') /* Qualcomm 10-bit compressed */
 #define V4L2_PIX_FMT_AJPG     v4l2_fourcc('A', 'J', 'P', 'G') /* Aspeed JPEG */
+#define V4L2_PIX_FMT_HEXTILE  v4l2_fourcc('H', 'X', 'T', 'L') /* Hextile compressed */
 
 /* 10bit raw packed, 32 bytes for every 25 pixels, last LSB 6 bits unused */
 #define V4L2_PIX_FMT_IPU3_SBGGR10      v4l2_fourcc('i', 'p', '3', 'b') /* IPU3 packed 10-bit BGGR bayer */
index 2c712c654165353c2312fe908e93d1efff2c8632..8881aea60f6f11be1aa2fd20f813883546c8e74a 100644 (file)
  */
 #define VIRTIO_F_NOTIFICATION_DATA     38
 
+/* This feature indicates that the driver uses the data provided by the device
+ * as a virtqueue identifier in available buffer notifications.
+ */
+#define VIRTIO_F_NOTIF_CONFIG_DATA     39
+
 /*
  * This feature indicates that the driver can reset a queue individually.
  */
index dcb179de43585f74ad80fc8da96a66e73f5fdf99..e1571603175e715339cdfb4c0a4d09ddd3a9dfa0 100644 (file)
@@ -248,6 +248,7 @@ enum {
  * @max_beb_per1024: maximum expected number of bad PEB per 1024 PEBs
  * @padding: reserved for future, not used, has to be zeroed
  * @disable_fm: whether disable fastmap
+ * @need_resv_pool: whether reserve free pebs for filling pool/wl_pool
  *
  * This data structure is used to specify MTD device UBI has to attach and the
  * parameters it has to use. The number which should be assigned to the new UBI
@@ -293,7 +294,8 @@ struct ubi_attach_req {
        __s32 vid_hdr_offset;
        __s16 max_beb_per1024;
        __s8 disable_fm;
-       __s8 padding[9];
+       __s8 need_resv_pool;
+       __s8 padding[8];
 };
 
 /*
index fea06810b43dbb19a62624908a81d71b9b5bf1e2..a1e4239c7d75d1ca993955e0dfd38fc4e6c71941 100644 (file)
@@ -52,7 +52,7 @@ static int io_buffer_add_list(struct io_ring_ctx *ctx,
        return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
 }
 
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
+bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
 {
        struct io_ring_ctx *ctx = req->ctx;
        struct io_buffer_list *bl;
@@ -65,7 +65,7 @@ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
         * multiple use.
         */
        if (req->flags & REQ_F_PARTIAL_IO)
-               return;
+               return false;
 
        io_ring_submit_lock(ctx, issue_flags);
 
@@ -76,7 +76,7 @@ void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
        req->buf_index = buf->bgid;
 
        io_ring_submit_unlock(ctx, issue_flags);
-       return;
+       return true;
 }
 
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
index d14345ef61fc8de071da1f0e54a7ac8d24ee1dd0..f2d615236b2cb98e78cb326202a278a467f504e7 100644 (file)
@@ -53,11 +53,11 @@ int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
 
 unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
 
-void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
+bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
 
 void *io_pbuf_get_address(struct io_ring_ctx *ctx, unsigned long bgid);
 
-static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
+static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
 {
        /*
         * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
@@ -80,8 +80,10 @@ static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
                } else {
                        req->buf_index = req->buf_list->bgid;
                        req->flags &= ~REQ_F_BUFFER_RING;
+                       return true;
                }
        }
+       return false;
 }
 
 static inline bool io_do_buffer_select(struct io_kiocb *req)
@@ -91,12 +93,13 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
        return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
 }
 
-static inline void io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
+static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 {
        if (req->flags & REQ_F_BUFFER_SELECTED)
-               io_kbuf_recycle_legacy(req, issue_flags);
+               return io_kbuf_recycle_legacy(req, issue_flags);
        if (req->flags & REQ_F_BUFFER_RING)
-               io_kbuf_recycle_ring(req);
+               return io_kbuf_recycle_ring(req);
+       return false;
 }
 
 static inline unsigned int __io_put_kbuf_list(struct io_kiocb *req,
index 7a8e298af81b3b1db393efc547d04a8f2938f091..75d494dad7e2c7b22a53f50fc422d807a0559000 100644 (file)
@@ -1461,16 +1461,6 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
        int ret;
        bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
 
-       if (connect->in_progress) {
-               struct socket *socket;
-
-               ret = -ENOTSOCK;
-               socket = sock_from_file(req->file);
-               if (socket)
-                       ret = sock_error(socket->sk);
-               goto out;
-       }
-
        if (req_has_async_data(req)) {
                io = req->async_data;
        } else {
@@ -1490,9 +1480,7 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
            && force_nonblock) {
                if (ret == -EINPROGRESS) {
                        connect->in_progress = true;
-                       return -EAGAIN;
-               }
-               if (ret == -ECONNABORTED) {
+               } else if (ret == -ECONNABORTED) {
                        if (connect->seen_econnaborted)
                                goto out;
                        connect->seen_econnaborted = true;
@@ -1506,6 +1494,16 @@ int io_connect(struct io_kiocb *req, unsigned int issue_flags)
                memcpy(req->async_data, &__io, sizeof(__io));
                return -EAGAIN;
        }
+       if (connect->in_progress) {
+               /*
+                * At least bluetooth will return -EBADFD on a re-connect
+                * attempt, and it's (supposedly) also valid to get -EISCONN
+                * which means the previous result is good. For both of these,
+                * grab the sock_error() and use that for the completion.
+                */
+               if (ret == -EBADFD || ret == -EISCONN)
+                       ret = sock_error(sock_from_file(req->file)->sk);
+       }
        if (ret == -ERESTARTSYS)
                ret = -EINTR;
 out:
index 25a3515a177c7a03410ed2fe72f5e3291f6d4afb..799db44283c7e3782ad451f5ad417547e09d486c 100644 (file)
@@ -66,7 +66,7 @@ const struct io_issue_def io_issue_defs[] = {
                .iopoll                 = 1,
                .iopoll_queue           = 1,
                .vectored               = 1,
-               .prep                   = io_prep_rw,
+               .prep                   = io_prep_rwv,
                .issue                  = io_read,
        },
        [IORING_OP_WRITEV] = {
@@ -80,7 +80,7 @@ const struct io_issue_def io_issue_defs[] = {
                .iopoll                 = 1,
                .iopoll_queue           = 1,
                .vectored               = 1,
-               .prep                   = io_prep_rw,
+               .prep                   = io_prep_rwv,
                .issue                  = io_write,
        },
        [IORING_OP_FSYNC] = {
@@ -98,7 +98,7 @@ const struct io_issue_def io_issue_defs[] = {
                .ioprio                 = 1,
                .iopoll                 = 1,
                .iopoll_queue           = 1,
-               .prep                   = io_prep_rw,
+               .prep                   = io_prep_rw_fixed,
                .issue                  = io_read,
        },
        [IORING_OP_WRITE_FIXED] = {
@@ -111,7 +111,7 @@ const struct io_issue_def io_issue_defs[] = {
                .ioprio                 = 1,
                .iopoll                 = 1,
                .iopoll_queue           = 1,
-               .prep                   = io_prep_rw,
+               .prep                   = io_prep_rw_fixed,
                .issue                  = io_write,
        },
        [IORING_OP_POLL_ADD] = {
index 3398e1d944c2615f557162bbe0cca7b718a4c1dc..64390d4e20c1875f92d944ad4fddd6c3ad8a140e 100644 (file)
@@ -83,18 +83,6 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        /* used for fixed read/write too - just read unconditionally */
        req->buf_index = READ_ONCE(sqe->buf_index);
 
-       if (req->opcode == IORING_OP_READ_FIXED ||
-           req->opcode == IORING_OP_WRITE_FIXED) {
-               struct io_ring_ctx *ctx = req->ctx;
-               u16 index;
-
-               if (unlikely(req->buf_index >= ctx->nr_user_bufs))
-                       return -EFAULT;
-               index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
-               req->imu = ctx->user_bufs[index];
-               io_req_set_rsrc_node(req, ctx, 0);
-       }
-
        ioprio = READ_ONCE(sqe->ioprio);
        if (ioprio) {
                ret = ioprio_check_cap(ioprio);
@@ -110,16 +98,42 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
        rw->addr = READ_ONCE(sqe->addr);
        rw->len = READ_ONCE(sqe->len);
        rw->flags = READ_ONCE(sqe->rw_flags);
+       return 0;
+}
+
+int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+       int ret;
+
+       ret = io_prep_rw(req, sqe);
+       if (unlikely(ret))
+               return ret;
 
-       /* Have to do this validation here, as this is in io_read() rw->len might
-        * have chanaged due to buffer selection
+       /*
+        * Have to do this validation here, as this is in io_read() rw->len
+        * might have chanaged due to buffer selection
         */
-       if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
-               ret = io_iov_buffer_select_prep(req);
-               if (ret)
-                       return ret;
-       }
+       if (req->flags & REQ_F_BUFFER_SELECT)
+               return io_iov_buffer_select_prep(req);
+
+       return 0;
+}
 
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+{
+       struct io_ring_ctx *ctx = req->ctx;
+       u16 index;
+       int ret;
+
+       ret = io_prep_rw(req, sqe);
+       if (unlikely(ret))
+               return ret;
+
+       if (unlikely(req->buf_index >= ctx->nr_user_bufs))
+               return -EFAULT;
+       index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
+       req->imu = ctx->user_bufs[index];
+       io_req_set_rsrc_node(req, ctx, 0);
        return 0;
 }
 
@@ -129,12 +143,20 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
  */
 int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
+       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        int ret;
 
+       /* must be used with provided buffers */
+       if (!(req->flags & REQ_F_BUFFER_SELECT))
+               return -EINVAL;
+
        ret = io_prep_rw(req, sqe);
        if (unlikely(ret))
                return ret;
 
+       if (rw->addr || rw->len)
+               return -EINVAL;
+
        req->flags |= REQ_F_APOLL_MULTISHOT;
        return 0;
 }
@@ -542,6 +564,9 @@ static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
 {
        if (!force && !io_cold_defs[req->opcode].prep_async)
                return 0;
+       /* opcode type doesn't need async data */
+       if (!io_cold_defs[req->opcode].async_size)
+               return 0;
        if (!req_has_async_data(req)) {
                struct io_async_rw *iorw;
 
@@ -887,6 +912,7 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags)
 
 int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 {
+       struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
        unsigned int cflags = 0;
        int ret;
 
@@ -903,7 +929,12 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
         * handling arm it.
         */
        if (ret == -EAGAIN) {
-               io_kbuf_recycle(req, issue_flags);
+               /*
+                * Reset rw->len to 0 again to avoid clamping future mshot
+                * reads, in case the buffer size varies.
+                */
+               if (io_kbuf_recycle(req, issue_flags))
+                       rw->len = 0;
                return -EAGAIN;
        }
 
@@ -916,6 +947,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
                 * jump to the termination path. This request is then done.
                 */
                cflags = io_put_kbuf(req, issue_flags);
+               rw->len = 0; /* similarly to above, reset len to 0 */
 
                if (io_fill_cqe_req_aux(req,
                                        issue_flags & IO_URING_F_COMPLETE_DEFER,
index c5aed03d42a4d14182a57318ad9fc7cd8cc0e829..f9e89b4fe4da91ca58923e62913af43a91149402 100644 (file)
@@ -16,6 +16,8 @@ struct io_async_rw {
 };
 
 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe);
+int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_read(struct io_kiocb *req, unsigned int issue_flags);
 int io_readv_prep_async(struct io_kiocb *req);
 int io_write(struct io_kiocb *req, unsigned int issue_flags);
index 833faa04461bc201f6e1d154f5444a274e582812..0fae79164187094d77def3bdc9cd6e6cac99c733 100644 (file)
@@ -782,9 +782,7 @@ struct bpf_iter_num_kern {
        int end; /* final value, exclusive */
 } __aligned(8);
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
 
 __bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end)
 {
@@ -843,4 +841,4 @@ __bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it)
        s->cur = s->end = 0;
 }
 
-__diag_pop();
+__bpf_kfunc_end_defs();
index 209e5135f9fbc58321477e8191010cadf0f3e060..f04a468cf6a72688121ced9e4ddcf0555b7758c5 100644 (file)
@@ -282,7 +282,7 @@ static struct bpf_iter_reg bpf_cgroup_reg_info = {
        .ctx_arg_info_size      = 1,
        .ctx_arg_info           = {
                { offsetof(struct bpf_iter__cgroup, cgroup),
-                 PTR_TO_BTF_ID_OR_NULL },
+                 PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
        },
        .seq_info               = &cgroup_iter_seq_info,
 };
@@ -305,9 +305,7 @@ struct bpf_iter_css_kern {
        unsigned int flags;
 } __attribute__((aligned(8)));
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-               "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
 
 __bpf_kfunc int bpf_iter_css_new(struct bpf_iter_css *it,
                struct cgroup_subsys_state *start, unsigned int flags)
@@ -358,4 +356,4 @@ __bpf_kfunc void bpf_iter_css_destroy(struct bpf_iter_css *it)
 {
 }
 
-__diag_pop();
\ No newline at end of file
+__bpf_kfunc_end_defs();
index 6983af8e093c4b0260f64d62d7c5d040e67e647f..e01c741e54e7b8dcf7ebb60b65955fafc55639c8 100644 (file)
@@ -34,9 +34,7 @@ static bool cpu_valid(u32 cpu)
        return cpu < nr_cpu_ids;
 }
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global kfuncs as their definitions will be in BTF");
+__bpf_kfunc_start_defs();
 
 /**
  * bpf_cpumask_create() - Create a mutable BPF cpumask.
@@ -407,7 +405,7 @@ __bpf_kfunc u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1,
        return cpumask_any_and_distribute(src1, src2);
 }
 
-__diag_pop();
+__bpf_kfunc_end_defs();
 
 BTF_SET8_START(cpumask_kfunc_btf_ids)
 BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
index e46ac288a1080b42feae40628d8e852fd980c7d7..56b0c1f678ee754101233daec601f4f62623e0ef 100644 (file)
@@ -1177,13 +1177,6 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
                ret = -EBUSY;
                goto out;
        }
-       if (!atomic64_read(&map->usercnt)) {
-               /* maps with timers must be either held by user space
-                * or pinned in bpffs.
-                */
-               ret = -EPERM;
-               goto out;
-       }
        /* allocate hrtimer via map_kmalloc to use memcg accounting */
        t = bpf_map_kmalloc_node(map, sizeof(*t), GFP_ATOMIC, map->numa_node);
        if (!t) {
@@ -1196,7 +1189,21 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
        rcu_assign_pointer(t->callback_fn, NULL);
        hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
        t->timer.function = bpf_timer_cb;
-       timer->timer = t;
+       WRITE_ONCE(timer->timer, t);
+       /* Guarantee the order between timer->timer and map->usercnt. So
+        * when there are concurrent uref release and bpf timer init, either
+        * bpf_timer_cancel_and_free() called by uref release reads a no-NULL
+        * timer or atomic64_read() below returns a zero usercnt.
+        */
+       smp_mb();
+       if (!atomic64_read(&map->usercnt)) {
+               /* maps with timers must be either held by user space
+                * or pinned in bpffs.
+                */
+               WRITE_ONCE(timer->timer, NULL);
+               kfree(t);
+               ret = -EPERM;
+       }
 out:
        __bpf_spin_unlock_irqrestore(&timer->lock);
        return ret;
@@ -1374,7 +1381,7 @@ void bpf_timer_cancel_and_free(void *val)
        /* The subsequent bpf_timer_start/cancel() helpers won't be able to use
         * this timer, since it won't be initialized.
         */
-       timer->timer = NULL;
+       WRITE_ONCE(timer->timer, NULL);
 out:
        __bpf_spin_unlock_irqrestore(&timer->lock);
        if (!t)
@@ -1886,9 +1893,7 @@ void bpf_rb_root_free(const struct btf_field *field, void *rb_root,
        }
 }
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
 
 __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
 {
@@ -2505,7 +2510,7 @@ __bpf_kfunc void bpf_throw(u64 cookie)
        WARN(1, "A call to BPF exception callback should never return\n");
 }
 
-__diag_pop();
+__bpf_kfunc_end_defs();
 
 BTF_SET8_START(generic_btf_ids)
 #ifdef CONFIG_KEXEC_CORE
@@ -2564,15 +2569,17 @@ BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
 BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU)
 BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY)
+#ifdef CONFIG_CGROUPS
 BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS)
 BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY)
-BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
-BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
-BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
 BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
 BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL)
 BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY)
+#endif
+BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED)
+BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL)
+BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY)
 BTF_ID_FLAGS(func, bpf_dynptr_adjust)
 BTF_ID_FLAGS(func, bpf_dynptr_is_null)
 BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly)
index 6fc9dae9edc81cad457b3e193ce671a3b0723aa8..6abd7c5df4b39e4e956258e67c4dbe6b8dd8edeb 100644 (file)
@@ -193,9 +193,7 @@ static int __init bpf_map_iter_init(void)
 
 late_initcall(bpf_map_iter_init);
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
 
 __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map)
 {
@@ -213,7 +211,7 @@ __bpf_kfunc s64 bpf_map_sum_elem_count(const struct bpf_map *map)
        return ret;
 }
 
-__diag_pop();
+__bpf_kfunc_end_defs();
 
 BTF_SET8_START(bpf_map_iter_kfunc_ids)
 BTF_ID_FLAGS(func, bpf_map_sum_elem_count, KF_TRUSTED_ARGS)
index 654601dd6b493d44111f3fefb16c0b412d69ebe2..26082b97894d3c0d64f4c909122ec3820f73f197 100644 (file)
@@ -704,7 +704,7 @@ static struct bpf_iter_reg task_reg_info = {
        .ctx_arg_info_size      = 1,
        .ctx_arg_info           = {
                { offsetof(struct bpf_iter__task, task),
-                 PTR_TO_BTF_ID_OR_NULL },
+                 PTR_TO_BTF_ID_OR_NULL | PTR_TRUSTED },
        },
        .seq_info               = &task_seq_info,
        .fill_link_info         = bpf_iter_fill_link_info,
@@ -822,9 +822,7 @@ struct bpf_iter_task_vma_kern {
        struct bpf_iter_task_vma_kern_data *data;
 } __attribute__((aligned(8)));
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
 
 __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
                                      struct task_struct *task, u64 addr)
@@ -890,7 +888,9 @@ __bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
        }
 }
 
-__diag_pop();
+__bpf_kfunc_end_defs();
+
+#ifdef CONFIG_CGROUPS
 
 struct bpf_iter_css_task {
        __u64 __opaque[1];
@@ -900,9 +900,7 @@ struct bpf_iter_css_task_kern {
        struct css_task_iter *css_it;
 } __attribute__((aligned(8)));
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
 
 __bpf_kfunc int bpf_iter_css_task_new(struct bpf_iter_css_task *it,
                struct cgroup_subsys_state *css, unsigned int flags)
@@ -948,7 +946,9 @@ __bpf_kfunc void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it)
        bpf_mem_free(&bpf_global_ma, kit->css_it);
 }
 
-__diag_pop();
+__bpf_kfunc_end_defs();
+
+#endif /* CONFIG_CGROUPS */
 
 struct bpf_iter_task {
        __u64 __opaque[3];
@@ -969,9 +969,7 @@ enum {
        BPF_TASK_ITER_PROC_THREADS
 };
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
 
 __bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it,
                struct task_struct *task__nullable, unsigned int flags)
@@ -1041,7 +1039,7 @@ __bpf_kfunc void bpf_iter_task_destroy(struct bpf_iter_task *it)
 {
 }
 
-__diag_pop();
+__bpf_kfunc_end_defs();
 
 DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
 
index 857d76694517121e8f6e34f7f7c0c4429720b6a6..bd1c42eb540f1f7565d1d0c10457884c10f7a37a 100644 (file)
@@ -3742,7 +3742,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
        if (class == BPF_ALU || class == BPF_ALU64) {
                if (!bt_is_reg_set(bt, dreg))
                        return 0;
-               if (opcode == BPF_MOV) {
+               if (opcode == BPF_END || opcode == BPF_NEG) {
+                       /* sreg is reserved and unused
+                        * dreg still need precision before this insn
+                        */
+                       return 0;
+               } else if (opcode == BPF_MOV) {
                        if (BPF_SRC(insn->code) == BPF_X) {
                                /* dreg = sreg or dreg = (s8, s16, s32)sreg
                                 * dreg needs precision after this insn
@@ -4674,7 +4679,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
                   insn->imm != 0 && env->bpf_capable) {
                struct bpf_reg_state fake_reg = {};
 
-               __mark_reg_known(&fake_reg, (u32)insn->imm);
+               __mark_reg_known(&fake_reg, insn->imm);
                fake_reg.type = SCALAR_VALUE;
                save_register_state(state, spi, &fake_reg, size);
        } else if (reg && is_spillable_regtype(reg->type)) {
@@ -5388,7 +5393,9 @@ static bool in_rcu_cs(struct bpf_verifier_env *env)
 /* Once GCC supports btf_type_tag the following mechanism will be replaced with tag check */
 BTF_SET_START(rcu_protected_types)
 BTF_ID(struct, prog_test_ref_kfunc)
+#ifdef CONFIG_CGROUPS
 BTF_ID(struct, cgroup)
+#endif
 BTF_ID(struct, bpf_cpumask)
 BTF_ID(struct, task_struct)
 BTF_SET_END(rcu_protected_types)
@@ -10835,7 +10842,9 @@ BTF_ID(func, bpf_dynptr_clone)
 BTF_ID(func, bpf_percpu_obj_new_impl)
 BTF_ID(func, bpf_percpu_obj_drop_impl)
 BTF_ID(func, bpf_throw)
+#ifdef CONFIG_CGROUPS
 BTF_ID(func, bpf_iter_css_task_new)
+#endif
 BTF_SET_END(special_kfunc_set)
 
 BTF_ID_LIST(special_kfunc_list)
@@ -10861,7 +10870,11 @@ BTF_ID(func, bpf_dynptr_clone)
 BTF_ID(func, bpf_percpu_obj_new_impl)
 BTF_ID(func, bpf_percpu_obj_drop_impl)
 BTF_ID(func, bpf_throw)
+#ifdef CONFIG_CGROUPS
 BTF_ID(func, bpf_iter_css_task_new)
+#else
+BTF_ID_UNUSED
+#endif
 
 static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta)
 {
@@ -11394,6 +11407,12 @@ static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env,
                                                  &meta->arg_rbtree_root.field);
 }
 
+/*
+ * css_task iter allowlist is needed to avoid dead locking on css_set_lock.
+ * LSM hooks and iters (both sleepable and non-sleepable) are safe.
+ * Any sleepable progs are also safe since bpf_check_attach_target() enforce
+ * them can only be attached to some specific hook points.
+ */
 static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
 {
        enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
@@ -11401,10 +11420,12 @@ static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env)
        switch (prog_type) {
        case BPF_PROG_TYPE_LSM:
                return true;
-       case BPF_TRACE_ITER:
-               return env->prog->aux->sleepable;
+       case BPF_PROG_TYPE_TRACING:
+               if (env->prog->expected_attach_type == BPF_TRACE_ITER)
+                       return true;
+               fallthrough;
        default:
-               return false;
+               return env->prog->aux->sleepable;
        }
 }
 
@@ -11663,7 +11684,7 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
                case KF_ARG_PTR_TO_ITER:
                        if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) {
                                if (!check_css_task_iter_allowlist(env)) {
-                                       verbose(env, "css_task_iter is only allowed in bpf_lsm and bpf iter-s\n");
+                                       verbose(env, "css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs\n");
                                        return -EINVAL;
                                }
                        }
index d80d7a6081412994582e2a3686442226b582cd50..c0adb7254b45aef82756c523cee94651e0dc81c6 100644 (file)
@@ -156,19 +156,16 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
  * optimize away the callsite. Therefore, __weak is needed to ensure that the
  * call is still emitted, by telling the compiler that we don't know what the
  * function might eventually be.
- *
- * __diag_* below are needed to dismiss the missing prototype warning.
  */
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "kfuncs which will be used in BPF programs");
+
+__bpf_hook_start();
 
 __weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
                                     struct cgroup *parent, int cpu)
 {
 }
 
-__diag_pop();
+__bpf_hook_end();
 
 /* see cgroup_rstat_flush() */
 static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
index 621037a0aa870e44fab877eaf6770452ca90f9b9..ce1bb2301c061d9695d85f6508ed23380c4cb4ce 100644 (file)
@@ -1006,6 +1006,9 @@ void kgdb_panic(const char *msg)
        if (panic_timeout)
                return;
 
+       debug_locks_off();
+       console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+
        if (dbg_kdb_mode)
                kdb_printf("PANIC: %s\n", msg);
 
index 438b868cbfa9228fde28eb95375ec9b7218ff136..6b213c8252d62df7a30dd2b0f861e6bb1895be36 100644 (file)
@@ -272,11 +272,10 @@ char *kdbgetenv(const char *match)
  * kdballocenv - This function is used to allocate bytes for
  *     environment entries.
  * Parameters:
- *     match   A character string representing a numeric value
- * Outputs:
- *     *value  the unsigned long representation of the env variable 'match'
+ *     bytes   The number of bytes to allocate in the static buffer.
  * Returns:
- *     Zero on success, a kdb diagnostic on failure.
+ *     A pointer to the allocated space in the buffer on success.
+ *     NULL if bytes > size available in the envbuffer.
  * Remarks:
  *     We use a static environment buffer (envbuffer) to hold the values
  *     of dynamically generated environment variables (see kdb_set).  Buffer
index ed3056eb20b8bdb88f74e00727096f72f494847e..73c95815789a0fd67f8b6d73d44586c48e0111fd 100644 (file)
@@ -587,6 +587,46 @@ int dma_direct_supported(struct device *dev, u64 mask)
        return mask >= phys_to_dma_unencrypted(dev, min_mask);
 }
 
+/*
+ * To check whether all ram resource ranges are covered by dma range map
+ * Returns 0 when further check is needed
+ * Returns 1 if there is some RAM range can't be covered by dma_range_map
+ */
+static int check_ram_in_range_map(unsigned long start_pfn,
+                                 unsigned long nr_pages, void *data)
+{
+       unsigned long end_pfn = start_pfn + nr_pages;
+       const struct bus_dma_region *bdr = NULL;
+       const struct bus_dma_region *m;
+       struct device *dev = data;
+
+       while (start_pfn < end_pfn) {
+               for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
+                       unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
+
+                       if (start_pfn >= cpu_start_pfn &&
+                           start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
+                               bdr = m;
+                               break;
+                       }
+               }
+               if (!bdr)
+                       return 1;
+
+               start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
+       }
+
+       return 0;
+}
+
+bool dma_direct_all_ram_mapped(struct device *dev)
+{
+       if (!dev->dma_range_map)
+               return true;
+       return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
+                                     check_ram_in_range_map);
+}
+
 size_t dma_direct_max_mapping_size(struct device *dev)
 {
        /* If SWIOTLB is active, use its maximum mapping size */
index 97ec892ea0b5acec3adc7a7b947e6572ff9deb80..18d346118fe8eb56f26db5f9ed442710ef53779d 100644 (file)
@@ -20,6 +20,7 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
                enum dma_data_direction dir, unsigned long attrs);
+bool dma_direct_all_ram_mapped(struct device *dev);
 size_t dma_direct_max_mapping_size(struct device *dev);
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
index e323ca48f7f2a4e8bbcb66065b7ff35876902fab..58db8fd70471a197c3fcd1d745b4fbc864b6634c 100644 (file)
@@ -793,6 +793,28 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_set_coherent_mask);
 
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev:       device to check
+ *
+ * Return %true if the devices DMA mask is too small to address all memory in
+ * the system, else %false.  Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+bool dma_addressing_limited(struct device *dev)
+{
+       const struct dma_map_ops *ops = get_dma_ops(dev);
+
+       if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
+                        dma_get_required_mask(dev))
+               return true;
+
+       if (unlikely(ops))
+               return false;
+       return !dma_direct_all_ram_mapped(dev);
+}
+EXPORT_SYMBOL_GPL(dma_addressing_limited);
+
 size_t dma_max_mapping_size(struct device *dev)
 {
        const struct dma_map_ops *ops = get_dma_ops(dev);
index 26202274784f0306e7b968abd214cc7edfe1b3f2..33d942615be54ccf6ed616c50a9db499c99b6816 100644 (file)
@@ -283,7 +283,8 @@ static void swiotlb_init_io_tlb_pool(struct io_tlb_pool *mem, phys_addr_t start,
        }
 
        for (i = 0; i < mem->nslabs; i++) {
-               mem->slots[i].list = IO_TLB_SEGSIZE - io_tlb_offset(i);
+               mem->slots[i].list = min(IO_TLB_SEGSIZE - io_tlb_offset(i),
+                                        mem->nslabs - i);
                mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
                mem->slots[i].alloc_size = 0;
        }
@@ -558,29 +559,40 @@ void __init swiotlb_exit(void)
  * alloc_dma_pages() - allocate pages to be used for DMA
  * @gfp:       GFP flags for the allocation.
  * @bytes:     Size of the buffer.
+ * @phys_limit:        Maximum allowed physical address of the buffer.
  *
  * Allocate pages from the buddy allocator. If successful, make the allocated
  * pages decrypted that they can be used for DMA.
  *
- * Return: Decrypted pages, or %NULL on failure.
+ * Return: Decrypted pages, %NULL on allocation failure, or ERR_PTR(-EAGAIN)
+ * if the allocated physical address was above @phys_limit.
  */
-static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes)
+static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)
 {
        unsigned int order = get_order(bytes);
        struct page *page;
+       phys_addr_t paddr;
        void *vaddr;
 
        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;
 
-       vaddr = page_address(page);
+       paddr = page_to_phys(page);
+       if (paddr + bytes - 1 > phys_limit) {
+               __free_pages(page, order);
+               return ERR_PTR(-EAGAIN);
+       }
+
+       vaddr = phys_to_virt(paddr);
        if (set_memory_decrypted((unsigned long)vaddr, PFN_UP(bytes)))
                goto error;
        return page;
 
 error:
-       __free_pages(page, order);
+       /* Intentional leak if pages cannot be encrypted again. */
+       if (!set_memory_encrypted((unsigned long)vaddr, PFN_UP(bytes)))
+               __free_pages(page, order);
        return NULL;
 }
 
@@ -618,11 +630,7 @@ static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
        else if (phys_limit <= DMA_BIT_MASK(32))
                gfp |= __GFP_DMA32;
 
-       while ((page = alloc_dma_pages(gfp, bytes)) &&
-              page_to_phys(page) + bytes - 1 > phys_limit) {
-               /* allocated, but too high */
-               __free_pages(page, get_order(bytes));
-
+       while (IS_ERR(page = alloc_dma_pages(gfp, bytes, phys_limit))) {
                if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
                    phys_limit < DMA_BIT_MASK(64) &&
                    !(gfp & (__GFP_DMA32 | __GFP_DMA)))
index 0d866eaa4cc862c5921a34b7e7d97af5846c90dd..b531c33e9545b7957599fc67a7d9af3193f1279f 100644 (file)
@@ -500,6 +500,7 @@ static inline void rcu_expedite_gp(void) { }
 static inline void rcu_unexpedite_gp(void) { }
 static inline void rcu_async_hurry(void) { }
 static inline void rcu_async_relax(void) { }
+static inline bool rcu_cpu_online(int cpu) { return true; }
 #else /* #ifdef CONFIG_TINY_RCU */
 bool rcu_gp_is_normal(void);     /* Internal RCU use. */
 bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
@@ -509,6 +510,7 @@ void rcu_unexpedite_gp(void);
 void rcu_async_hurry(void);
 void rcu_async_relax(void);
 void rcupdate_announce_bootup_oddness(void);
+bool rcu_cpu_online(int cpu);
 #ifdef CONFIG_TASKS_RCU_GENERIC
 void show_rcu_tasks_gp_kthreads(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
index 1fa631168594d1bd71a3fc37d025542a8a1f4040..f54d5782eca0baf60cbb29af901de5019db63f3e 100644 (file)
@@ -895,10 +895,36 @@ static void rcu_tasks_pregp_step(struct list_head *hop)
        synchronize_rcu();
 }
 
+/* Check for quiescent states since the pregp's synchronize_rcu() */
+static bool rcu_tasks_is_holdout(struct task_struct *t)
+{
+       int cpu;
+
+       /* Has the task been seen voluntarily sleeping? */
+       if (!READ_ONCE(t->on_rq))
+               return false;
+
+       /*
+        * Idle tasks (or idle injection) within the idle loop are RCU-tasks
+        * quiescent states. But CPU boot code performed by the idle task
+        * isn't a quiescent state.
+        */
+       if (is_idle_task(t))
+               return false;
+
+       cpu = task_cpu(t);
+
+       /* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
+       if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
+               return false;
+
+       return true;
+}
+
 /* Per-task initial processing. */
 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
 {
-       if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
+       if (t != current && rcu_tasks_is_holdout(t)) {
                get_task_struct(t);
                t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
                WRITE_ONCE(t->rcu_tasks_holdout, true);
@@ -947,7 +973,7 @@ static void check_holdout_task(struct task_struct *t,
 
        if (!READ_ONCE(t->rcu_tasks_holdout) ||
            t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
-           !READ_ONCE(t->on_rq) ||
+           !rcu_tasks_is_holdout(t) ||
            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
                WRITE_ONCE(t->rcu_tasks_holdout, false);
@@ -1525,7 +1551,7 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
        } else {
                // The task is not running, so C-language access is safe.
                nesting = t->trc_reader_nesting;
-               WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t));
+               WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
                if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
                        n_heavy_reader_ofl_updates++;
        }
index d3a97e1290203f8a00c4dc97fa0087676926b08e..3ac3c846105fb4c059a001ae3cf52a3c7747aac5 100644 (file)
@@ -755,14 +755,19 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 }
 
 /*
- * Return true if the specified CPU has passed through a quiescent
- * state by virtue of being in or having passed through an dynticks
- * idle state since the last call to dyntick_save_progress_counter()
- * for this same CPU, or by virtue of having been offline.
+ * Returns positive if the specified CPU has passed through a quiescent state
+ * by virtue of being in or having passed through an dynticks idle state since
+ * the last call to dyntick_save_progress_counter() for this same CPU, or by
+ * virtue of having been offline.
+ *
+ * Returns negative if the specified CPU needs a force resched.
+ *
+ * Returns zero otherwise.
  */
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
        unsigned long jtsq;
+       int ret = 0;
        struct rcu_node *rnp = rdp->mynode;
 
        /*
@@ -848,8 +853,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
            (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
             rcu_state.cbovld)) {
                WRITE_ONCE(rdp->rcu_urgent_qs, true);
-               resched_cpu(rdp->cpu);
                WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+               ret = -1;
        }
 
        /*
@@ -862,8 +867,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
        if (time_after(jiffies, rcu_state.jiffies_resched)) {
                if (time_after(jiffies,
                               READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
-                       resched_cpu(rdp->cpu);
                        WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+                       ret = -1;
                }
                if (IS_ENABLED(CONFIG_IRQ_WORK) &&
                    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
@@ -892,7 +897,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
                }
        }
 
-       return 0;
+       return ret;
 }
 
 /* Trace-event wrapper function for trace_rcu_future_grace_period.  */
@@ -2271,15 +2276,15 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 {
        int cpu;
        unsigned long flags;
-       unsigned long mask;
-       struct rcu_data *rdp;
        struct rcu_node *rnp;
 
        rcu_state.cbovld = rcu_state.cbovldnext;
        rcu_state.cbovldnext = false;
        rcu_for_each_leaf_node(rnp) {
+               unsigned long mask = 0;
+               unsigned long rsmask = 0;
+
                cond_resched_tasks_rcu_qs();
-               mask = 0;
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                rcu_state.cbovldnext |= !!rnp->cbovldmask;
                if (rnp->qsmask == 0) {
@@ -2297,11 +2302,17 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
                        continue;
                }
                for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
+                       struct rcu_data *rdp;
+                       int ret;
+
                        rdp = per_cpu_ptr(&rcu_data, cpu);
-                       if (f(rdp)) {
+                       ret = f(rdp);
+                       if (ret > 0) {
                                mask |= rdp->grpmask;
                                rcu_disable_urgency_upon_qs(rdp);
                        }
+                       if (ret < 0)
+                               rsmask |= rdp->grpmask;
                }
                if (mask != 0) {
                        /* Idle/offline CPUs, report (releases rnp->lock). */
@@ -2310,6 +2321,9 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
                        /* Nothing to do here, so just drop the lock. */
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
+
+               for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
+                       resched_cpu(cpu);
        }
 }
 
@@ -4195,6 +4209,13 @@ static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
        return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
 }
 
+bool rcu_cpu_online(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+       return rcu_rdp_cpu_online(rdp);
+}
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
index df697c74d5197a21a49691cced48138397851076..84e8a0f6e4e0b0283eb6d7d6b72e5c0e2761c8c6 100644 (file)
@@ -1252,9 +1252,7 @@ static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
 };
 
 #ifdef CONFIG_KEYS
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "kfuncs which will be used in BPF programs");
+__bpf_kfunc_start_defs();
 
 /**
  * bpf_lookup_user_key - lookup a key by its serial
@@ -1404,7 +1402,7 @@ __bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr,
 }
 #endif /* CONFIG_SYSTEM_DATA_VERIFICATION */
 
-__diag_pop();
+__bpf_kfunc_end_defs();
 
 BTF_SET8_START(key_sig_kfunc_set)
 BTF_ID_FLAGS(func, bpf_lookup_user_key, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE)
index 8bfe23af9c739a5042e93b2bba956a6e366de2b9..7d2ddbcfa377cfa4cbfba3a257dc4fdfecb22e04 100644 (file)
@@ -927,11 +927,12 @@ static int parse_symbol_and_return(int argc, const char *argv[],
        for (i = 2; i < argc; i++) {
                tmp = strstr(argv[i], "$retval");
                if (tmp && !isalnum(tmp[7]) && tmp[7] != '_') {
+                       if (is_tracepoint) {
+                               trace_probe_log_set_index(i);
+                               trace_probe_log_err(tmp - argv[i], RETVAL_ON_PROBE);
+                               return -EINVAL;
+                       }
                        *is_return = true;
-                       /*
-                        * NOTE: Don't check is_tracepoint here, because it will
-                        * be checked when the argument is parsed.
-                        */
                        break;
                }
        }
index a3442db356709ce8f5c940dfd9b61bfc34fafd0d..52f8b537dd0a0872dc1adcbe09778eb76270aad0 100644 (file)
@@ -1020,9 +1020,9 @@ EXPORT_SYMBOL_GPL(kprobe_event_cmd_init);
 /**
  * __kprobe_event_gen_cmd_start - Generate a kprobe event command from arg list
  * @cmd: A pointer to the dynevent_cmd struct representing the new event
+ * @kretprobe: Is this a return probe?
  * @name: The name of the kprobe event
  * @loc: The location of the kprobe event
- * @kretprobe: Is this a return probe?
  * @...: Variable number of arg (pairs), one pair for each field
  *
  * NOTE: Users normally won't want to call this function directly, but
index 2d90935d5a2105201ab6b2f3845bd0d6fb7ea603..3ea1c830efabc350f4def96e08e71326faca093c 100644 (file)
@@ -772,3 +772,6 @@ config ASN1_ENCODER
 
 config POLYNOMIAL
        tristate
+
+config FIRMWARE_TABLE
+       bool
index 13455f47f9df6123150287816a7612dc5bc291f8..6b09731d8e6195603aab99a3fd3f5dc56331f567 100644 (file)
@@ -409,6 +409,8 @@ obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
 
 obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
 
+obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
+
 # FORTIFY_SOURCE compile-time behavior tests
 TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c)
 TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
index 0855e698ced11a7ebdccb983759664acff067618..f86c9eeafb35ad9da21ebddda8a182ea27970ff8 100644 (file)
@@ -21,6 +21,10 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
        BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));
 
        if (!r) {
+               smp_acquire__after_ctrl_dep();
+
+               cl->closure_get_happened = false;
+
                if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
                        atomic_set(&cl->remaining,
                                   CLOSURE_REMAINING_INITIALIZER);
@@ -43,7 +47,7 @@ static inline void closure_put_after_sub(struct closure *cl, int flags)
 /* For clearing flags with the same atomic op as a put */
 void closure_sub(struct closure *cl, int v)
 {
-       closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
+       closure_put_after_sub(cl, atomic_sub_return_release(v, &cl->remaining));
 }
 EXPORT_SYMBOL(closure_sub);
 
@@ -52,7 +56,7 @@ EXPORT_SYMBOL(closure_sub);
  */
 void closure_put(struct closure *cl)
 {
-       closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
+       closure_put_after_sub(cl, atomic_dec_return_release(&cl->remaining));
 }
 EXPORT_SYMBOL(closure_put);
 
@@ -90,6 +94,7 @@ bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
        if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
                return false;
 
+       cl->closure_get_happened = true;
        closure_set_waiting(cl, _RET_IP_);
        atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
        llist_add(&cl->list, &waitlist->list);
diff --git a/lib/fw_table.c b/lib/fw_table.c
new file mode 100644 (file)
index 0000000..b51f30a
--- /dev/null
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *  fw_tables.c - Parsing support for ACPI and ACPI-like tables provided by
+ *                platform or device firmware
+ *
+ *  Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *  Copyright (C) 2023 Intel Corp.
+ */
+#include <linux/errno.h>
+#include <linux/fw_table.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+enum acpi_subtable_type {
+       ACPI_SUBTABLE_COMMON,
+       ACPI_SUBTABLE_HMAT,
+       ACPI_SUBTABLE_PRMT,
+       ACPI_SUBTABLE_CEDT,
+};
+
+struct acpi_subtable_entry {
+       union acpi_subtable_headers *hdr;
+       enum acpi_subtable_type type;
+};
+
+static unsigned long __init_or_acpilib
+acpi_get_entry_type(struct acpi_subtable_entry *entry)
+{
+       switch (entry->type) {
+       case ACPI_SUBTABLE_COMMON:
+               return entry->hdr->common.type;
+       case ACPI_SUBTABLE_HMAT:
+               return entry->hdr->hmat.type;
+       case ACPI_SUBTABLE_PRMT:
+               return 0;
+       case ACPI_SUBTABLE_CEDT:
+               return entry->hdr->cedt.type;
+       }
+       return 0;
+}
+
+static unsigned long __init_or_acpilib
+acpi_get_entry_length(struct acpi_subtable_entry *entry)
+{
+       switch (entry->type) {
+       case ACPI_SUBTABLE_COMMON:
+               return entry->hdr->common.length;
+       case ACPI_SUBTABLE_HMAT:
+               return entry->hdr->hmat.length;
+       case ACPI_SUBTABLE_PRMT:
+               return entry->hdr->prmt.length;
+       case ACPI_SUBTABLE_CEDT:
+               return entry->hdr->cedt.length;
+       }
+       return 0;
+}
+
+static unsigned long __init_or_acpilib
+acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
+{
+       switch (entry->type) {
+       case ACPI_SUBTABLE_COMMON:
+               return sizeof(entry->hdr->common);
+       case ACPI_SUBTABLE_HMAT:
+               return sizeof(entry->hdr->hmat);
+       case ACPI_SUBTABLE_PRMT:
+               return sizeof(entry->hdr->prmt);
+       case ACPI_SUBTABLE_CEDT:
+               return sizeof(entry->hdr->cedt);
+       }
+       return 0;
+}
+
+static enum acpi_subtable_type __init_or_acpilib
+acpi_get_subtable_type(char *id)
+{
+       if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
+               return ACPI_SUBTABLE_HMAT;
+       if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
+               return ACPI_SUBTABLE_PRMT;
+       if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
+               return ACPI_SUBTABLE_CEDT;
+       return ACPI_SUBTABLE_COMMON;
+}
+
+static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
+{
+       return proc->handler || proc->handler_arg;
+}
+
+static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
+                                         union acpi_subtable_headers *hdr,
+                                         unsigned long end)
+{
+       if (proc->handler)
+               return proc->handler(hdr, end);
+       if (proc->handler_arg)
+               return proc->handler_arg(hdr, proc->arg, end);
+       return -EINVAL;
+}
+
+/**
+ * acpi_parse_entries_array - for each proc_num find a suitable subtable
+ *
+ * @id: table id (for debugging purposes)
+ * @table_size: size of the root table
+ * @table_header: where does the table start?
+ * @proc: array of acpi_subtable_proc struct containing entry id
+ *        and associated handler with it
+ * @proc_num: how big proc is?
+ * @max_entries: how many entries can we process?
+ *
+ * For each proc_num find a subtable with proc->id and run proc->handler
+ * on it. Assumption is that there's only single handler for particular
+ * entry id.
+ *
+ * The table_size is not the size of the complete ACPI table (the length
+ * field in the header struct), but only the size of the root table; i.e.,
+ * the offset from the very first byte of the complete ACPI table, to the
+ * first byte of the very first subtable.
+ *
+ * On success returns sum of all matching entries for all proc handlers.
+ * Otherwise, -ENODEV or -EINVAL is returned.
+ */
+int __init_or_acpilib
+acpi_parse_entries_array(char *id, unsigned long table_size,
+                        struct acpi_table_header *table_header,
+                        struct acpi_subtable_proc *proc,
+                        int proc_num, unsigned int max_entries)
+{
+       unsigned long table_end, subtable_len, entry_len;
+       struct acpi_subtable_entry entry;
+       int count = 0;
+       int errs = 0;
+       int i;
+
+       table_end = (unsigned long)table_header + table_header->length;
+
+       /* Parse all entries looking for a match. */
+
+       entry.type = acpi_get_subtable_type(id);
+       entry.hdr = (union acpi_subtable_headers *)
+           ((unsigned long)table_header + table_size);
+       subtable_len = acpi_get_subtable_header_length(&entry);
+
+       while (((unsigned long)entry.hdr) + subtable_len  < table_end) {
+               if (max_entries && count >= max_entries)
+                       break;
+
+               for (i = 0; i < proc_num; i++) {
+                       if (acpi_get_entry_type(&entry) != proc[i].id)
+                               continue;
+                       if (!has_handler(&proc[i]) ||
+                           (!errs &&
+                            call_handler(&proc[i], entry.hdr, table_end))) {
+                               errs++;
+                               continue;
+                       }
+
+                       proc[i].count++;
+                       break;
+               }
+               if (i != proc_num)
+                       count++;
+
+               /*
+                * If entry->length is 0, break from this loop to avoid
+                * infinite loop.
+                */
+               entry_len = acpi_get_entry_length(&entry);
+               if (entry_len == 0) {
+                       pr_err("[%4.4s:0x%02x] Invalid zero length\n", id, proc->id);
+                       return -EINVAL;
+               }
+
+               entry.hdr = (union acpi_subtable_headers *)
+                   ((unsigned long)entry.hdr + entry_len);
+       }
+
+       if (max_entries && count > max_entries) {
+               pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n",
+                       id, proc->id, count);
+       }
+
+       return errs ? -EINVAL : count;
+}
index a9407840213851eb8399f67c7de67530a3d41669..bfdb815998328d62c287b27cc9d9cf8e03247bae 100644 (file)
@@ -311,7 +311,7 @@ static void ot_fini_sync(struct ot_context *sop)
        ot_kfree(sop->test, sop, sizeof(*sop));
 }
 
-struct {
+static struct {
        struct ot_context * (*init)(struct ot_test *oc);
        void (*fini)(struct ot_context *sop);
 } g_ot_sync_ops[] = {
@@ -475,7 +475,7 @@ static struct ot_context *ot_init_async_m0(struct ot_test *test)
        return sop;
 }
 
-struct {
+static struct {
        struct ot_context * (*init)(struct ot_test *oc);
        void (*fini)(struct ot_context *sop);
 } g_ot_async_ops[] = {
@@ -632,7 +632,7 @@ static int ot_start_async(struct ot_test *test)
 #define NODE_COMPACT sizeof(struct ot_node)
 #define NODE_VMALLOC (512)
 
-struct ot_test g_testcases[] = {
+static struct ot_test g_testcases[] = {
 
        /* sync & normal */
        {0, 0, NODE_COMPACT, 1000, 0,  1,  0,  0, "sync: percpu objpool"},
index fd492e5bbdbcde42497a513fded38048bce91bde..5a88d6d24d793807803139254b5804827251adf1 100644 (file)
@@ -424,7 +424,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
         * of memory that aren't suitable for allocation
         */
        if (!memblock_can_resize)
-               return -1;
+               panic("memblock: cannot resize %s array\n", type->name);
 
        /* Calculate new doubled size */
        old_size = type->max * sizeof(struct memblock_region);
index 86bbc7147fc148822e2b39b6aa0ce291c197ed4f..e265a0ca6bddd40711235c8d7560a6f409a51241 100644 (file)
@@ -540,12 +540,14 @@ static int p9_check_errors(struct p9_client *c, struct p9_req_t *req)
                return 0;
 
        if (!p9_is_proto_dotl(c)) {
-               char *ename;
+               char *ename = NULL;
 
                err = p9pdu_readf(&req->rc, c->proto_version, "s?d",
                                  &ename, &ecode);
-               if (err)
+               if (err) {
+                       kfree(ename);
                        goto out_err;
+               }
 
                if (p9_is_proto_dotu(c) && ecode < 512)
                        err = -ecode;
@@ -1979,7 +1981,7 @@ struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid,
                goto error;
        }
        p9_debug(P9_DEBUG_9P,
-                ">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n",
+                ">>> TXATTRWALK file_fid %d, attr_fid %d name '%s'\n",
                 file_fid->fid, attr_fid->fid, attr_name);
 
        req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds",
index c4015f30f9fa79a4a968f9a0a9aab243f0d460a0..1a3948b8c493eda3aca297896bd8adf7a63d443a 100644 (file)
@@ -671,10 +671,14 @@ static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
 
        p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
                 m, current, &req->tc, req->tc.id);
-       if (m->err < 0)
-               return m->err;
 
        spin_lock(&m->req_lock);
+
+       if (m->err < 0) {
+               spin_unlock(&m->req_lock);
+               return m->err;
+       }
+
        WRITE_ONCE(req->status, REQ_STATUS_UNSENT);
        list_add_tail(&req->req_list, &m->unsent_req_list);
        spin_unlock(&m->req_lock);
@@ -832,14 +836,21 @@ static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
                goto out_free_ts;
        if (!(ts->rd->f_mode & FMODE_READ))
                goto out_put_rd;
-       /* prevent workers from hanging on IO when fd is a pipe */
-       ts->rd->f_flags |= O_NONBLOCK;
+       /* Prevent workers from hanging on IO when fd is a pipe.
+        * It's technically possible for userspace or concurrent mounts to
+        * modify this flag concurrently, which will likely result in a
+        * broken filesystem. However, just having bad flags here should
+        * not crash the kernel or cause any other sort of bug, so mark this
+        * particular data race as intentional so that tooling (like KCSAN)
+        * can allow it and detect further problems.
+        */
+       data_race(ts->rd->f_flags |= O_NONBLOCK);
        ts->wr = fget(wfd);
        if (!ts->wr)
                goto out_put_rd;
        if (!(ts->wr->f_mode & FMODE_WRITE))
                goto out_put_wr;
-       ts->wr->f_flags |= O_NONBLOCK;
+       data_race(ts->wr->f_flags |= O_NONBLOCK);
 
        client->trans = ts;
        client->status = Connected;
index 1fffe2bed5b02f3480b9f074d8b472016708729f..dfdbe1ca533872ce189e64ff5f4a083c5261ddee 100644 (file)
@@ -54,7 +54,6 @@ struct xen_9pfs_front_priv {
        char *tag;
        struct p9_client *client;
 
-       int num_rings;
        struct xen_9pfs_dataring *rings;
 };
 
@@ -131,7 +130,7 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
        if (list_entry_is_head(priv, &xen_9pfs_devs, list))
                return -EINVAL;
 
-       num = p9_req->tc.tag % priv->num_rings;
+       num = p9_req->tc.tag % XEN_9PFS_NUM_RINGS;
        ring = &priv->rings[num];
 
 again:
@@ -279,7 +278,7 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
        list_del(&priv->list);
        write_unlock(&xen_9pfs_lock);
 
-       for (i = 0; i < priv->num_rings; i++) {
+       for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
                struct xen_9pfs_dataring *ring = &priv->rings[i];
 
                cancel_work_sync(&ring->work);
@@ -408,15 +407,14 @@ static int xen_9pfs_front_init(struct xenbus_device *dev)
        if (p9_xen_trans.maxsize > XEN_FLEX_RING_SIZE(max_ring_order))
                p9_xen_trans.maxsize = XEN_FLEX_RING_SIZE(max_ring_order) / 2;
 
-       priv->num_rings = XEN_9PFS_NUM_RINGS;
-       priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
+       priv->rings = kcalloc(XEN_9PFS_NUM_RINGS, sizeof(*priv->rings),
                              GFP_KERNEL);
        if (!priv->rings) {
                kfree(priv);
                return -ENOMEM;
        }
 
-       for (i = 0; i < priv->num_rings; i++) {
+       for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
                priv->rings[i].priv = priv;
                ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i],
                                                    max_ring_order);
@@ -434,10 +432,11 @@ static int xen_9pfs_front_init(struct xenbus_device *dev)
        if (ret)
                goto error_xenbus;
        ret = xenbus_printf(xbt, dev->nodename, "num-rings", "%u",
-                           priv->num_rings);
+                           XEN_9PFS_NUM_RINGS);
        if (ret)
                goto error_xenbus;
-       for (i = 0; i < priv->num_rings; i++) {
+
+       for (i = 0; i < XEN_9PFS_NUM_RINGS; i++) {
                char str[16];
 
                BUILD_BUG_ON(XEN_9PFS_NUM_RINGS > 9);
index 0841f8d824198d0409d68aa178f2fba4d8ebb11d..c9fdcc5cdce10d8d9f5ed93cfb22d6d9e4bdd25d 100644 (file)
@@ -503,9 +503,8 @@ out:
  * architecture dependent calling conventions. 7+ can be supported in the
  * future.
  */
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
+
 __bpf_kfunc int bpf_fentry_test1(int a)
 {
        return a + 1;
@@ -605,7 +604,7 @@ __bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
 {
 }
 
-__diag_pop();
+__bpf_kfunc_end_defs();
 
 BTF_SET8_START(bpf_test_modify_return_ids)
 BTF_ID_FLAGS(func, bpf_modify_return_test)
index 8f19253024b0aa4624bb7c8dac836d5c2fa3a01e..741360219552574f048e1135014ac7b8082273e6 100644 (file)
@@ -135,3 +135,4 @@ static void __exit ebtable_broute_fini(void)
 module_init(ebtable_broute_init);
 module_exit(ebtable_broute_fini);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Force packets to be routed instead of bridged");
index 278f324e67524a8933345f48feeb267d0a9e2dfa..dacd81b12e62645b8866ac1071261275280a365d 100644 (file)
@@ -116,3 +116,4 @@ static void __exit ebtable_filter_fini(void)
 module_init(ebtable_filter_init);
 module_exit(ebtable_filter_fini);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ebtables legacy filter table");
index 9066f7f376d57ea509f4af6cfc7d94cd157aae9b..0f2a8c6118d42ef47904e4dd8dcd7bcab26e3304 100644 (file)
@@ -116,3 +116,4 @@ static void __exit ebtable_nat_fini(void)
 module_init(ebtable_nat_init);
 module_exit(ebtable_nat_fini);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ebtables legacy stateless nat table");
index aa23479b20b2aef3da7e67d58ed4af4d9d9c616c..99d82676f780ac49d01151fa9c585f44f9ea8ccc 100644 (file)
@@ -2595,3 +2595,4 @@ EXPORT_SYMBOL(ebt_do_table);
 module_init(ebtables_init);
 module_exit(ebtables_fini);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ebtables legacy core");
index 71056ee847736b56fa14516c04c6ab2947100694..b5c406a6e7654f95fd323936219f0abd3dc66d65 100644 (file)
@@ -416,3 +416,4 @@ module_exit(nf_conntrack_l3proto_bridge_fini);
 
 MODULE_ALIAS("nf_conntrack-" __stringify(AF_BRIDGE));
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Bridge IPv4 and IPv6 connection tracking");
index 21d75108c2e94be490af74672a4845c4cb945ce8..383f96b0a1c78026629ef4e6b8c172019d29d35c 100644 (file)
@@ -11767,9 +11767,7 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id)
        return func;
 }
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
 __bpf_kfunc int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags,
                                    struct bpf_dynptr_kern *ptr__uninit)
 {
@@ -11816,7 +11814,7 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern,
 
        return 0;
 }
-__diag_pop();
+__bpf_kfunc_end_defs();
 
 int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags,
                               struct bpf_dynptr_kern *ptr__uninit)
@@ -11879,10 +11877,7 @@ static int __init bpf_kfunc_init(void)
 }
 late_initcall(bpf_kfunc_init);
 
-/* Disables missing prototype warnings */
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
 
 /* bpf_sock_destroy: Destroy the given socket with ECONNABORTED error code.
  *
@@ -11916,7 +11911,7 @@ __bpf_kfunc int bpf_sock_destroy(struct sock_common *sock)
        return sk->sk_prot->diag_destroy(sk, ECONNABORTED);
 }
 
-__diag_pop()
+__bpf_kfunc_end_defs();
 
 BTF_SET8_START(bpf_sk_iter_kfunc_ids)
 BTF_ID_FLAGS(func, bpf_sock_destroy, KF_TRUSTED_ARGS)
index 5e409b98aba0f0a1fd6fb88db3d1dde4291f1cd7..dec5443372360bb462a937724d5747ab676566f9 100644 (file)
@@ -217,8 +217,12 @@ static int page_pool_init(struct page_pool *pool,
                return -ENOMEM;
 #endif
 
-       if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
+       if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
+#ifdef CONFIG_PAGE_POOL_STATS
+               free_percpu(pool->recycle_stats);
+#endif
                return -ENOMEM;
+       }
 
        atomic_set(&pool->pages_state_release_cnt, 0);
 
index df4789ab512d72581915d2a829369a05398434bd..b6f1d6dab3f2e3c2e8515ad1293fb284cfc42218 100644 (file)
@@ -696,9 +696,7 @@ struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf)
        return nxdpf;
 }
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in vmlinux BTF");
+__bpf_kfunc_start_defs();
 
 /**
  * bpf_xdp_metadata_rx_timestamp - Read XDP frame RX timestamp.
@@ -738,7 +736,7 @@ __bpf_kfunc int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, u32 *hash,
        return -EOPNOTSUPP;
 }
 
-__diag_pop();
+__bpf_kfunc_end_defs();
 
 BTF_SET8_START(xdp_metadata_kfunc_ids)
 #define XDP_METADATA_KFUNC(_, __, name, ___) BTF_ID_FLAGS(func, name, KF_TRUSTED_ARGS)
index 1b8cbfda6e5dbd098a58d92639a64bc8db83ff23..44b033fe1ef6859df0703c7e580cf20c771ad479 100644 (file)
@@ -629,9 +629,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        if (dccp_parse_options(sk, dreq, skb))
                goto drop_and_free;
 
-       if (security_inet_conn_request(sk, skb, req))
-               goto drop_and_free;
-
        ireq = inet_rsk(req);
        sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
        sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
@@ -639,6 +636,9 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        ireq->ireq_family = AF_INET;
        ireq->ir_iif = READ_ONCE(sk->sk_bound_dev_if);
 
+       if (security_inet_conn_request(sk, skb, req))
+               goto drop_and_free;
+
        /*
         * Step 3: Process LISTEN state
         *
index 8d344b219f84ae391f640d9a2d09700883123dce..4550b680665a57ab9648b12645a632d54af69ab4 100644 (file)
@@ -360,15 +360,15 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (dccp_parse_options(sk, dreq, skb))
                goto drop_and_free;
 
-       if (security_inet_conn_request(sk, skb, req))
-               goto drop_and_free;
-
        ireq = inet_rsk(req);
        ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
        ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
        ireq->ireq_family = AF_INET6;
        ireq->ir_mark = inet_request_mark(sk, skb);
 
+       if (security_inet_conn_request(sk, skb, req))
+               goto drop_and_free;
+
        if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
index 9cbae0169249894601a6fb59646a34ed2bd6168c..788dfdc498a95553913451beaad4624e3fa11820 100644 (file)
@@ -15,7 +15,7 @@ const struct nla_policy devlink_dl_port_function_nl_policy[DEVLINK_PORT_FN_ATTR_
        [DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY, },
        [DEVLINK_PORT_FN_ATTR_STATE] = NLA_POLICY_MAX(NLA_U8, 1),
        [DEVLINK_PORT_FN_ATTR_OPSTATE] = NLA_POLICY_MAX(NLA_U8, 1),
-       [DEVLINK_PORT_FN_ATTR_CAPS] = NLA_POLICY_BITFIELD32(3),
+       [DEVLINK_PORT_FN_ATTR_CAPS] = NLA_POLICY_BITFIELD32(15),
 };
 
 const struct nla_policy devlink_dl_selftest_id_nl_policy[DEVLINK_ATTR_SELFTEST_ID_FLASH + 1] = {
index b71dab630a8732de385fb07694149d7e69a7151c..80cdc6f6b34c97601961179c4839dc68c0a6d2e1 100644 (file)
@@ -342,9 +342,7 @@ struct sk_buff *prp_create_tagged_frame(struct hsr_frame_info *frame,
        skb = skb_copy_expand(frame->skb_std, 0,
                              skb_tailroom(frame->skb_std) + HSR_HLEN,
                              GFP_ATOMIC);
-       prp_fill_rct(skb, frame, port);
-
-       return skb;
+       return prp_fill_rct(skb, frame, port);
 }
 
 static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
index 3760a14b6b576b07c60879bf58831c382cd103bc..4da03bf45c9b753f957d4c561d2075c825f07791 100644 (file)
@@ -22,9 +22,7 @@ enum bpf_fou_encap_type {
        FOU_BPF_ENCAP_GUE,
 };
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in BTF");
+__bpf_kfunc_start_defs();
 
 /* bpf_skb_set_fou_encap - Set FOU encap parameters
  *
@@ -100,7 +98,7 @@ __bpf_kfunc int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx,
        return 0;
 }
 
-__diag_pop()
+__bpf_kfunc_end_defs();
 
 BTF_SET8_START(fou_kfunc_set)
 BTF_ID_FLAGS(func, bpf_skb_set_fou_encap)
index 56f6ecc43451ecac0c831ee559b1f68a41f77978..4d42d0756fd70c980dac9620b1d778c6721ee28d 100644 (file)
@@ -170,3 +170,4 @@ module_init(iptable_nat_init);
 module_exit(iptable_nat_exit);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("iptables legacy nat table");
index ca5e5b21587cda49e27abb66115856caebcaecf5..0e7f53964d0af627dbc72a7f7fbfc345f118d591 100644 (file)
@@ -108,3 +108,4 @@ static void __exit iptable_raw_fini(void)
 module_init(iptable_raw_init);
 module_exit(iptable_raw_fini);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("iptables legacy raw table");
index 265b39bc435b4c7f356a7e92705e43353adb426a..482e733c337582f82707a873873a18cfc84e0871 100644 (file)
@@ -186,3 +186,4 @@ module_init(nf_defrag_init);
 module_exit(nf_defrag_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv4 defragmentation support");
index f33aeab9424f75478aa19db46820823024762a0c..f01b038fc1cda0257fb29df9a8832310378bd1fb 100644 (file)
@@ -336,3 +336,4 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
 EXPORT_SYMBOL_GPL(nf_send_unreach);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv4 packet rejection core");
index 98b25e5d147bac5262982681b0bc5b38434a473a..d37282c06e3da05fd36c48e6b4236d74ac2b7fe2 100644 (file)
@@ -306,7 +306,7 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
        treq->af_specific = af_ops;
 
        treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
-       treq->req_usec_ts = -1;
+       treq->req_usec_ts = false;
 
 #if IS_ENABLED(CONFIG_MPTCP)
        treq->is_mptcp = sk_is_mptcp(sk);
index ef5472ed6158873d961f24f3651ac822e1d0f64f..7696417d064011d3b437c2035b6fe1e5a3a0e6df 100644 (file)
@@ -1315,7 +1315,8 @@ static int tcp_ao_parse_crypto(struct tcp_ao_add *cmd, struct tcp_ao_key *key)
        key->maclen = cmd->maclen ?: 12; /* 12 is the default in RFC5925 */
 
        /* Check: maclen + tcp-ao header <= (MAX_TCP_OPTION_SPACE - mss
-        *                                      - tstamp - wscale - sackperm),
+        *                                      - tstamp (including sackperm)
+        *                                      - wscale),
         * see tcp_syn_options(), tcp_synack_options(), commit 33ad798c924b.
         *
         * In order to allow D-SACK with TCP-AO, the header size should be:
@@ -1342,9 +1343,9 @@ static int tcp_ao_parse_crypto(struct tcp_ao_add *cmd, struct tcp_ao_key *key)
         * large to leave sufficient option space.
         */
        syn_tcp_option_space = MAX_TCP_OPTION_SPACE;
+       syn_tcp_option_space -= TCPOLEN_MSS_ALIGNED;
        syn_tcp_option_space -= TCPOLEN_TSTAMP_ALIGNED;
        syn_tcp_option_space -= TCPOLEN_WSCALE_ALIGNED;
-       syn_tcp_option_space -= TCPOLEN_SACKPERM_ALIGNED;
        if (tcp_ao_len(key) > syn_tcp_option_space) {
                err = -EMSGSIZE;
                goto err_kfree;
index 50aaa1527150bd8adabce125775aab8b97018d53..bcb55d98004c5213f0095613124d5193b15b2793 100644 (file)
@@ -7115,7 +7115,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        req->syncookie = want_cookie;
        tcp_rsk(req)->af_specific = af_ops;
        tcp_rsk(req)->ts_off = 0;
-       tcp_rsk(req)->req_usec_ts = -1;
+       tcp_rsk(req)->req_usec_ts = false;
 #if IS_ENABLED(CONFIG_MPTCP)
        tcp_rsk(req)->is_mptcp = 0;
 #endif
@@ -7143,9 +7143,10 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        if (!dst)
                goto drop_and_free;
 
-       if (tmp_opt.tstamp_ok)
+       if (tmp_opt.tstamp_ok) {
+               tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
                tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
-
+       }
        if (!want_cookie && !isn) {
                int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
 
index f558c054cf6e7538ecc3d711637af0bd44872318..eb13a55d660c2376968f11ee3265280f8cc9e1bd 100644 (file)
@@ -601,6 +601,44 @@ static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
 }
 #endif
 
+static __be32 *process_tcp_ao_options(struct tcp_sock *tp,
+                                     const struct tcp_request_sock *tcprsk,
+                                     struct tcp_out_options *opts,
+                                     struct tcp_key *key, __be32 *ptr)
+{
+#ifdef CONFIG_TCP_AO
+       u8 maclen = tcp_ao_maclen(key->ao_key);
+
+       if (tcprsk) {
+               u8 aolen = maclen + sizeof(struct tcp_ao_hdr);
+
+               *ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) |
+                              (tcprsk->ao_keyid << 8) |
+                              (tcprsk->ao_rcv_next));
+       } else {
+               struct tcp_ao_key *rnext_key;
+               struct tcp_ao_info *ao_info;
+
+               ao_info = rcu_dereference_check(tp->ao_info,
+                       lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
+               rnext_key = READ_ONCE(ao_info->rnext_key);
+               if (WARN_ON_ONCE(!rnext_key))
+                       return ptr;
+               *ptr++ = htonl((TCPOPT_AO << 24) |
+                              (tcp_ao_len(key->ao_key) << 16) |
+                              (key->ao_key->sndid << 8) |
+                              (rnext_key->rcvid));
+       }
+       opts->hash_location = (__u8 *)ptr;
+       ptr += maclen / sizeof(*ptr);
+       if (unlikely(maclen % sizeof(*ptr))) {
+               memset(ptr, TCPOPT_NOP, sizeof(*ptr));
+               ptr++;
+       }
+#endif
+       return ptr;
+}
+
 /* Write previously computed TCP options to the packet.
  *
  * Beware: Something in the Internet is very sensitive to the ordering of
@@ -629,37 +667,7 @@ static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp,
                opts->hash_location = (__u8 *)ptr;
                ptr += 4;
        } else if (tcp_key_is_ao(key)) {
-#ifdef CONFIG_TCP_AO
-               u8 maclen = tcp_ao_maclen(key->ao_key);
-
-               if (tcprsk) {
-                       u8 aolen = maclen + sizeof(struct tcp_ao_hdr);
-
-                       *ptr++ = htonl((TCPOPT_AO << 24) | (aolen << 16) |
-                                      (tcprsk->ao_keyid << 8) |
-                                      (tcprsk->ao_rcv_next));
-               } else {
-                       struct tcp_ao_key *rnext_key;
-                       struct tcp_ao_info *ao_info;
-
-                       ao_info = rcu_dereference_check(tp->ao_info,
-                               lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk));
-                       rnext_key = READ_ONCE(ao_info->rnext_key);
-                       if (WARN_ON_ONCE(!rnext_key))
-                               goto out_ao;
-                       *ptr++ = htonl((TCPOPT_AO << 24) |
-                                      (tcp_ao_len(key->ao_key) << 16) |
-                                      (key->ao_key->sndid << 8) |
-                                      (rnext_key->rcvid));
-               }
-               opts->hash_location = (__u8 *)ptr;
-               ptr += maclen / sizeof(*ptr);
-               if (unlikely(maclen % sizeof(*ptr))) {
-                       memset(ptr, TCPOPT_NOP, sizeof(*ptr));
-                       ptr++;
-               }
-out_ao:
-#endif
+               ptr = process_tcp_ao_options(tp, tcprsk, opts, key, ptr);
        }
        if (unlikely(opts->mss)) {
                *ptr++ = htonl((TCPOPT_MSS << 24) |
@@ -3693,8 +3701,6 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
        mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
 
        memset(&opts, 0, sizeof(opts));
-       if (tcp_rsk(req)->req_usec_ts < 0)
-               tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
        now = tcp_clock_ns();
 #ifdef CONFIG_SYN_COOKIES
        if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok))
index 65a8eaae2fec6c8ded875f4a1f5644763b5fbf8e..55b310a722c7d5c91480640271e180f2a7e95e58 100644 (file)
@@ -231,7 +231,7 @@ static void cpool_schedule_cleanup(struct kref *kref)
  */
 void tcp_sigpool_release(unsigned int id)
 {
-       if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg))
+       if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
                return;
 
        /* slow-path */
@@ -245,7 +245,7 @@ EXPORT_SYMBOL_GPL(tcp_sigpool_release);
  */
 void tcp_sigpool_get(unsigned int id)
 {
-       if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg))
+       if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
                return;
        kref_get(&cpool[id].kref);
 }
@@ -256,7 +256,7 @@ int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RC
        struct crypto_ahash *hash;
 
        rcu_read_lock_bh();
-       if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg)) {
+       if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg)) {
                rcu_read_unlock_bh();
                return -EINVAL;
        }
@@ -301,7 +301,7 @@ EXPORT_SYMBOL_GPL(tcp_sigpool_end);
  */
 size_t tcp_sigpool_algo(unsigned int id, char *buf, size_t buf_len)
 {
-       if (WARN_ON_ONCE(id > cpool_populated || !cpool[id].alg))
+       if (WARN_ON_ONCE(id >= cpool_populated || !cpool[id].alg))
                return -EINVAL;
 
        return strscpy(buf, cpool[id].alg, buf_len);
index bf3cb3a13600cd418b6c9066c9e4d667854d21c9..52cf104e347881f052f1d515e78f05815bf7f45b 100644 (file)
@@ -170,3 +170,4 @@ module_init(ip6table_nat_init);
 module_exit(ip6table_nat_exit);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ip6tables legacy nat table");
index 08861d5d1f4db36b90cac3097d16393c04a5e4f7..fc9f6754028f2c4fc3fd5bb83ec1dca5225d4a74 100644 (file)
@@ -106,3 +106,4 @@ static void __exit ip6table_raw_fini(void)
 module_init(ip6table_raw_init);
 module_exit(ip6table_raw_fini);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ip6tables legacy raw table");
index d59b296b4f51900c21a96d8f80fa1cadb72c83e8..be7817fbc024d096072b11108834ed4d13070f43 100644 (file)
@@ -182,3 +182,4 @@ module_init(nf_defrag_init);
 module_exit(nf_defrag_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv6 defragmentation support");
index 58ccdb08c0fd1858f4fc5aa326d9efc5df935cba..d45bc54b7ea55d03ffbea6de9ef3db8c098c217f 100644 (file)
@@ -413,3 +413,4 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
 EXPORT_SYMBOL_GPL(nf_send_unreach6);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv6 packet rejection core");
index 500f6ed3b8cf9370ece430662f498944c685a19e..12eedc6ca2ccae602985e5b96402b13f258f9589 100644 (file)
@@ -181,14 +181,15 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        treq = tcp_rsk(req);
        treq->tfo_listener = false;
 
-       if (security_inet_conn_request(sk, skb, req))
-               goto out_free;
-
        req->mss = mss;
        ireq->ir_rmt_port = th->source;
        ireq->ir_num = ntohs(th->dest);
        ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
        ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+
+       if (security_inet_conn_request(sk, skb, req))
+               goto out_free;
+
        if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
            np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
            np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
index dd1d8ffd5f5941a423a4ea43c1867709a101d0a0..65d1f6755f98f0cc133083fd486ca5952df483b2 100644 (file)
@@ -1946,4 +1946,5 @@ module_init(kcm_init);
 module_exit(kcm_exit);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("KCM (Kernel Connection Multiplexor) sockets");
 MODULE_ALIAS_NETPROTO(PF_KCM);
index 7cac441862e2163b5629c0fdd42c1bbfdfd7dbb9..51bccfb00a9cd9f16318bbe9a8cc3fe2460912b1 100644 (file)
@@ -127,8 +127,14 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
        skb->transport_header += llc_len;
        skb_pull(skb, llc_len);
        if (skb->protocol == htons(ETH_P_802_2)) {
-               __be16 pdulen = eth_hdr(skb)->h_proto;
-               s32 data_size = ntohs(pdulen) - llc_len;
+               __be16 pdulen;
+               s32 data_size;
+
+               if (skb->mac_len < ETH_HLEN)
+                       return 0;
+
+               pdulen = eth_hdr(skb)->h_proto;
+               data_size = ntohs(pdulen) - llc_len;
 
                if (data_size < 0 ||
                    !pskb_may_pull(skb, data_size))
index 79d1cef8f15a923c166fb000656291d5d5796779..06fb8e6944b06aecad73739bc9337e551cf73a90 100644 (file)
@@ -153,6 +153,9 @@ int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
        int rc = 1;
        u32 data_size;
 
+       if (skb->mac_len < ETH_HLEN)
+               return 1;
+
        llc_pdu_decode_sa(skb, mac_da);
        llc_pdu_decode_da(skb, mac_sa);
        llc_pdu_decode_ssap(skb, &dsap);
index 05c6ae0920534b6f7205d1008bddabd15039ce64..f5065429251095a116b9abbd5f0ba1df4d881142 100644 (file)
@@ -76,6 +76,9 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb)
        u32 data_size;
        struct sk_buff *nskb;
 
+       if (skb->mac_len < ETH_HLEN)
+               goto out;
+
        /* The test request command is type U (llc_len = 3) */
        data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
        nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
index 3230506ae3ffd8c120f0c96b07d78a7b58a4aaac..a2c16b5010877c41a8e9fed0c6d7f20c6b37ca43 100644 (file)
@@ -2450,3 +2450,4 @@ static void __exit ip_vs_cleanup(void)
 module_init(ip_vs_init);
 module_exit(ip_vs_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IP Virtual Server");
index 5e6ec32aff2b141b858616743787d399cf9d0df6..75f4c231f4a0277b00bb3324559d51ea9222959b 100644 (file)
@@ -270,3 +270,4 @@ static void __exit ip_vs_dh_cleanup(void)
 module_init(ip_vs_dh_init);
 module_exit(ip_vs_dh_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs destination hashing scheduler");
index b846cc385279e11fbfccca4e5131ec62f80852c6..ab117e5bc34ead2a9bae1cdd6660ebb9d318aa29 100644 (file)
@@ -72,3 +72,4 @@ static void __exit ip_vs_fo_cleanup(void)
 module_init(ip_vs_fo_init);
 module_exit(ip_vs_fo_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs weighted failover scheduler");
index ef1f45e43b6308edb33cc616f9d0994787d7e726..f53899d12416229f77cf8beb4017f0e233178773 100644 (file)
@@ -635,3 +635,4 @@ static void __exit ip_vs_ftp_exit(void)
 module_init(ip_vs_ftp_init);
 module_exit(ip_vs_ftp_exit);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs ftp helper");
index cf78ba4ce5ffd28255fde30276b736ae5925ce18..8ceec7a2fa8f30e8f4960b2159976cd7b1d908bd 100644 (file)
@@ -632,3 +632,4 @@ static void __exit ip_vs_lblc_cleanup(void)
 module_init(ip_vs_lblc_init);
 module_exit(ip_vs_lblc_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs locality-based least-connection scheduler");
index 9eddf118b40ec00c073687d6e36ecddc92244f55..0fb64707213f83ce99c598044a78c84bdd6cf091 100644 (file)
@@ -817,3 +817,4 @@ static void __exit ip_vs_lblcr_cleanup(void)
 module_init(ip_vs_lblcr_init);
 module_exit(ip_vs_lblcr_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs locality-based least-connection with replication scheduler");
index 9d34d81fc6f1c054d74c1f149d03689b249d2fa0..c2764505e380f57093370855671a44aab70c9ffb 100644 (file)
@@ -86,3 +86,4 @@ static void __exit ip_vs_lc_cleanup(void)
 module_init(ip_vs_lc_init);
 module_exit(ip_vs_lc_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs least connection scheduler");
index f56862a8751888044c0aabd3993e85d8311fe29e..ed7f5c889b417b02dcf33b6aa0ff1081b940ec64 100644 (file)
@@ -136,3 +136,4 @@ static void __exit ip_vs_nq_cleanup(void)
 module_init(ip_vs_nq_init);
 module_exit(ip_vs_nq_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs never queue scheduler");
index c03066fdd5ca69a33dbef556dd232cbb5ed7f89c..c7708b809700de1a41bb5b3a9a79aad6643165aa 100644 (file)
@@ -79,3 +79,4 @@ static void __exit ip_vs_ovf_cleanup(void)
 module_init(ip_vs_ovf_init);
 module_exit(ip_vs_ovf_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs overflow connection scheduler");
index 0ac6705a61d3b09f43977a78c3c42e5e33ea2a10..e4ce1d9a63f913e20956be3b96e146f86ec3c3aa 100644 (file)
@@ -185,3 +185,4 @@ static void __exit ip_vs_sip_cleanup(void)
 module_init(ip_vs_sip_init);
 module_exit(ip_vs_sip_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs sip helper");
index 38495c6f6c7cc83becdcc311bf4b51868a6198c2..6baa34dff9f045ec25c441ed5c236bbed0e908ae 100644 (file)
@@ -122,4 +122,5 @@ static void __exit ip_vs_rr_cleanup(void)
 
 module_init(ip_vs_rr_init);
 module_exit(ip_vs_rr_cleanup);
+MODULE_DESCRIPTION("ipvs round-robin scheduler");
 MODULE_LICENSE("GPL");
index 7663288e535873dc444980859cf4a258c7142172..a46f99a5661848dc1103db68c494d22595428f26 100644 (file)
@@ -137,3 +137,4 @@ static void __exit ip_vs_sed_cleanup(void)
 module_init(ip_vs_sed_init);
 module_exit(ip_vs_sed_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs shortest expected delay scheduler");
index c2028e412092986fc4b206f2c163a43166f0db82..92e77d7a6b50efe8cded589066c061b4a75d5eef 100644 (file)
@@ -376,3 +376,4 @@ static void __exit ip_vs_sh_cleanup(void)
 module_init(ip_vs_sh_init);
 module_exit(ip_vs_sh_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs source hashing scheduler");
index 3308e4cc740aea54713fead99fde05224973d3ef..8d5419edde50720949b7ad01cda527a72399413e 100644 (file)
@@ -137,3 +137,4 @@ static void __exit ip_vs_twos_cleanup(void)
 module_init(ip_vs_twos_init);
 module_exit(ip_vs_twos_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs power of twos choice scheduler");
index 09f584b564a0e174d9f7d6eca2483614355b356a..9fa500927c0a07bb92d53cbdaaf6bbd063c6a9a6 100644 (file)
@@ -109,3 +109,4 @@ static void __exit ip_vs_wlc_cleanup(void)
 module_init(ip_vs_wlc_init);
 module_exit(ip_vs_wlc_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs weighted least connection scheduler");
index 1bc7a0789d85adcd2bef1d9d567f7de888243db8..85ce0d04afac419730b6fed9e8bd30745baee8e9 100644 (file)
@@ -263,3 +263,4 @@ static void __exit ip_vs_wrr_cleanup(void)
 module_init(ip_vs_wrr_init);
 module_exit(ip_vs_wrr_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ipvs weighted round-robin scheduler");
index b21799d468d2815413cc1334cb6c5e5df7fe231d..475358ec821296c6b3fc771e96ba4d00b8da78f1 100644 (file)
@@ -230,9 +230,7 @@ static int _nf_conntrack_btf_struct_access(struct bpf_verifier_log *log,
        return 0;
 }
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in nf_conntrack BTF");
+__bpf_kfunc_start_defs();
 
 /* bpf_xdp_ct_alloc - Allocate a new CT entry
  *
@@ -467,7 +465,7 @@ __bpf_kfunc int bpf_ct_change_status(struct nf_conn *nfct, u32 status)
        return nf_ct_change_status_common(nfct, status);
 }
 
-__diag_pop()
+__bpf_kfunc_end_defs();
 
 BTF_SET8_START(nf_ct_kfunc_set)
 BTF_ID_FLAGS(func, bpf_xdp_ct_alloc, KF_ACQUIRE | KF_RET_NULL)
index 9fb9b80312989beec91d80bdea6ddc344c2b3fa8..cfa0fe0356de67e479b30723aefb3192f391cc9f 100644 (file)
@@ -82,3 +82,4 @@ out:
 EXPORT_SYMBOL_GPL(nf_conntrack_broadcast_help);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Broadcast connection tracking helper");
index 334db22199c1d640feaf3f9eef883a21fdc75277..fb0ae15e96dfe4b73ca5f587838842dec42d8e79 100644 (file)
@@ -57,6 +57,7 @@
 #include "nf_internals.h"
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("List and change connection tracking table");
 
 struct ctnetlink_list_dump_ctx {
        struct nf_conn *last;
index c928ff63b10e4755b1fe19c4aad04e2f869a318f..f36727ed91e1a41990620e321c98596bbd22c56f 100644 (file)
@@ -699,3 +699,4 @@ MODULE_ALIAS("ip_conntrack");
 MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
 MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET6));
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("IPv4 and IPv6 connection tracking");
index 141ee7783223dad0e8511565c233873223c3e2e2..6e3b2f58855fc05d31842a6ebd836d72344ef031 100644 (file)
@@ -12,9 +12,7 @@
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_nat.h>
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in nf_nat BTF");
+__bpf_kfunc_start_defs();
 
 /* bpf_ct_set_nat_info - Set source or destination nat address
  *
@@ -54,7 +52,7 @@ __bpf_kfunc int bpf_ct_set_nat_info(struct nf_conn___init *nfct,
        return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;
 }
 
-__diag_pop()
+__bpf_kfunc_end_defs();
 
 BTF_SET8_START(nf_nat_kfunc_set)
 BTF_ID_FLAGS(func, bpf_ct_set_nat_info, KF_TRUSTED_ARGS)
index c4e0516a8dfab43e7eb1a9bf6b7b7ae2bd00d18b..c3d7ecbc777ce08525bedee77d637c18682d816c 100644 (file)
@@ -1263,6 +1263,7 @@ static void __exit nf_nat_cleanup(void)
 }
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Network address translation core");
 
 module_init(nf_nat_init);
 module_exit(nf_nat_cleanup);
index 6616ba5d0b0490fe9588e054ff45c6b9ebfc35e5..5b37487d9d11fa30e064ea05aae2f080ec7aa483 100644 (file)
@@ -80,6 +80,26 @@ EXPORT_SYMBOL_GPL(nf_nat_redirect_ipv4);
 
 static const struct in6_addr loopback_addr = IN6ADDR_LOOPBACK_INIT;
 
+static bool nf_nat_redirect_ipv6_usable(const struct inet6_ifaddr *ifa, unsigned int scope)
+{
+       unsigned int ifa_addr_type = ipv6_addr_type(&ifa->addr);
+
+       if (ifa_addr_type & IPV6_ADDR_MAPPED)
+               return false;
+
+       if ((ifa->flags & IFA_F_TENTATIVE) && (!(ifa->flags & IFA_F_OPTIMISTIC)))
+               return false;
+
+       if (scope) {
+               unsigned int ifa_scope = ifa_addr_type & IPV6_ADDR_SCOPE_MASK;
+
+               if (!(scope & ifa_scope))
+                       return false;
+       }
+
+       return true;
+}
+
 unsigned int
 nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
                     unsigned int hooknum)
@@ -89,14 +109,19 @@ nf_nat_redirect_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range,
        if (hooknum == NF_INET_LOCAL_OUT) {
                newdst.in6 = loopback_addr;
        } else {
+               unsigned int scope = ipv6_addr_scope(&ipv6_hdr(skb)->daddr);
                struct inet6_dev *idev;
-               struct inet6_ifaddr *ifa;
                bool addr = false;
 
                idev = __in6_dev_get(skb->dev);
                if (idev != NULL) {
+                       const struct inet6_ifaddr *ifa;
+
                        read_lock_bh(&idev->lock);
                        list_for_each_entry(ifa, &idev->addr_list, if_list) {
+                               if (!nf_nat_redirect_ipv6_usable(ifa, scope))
+                                       continue;
+
                                newdst.in6 = ifa->addr;
                                addr = true;
                                break;
index 3c1fd8283bf4055b3fca6dd3255843c7a80eca07..a761ee6796f6fa448ba6ce8dbc50b34aaebd8e6b 100644 (file)
@@ -6520,6 +6520,12 @@ static int nft_setelem_deactivate(const struct net *net,
        return ret;
 }
 
+static void nft_setelem_catchall_destroy(struct nft_set_elem_catchall *catchall)
+{
+       list_del_rcu(&catchall->list);
+       kfree_rcu(catchall, rcu);
+}
+
 static void nft_setelem_catchall_remove(const struct net *net,
                                        const struct nft_set *set,
                                        struct nft_elem_priv *elem_priv)
@@ -6528,8 +6534,7 @@ static void nft_setelem_catchall_remove(const struct net *net,
 
        list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
                if (catchall->elem == elem_priv) {
-                       list_del_rcu(&catchall->list);
-                       kfree_rcu(catchall, rcu);
+                       nft_setelem_catchall_destroy(catchall);
                        break;
                }
        }
@@ -9678,11 +9683,12 @@ static struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
                                                  unsigned int gc_seq,
                                                  bool sync)
 {
-       struct nft_set_elem_catchall *catchall;
+       struct nft_set_elem_catchall *catchall, *next;
        const struct nft_set *set = gc->set;
+       struct nft_elem_priv *elem_priv;
        struct nft_set_ext *ext;
 
-       list_for_each_entry_rcu(catchall, &set->catchall_list, list) {
+       list_for_each_entry_safe(catchall, next, &set->catchall_list, list) {
                ext = nft_set_elem_ext(set, catchall->elem);
 
                if (!nft_set_elem_expired(ext))
@@ -9700,7 +9706,13 @@ dead_elem:
                if (!gc)
                        return NULL;
 
-               nft_trans_gc_elem_add(gc, catchall->elem);
+               elem_priv = catchall->elem;
+               if (sync) {
+                       nft_setelem_data_deactivate(gc->net, gc->set, elem_priv);
+                       nft_setelem_catchall_destroy(catchall);
+               }
+
+               nft_trans_gc_elem_add(gc, elem_priv);
        }
 
        return gc;
@@ -11386,4 +11398,5 @@ module_exit(nf_tables_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_DESCRIPTION("Framework for packet filtering and classification");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFTABLES);
index 50723ba0828905737c71dd842be39e28e039218e..c0fc431991e88da49c53a47b628e1e97856d0760 100644 (file)
@@ -447,4 +447,5 @@ module_init(nfnl_osf_init);
 module_exit(nfnl_osf_fini);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Passive OS fingerprint matching");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_OSF);
index 98e4946100c5488549be5cb4f02a373f3d556722..40e230d8b712e68bc49ca0ab555de2ac213be3e9 100644 (file)
@@ -137,6 +137,7 @@ module_init(nft_chain_nat_init);
 module_exit(nft_chain_nat_exit);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("nftables network address translation support");
 #ifdef CONFIG_NF_TABLES_IPV4
 MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
 #endif
index 04b51f285332175619ca74a30152d50475df44e4..1bfe258018da45fb8cf9f95fe0d3b871e5023f31 100644 (file)
@@ -204,4 +204,5 @@ bool nft_fib_reduce(struct nft_regs_track *track,
 EXPORT_SYMBOL_GPL(nft_fib_reduce);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Query routing table from nftables");
 MODULE_AUTHOR("Florian Westphal <fw@strlen.de>");
index a5268e6dd32f1bbb1aa72525afbea5793544aec4..358e742afad703a99aa49acc3b31752f0017d9c5 100644 (file)
@@ -270,4 +270,5 @@ module_exit(nft_fwd_netdev_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("nftables netdev packet forwarding support");
 MODULE_ALIAS_NFT_AF_EXPR(5, "fwd");
index 7ddb9a78e3fc888d3ee4c214870ae8c84171efe7..ef93e0d3bee04c7d984af9b9f45372265642b5ac 100644 (file)
@@ -561,7 +561,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
 {
        struct recent_table *t = pde_data(file_inode(file));
        struct recent_entry *e;
-       char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
+       char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:255.255.255.255")];
        const char *c = buf;
        union nf_inet_addr addr = {};
        u_int16_t family;
index 9c4f231be27572f9d889248c33a04868f22e44de..1eeff9422856eb9006e25b21ef188280b4cff7f6 100644 (file)
@@ -257,5 +257,6 @@ static void __exit netlink_diag_exit(void)
 
 module_init(netlink_diag_init);
 module_exit(netlink_diag_exit);
+MODULE_DESCRIPTION("Netlink-based socket monitoring/diagnostic interface (sock_diag)");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 16 /* AF_NETLINK */);
index 0b9a785dea45951dc625d5c5a05c5610106ea915..3019a4406ca4f72be806ff922e377ea7609c3934 100644 (file)
@@ -985,7 +985,7 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
                if (err)
                        return err;
 
-               nf_conn_act_ct_ext_add(ct);
+               nf_conn_act_ct_ext_add(skb, ct, ctinfo);
        } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
                   labels_nonzero(&info->labels.mask)) {
                err = ovs_ct_set_labels(ct, key, &info->labels.value,
index ac85d4644a3c3a047066129e69c40a626557f1c7..df8a271948a1c157a8b2253ed521c997fab05a50 100644 (file)
@@ -212,7 +212,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
                conn->idle_timestamp = jiffies;
                if (atomic_dec_and_test(&conn->active))
                        rxrpc_set_service_reap_timer(conn->rxnet,
-                                                    jiffies + rxrpc_connection_expiry);
+                                                    jiffies + rxrpc_connection_expiry * HZ);
        }
 
        rxrpc_put_call(call, rxrpc_call_put_io_thread);
index 7d910aee4f8cb20319a42c5497e1aed5456afad6..c553a30e9c8386384cc5f038c3ecb6570349c7b7 100644 (file)
@@ -87,7 +87,7 @@ static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
        struct rxrpc_local *local =
                container_of(timer, struct rxrpc_local, client_conn_reap_timer);
 
-       if (local->kill_all_client_conns &&
+       if (!local->kill_all_client_conns &&
            test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
                rxrpc_wake_up_io_thread(local);
 }
index 9d3f26bf0440d9d2296e73ad39157e9122cc0b1d..c39252d61ebbb73f63414dc72f1a7bc8264712fc 100644 (file)
@@ -1098,7 +1098,7 @@ repeat:
                        }
                } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
                        if (unlikely(!rcu_access_pointer(a->goto_chain))) {
-                               net_warn_ratelimited("can't go to NULL chain!\n");
+                               tcf_set_drop_reason(res, SKB_DROP_REASON_TC_ERROR);
                                return TC_ACT_SHOT;
                        }
                        tcf_action_goto_chain_exec(a, res);
index 9583645e86c280d33e6681d1b770540a89497443..0db0ecf1d11038a49e487e36b2eb33a028ae8727 100644 (file)
@@ -376,6 +376,17 @@ static void tcf_ct_flow_tc_ifidx(struct flow_offload *entry,
        entry->tuplehash[dir].tuple.tc.iifidx = act_ct_ext->ifindex[dir];
 }
 
+static void tcf_ct_flow_ct_ext_ifidx_update(struct flow_offload *entry)
+{
+       struct nf_conn_act_ct_ext *act_ct_ext;
+
+       act_ct_ext = nf_conn_act_ct_ext_find(entry->ct);
+       if (act_ct_ext) {
+               tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_ORIGINAL);
+               tcf_ct_flow_tc_ifidx(entry, act_ct_ext, FLOW_OFFLOAD_DIR_REPLY);
+       }
+}
+
 static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
                                  struct nf_conn *ct,
                                  bool tcp, bool bidirectional)
@@ -671,6 +682,8 @@ static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
        else
                ctinfo = IP_CT_ESTABLISHED_REPLY;
 
+       nf_conn_act_ct_ext_fill(skb, ct, ctinfo);
+       tcf_ct_flow_ct_ext_ifidx_update(flow);
        flow_offload_refresh(nf_ft, flow, force_refresh);
        if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
                /* Process this flow in SW to allow promoting to ASSURED */
@@ -1034,7 +1047,7 @@ do_nat:
                tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);
 
                if (!nf_ct_is_confirmed(ct))
-                       nf_conn_act_ct_ext_add(ct);
+                       nf_conn_act_ct_ext_add(skb, ct, ctinfo);
 
                /* This will take care of sending queued events
                 * even if the connection is already confirmed.
index c9a811f4c7eeedcf33627926bffd78490721e1f6..393b78729216cc3c5b1eeffebcfd9cebd8cda448 100644 (file)
@@ -677,4 +677,5 @@ static void __exit gate_cleanup_module(void)
 
 module_init(gate_init_module);
 module_exit(gate_cleanup_module);
+MODULE_DESCRIPTION("TC gate action");
 MODULE_LICENSE("GPL v2");
index 1daeb2182b70e2dc3915165f26e0a237a02d4edd..1976bd1639863f394040528a6cddbddf8b706d2d 100644 (file)
@@ -1658,6 +1658,7 @@ static inline int __tcf_classify(struct sk_buff *skb,
                                 int act_index,
                                 u32 *last_executed_chain)
 {
+       u32 orig_reason = res->drop_reason;
 #ifdef CONFIG_NET_CLS_ACT
        const int max_reclassify_loop = 16;
        const struct tcf_proto *first_tp;
@@ -1712,8 +1713,14 @@ reclassify:
                        goto reset;
                }
 #endif
-               if (err >= 0)
+               if (err >= 0) {
+                       /* Policy drop or drop reason is over-written by
+                        * classifiers with a bogus value(0) */
+                       if (err == TC_ACT_SHOT &&
+                           res->drop_reason == SKB_NOT_DROPPED_YET)
+                               tcf_set_drop_reason(res, orig_reason);
                        return err;
+               }
        }
 
        if (unlikely(n)) {
index 1b92c33b5f819365d941e5361881044af6872523..a1f56931330cabdc197b741ab4999e8ffff0fe0d 100644 (file)
@@ -341,4 +341,5 @@ static void __exit exit_basic(void)
 
 module_init(init_basic)
 module_exit(exit_basic)
+MODULE_DESCRIPTION("TC basic classifier");
 MODULE_LICENSE("GPL");
index bd9322d71910bfa414e96d15f9bb7cb1f29c2d97..7ee8dbf49ed0d39208290e48f519e031ef8497db 100644 (file)
@@ -222,4 +222,5 @@ static void __exit exit_cgroup_cls(void)
 
 module_init(init_cgroup_cls);
 module_exit(exit_cgroup_cls);
+MODULE_DESCRIPTION("TC cgroup classifier");
 MODULE_LICENSE("GPL");
index c49d6af0e04807275229e817759f88e5272364a8..afc534ee0a18671f1cc49a08e3ad021aa024f88e 100644 (file)
@@ -446,4 +446,5 @@ static void __exit exit_fw(void)
 
 module_init(init_fw)
 module_exit(exit_fw)
+MODULE_DESCRIPTION("SKB mark based TC classifier");
 MODULE_LICENSE("GPL");
index 1424bfeaca73ff986813a9440d444fee22c12bc9..12a505db4183233b8ae30a5d03a0c4a5fd363676 100644 (file)
@@ -684,4 +684,5 @@ static void __exit exit_route4(void)
 
 module_init(init_route4)
 module_exit(exit_route4)
+MODULE_DESCRIPTION("Routing table realm based TC classifier");
 MODULE_LICENSE("GPL");
index 6663e971a13e76e555ddb3cd9b0ea2e75be11c03..d5bdfd4a76558283b0895735f6ca47572b31bf68 100644 (file)
@@ -1489,4 +1489,5 @@ static void __exit exit_u32(void)
 
 module_init(init_u32)
 module_exit(exit_u32)
+MODULE_DESCRIPTION("Universal 32bit based TC Classifier");
 MODULE_LICENSE("GPL");
index cac870eb7897305e952398108a7cb1edce71a27a..9a0b85190a2c65772b59fbfc04b155c988b11076 100644 (file)
@@ -574,3 +574,4 @@ static void __exit cbs_module_exit(void)
 module_init(cbs_module_init)
 module_exit(cbs_module_exit)
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Credit Based shaper");
index 19c851125901f630f9c367e2f711ceeb067aa00a..ae1da08e268f1a5396f3cb204d768b5add6d8054 100644 (file)
@@ -513,3 +513,4 @@ module_init(choke_module_init)
 module_exit(choke_module_exit)
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Choose and keep responsive flows scheduler");
index 19901e77cd3b7f1aab28da8b2469de310b24d920..097740a9afeafc421d6c05028dbb56a9ba878bc6 100644 (file)
@@ -495,3 +495,4 @@ static void __exit drr_exit(void)
 module_init(drr_init);
 module_exit(drr_exit);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Deficit Round Robin scheduler");
index 61d1f0e32cf3561bdce63a2911ab15bd84bd3359..4808159a5466085eb4bdda228b49e085eb993d75 100644 (file)
@@ -513,3 +513,4 @@ static void __exit etf_module_exit(void)
 module_init(etf_module_init)
 module_exit(etf_module_exit)
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Earliest TxTime First (ETF) qdisc");
index b10efeaf0629d2ab8a5bbe2b080c3ef84d52717f..f7c88495946b0bb6a0c51192673cfbe91898728f 100644 (file)
@@ -826,3 +826,4 @@ static void __exit ets_exit(void)
 module_init(ets_init);
 module_exit(ets_exit);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Enhanced Transmission Selection(ETS) scheduler");
index e1040421b79797fefaa26b8d7d3f44b91896e1de..450f5c67ac4956e21b544dfd81f886714171eced 100644 (file)
@@ -269,3 +269,4 @@ struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
        return q ? : ERR_PTR(err);
 }
 EXPORT_SYMBOL(fifo_create_dflt);
+MODULE_DESCRIPTION("Single queue packet and byte based First In First Out(P/BFIFO) scheduler");
index 0fd18c344ab5ae6d53e12fc764c0506a2979b4c8..3a31c47fea9bd97d815f2624d926bf7be62387cd 100644 (file)
@@ -919,14 +919,8 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_TIMER_SLACK]            = { .type = NLA_U32 },
        [TCA_FQ_HORIZON]                = { .type = NLA_U32 },
        [TCA_FQ_HORIZON_DROP]           = { .type = NLA_U8 },
-       [TCA_FQ_PRIOMAP]                = {
-                       .type = NLA_BINARY,
-                       .len = sizeof(struct tc_prio_qopt),
-               },
-       [TCA_FQ_WEIGHTS]                = {
-                       .type = NLA_BINARY,
-                       .len = FQ_BANDS * sizeof(s32),
-               },
+       [TCA_FQ_PRIOMAP]                = NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)),
+       [TCA_FQ_WEIGHTS]                = NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)),
 };
 
 /* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
index 872d127c9db42a6eb4d67879f25c665ae6e7f0f1..8c61eb3dc943195f7a3fb6265a9c5cf87aeddfcf 100644 (file)
@@ -945,3 +945,4 @@ module_init(gred_module_init)
 module_exit(gred_module_exit)
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Generic Random Early Detection qdisc");
index 880c5f16b29ccf322f2308450aac4c6f130a2472..16c45da4036a7e3e6e07d8ba7f4ad2bb53c8a072 100644 (file)
@@ -1693,5 +1693,6 @@ hfsc_cleanup(void)
 }
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hierarchical Fair Service Curve scheduler");
 module_init(hfsc_init);
 module_exit(hfsc_cleanup);
index 0d947414e61611f20f5983e60aafbaa9e4da2598..7349233eaa9b652a96e04f45674a66f3f4bc0f47 100644 (file)
@@ -2179,3 +2179,4 @@ static void __exit htb_module_exit(void)
 module_init(htb_module_init)
 module_exit(htb_module_exit)
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hierarchical Token Bucket scheduler");
index a463a63192c3c73fd1d346b91c3b0f34cfd1a55b..5fa9eaa79bfc9e5ac86287c96e8fdd1aa5fbf0c4 100644 (file)
@@ -370,3 +370,4 @@ module_exit(ingress_module_exit);
 
 MODULE_ALIAS("sch_clsact");
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Ingress and clsact based ingress and egress qdiscs");
index 793009f445c03bc024a76350d4f54294307ef2e6..43e53ee00a56a755f37aa20e4837542be5bcf8bf 100644 (file)
@@ -789,3 +789,4 @@ module_init(mqprio_module_init);
 module_exit(mqprio_module_exit);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Classful multiqueue prio qdisc");
index 83b3793c4012e7b1740ab964a1b8a94514c8ccee..b3a5572c167b719f96e2947fa4267f1f93077814 100644 (file)
@@ -129,3 +129,4 @@ void mqprio_fp_to_offload(u32 fp[TC_QOPT_MAX_QUEUE],
 EXPORT_SYMBOL_GPL(mqprio_fp_to_offload);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Shared mqprio qdisc code currently between taprio and mqprio");
index 75c9c860182b406e06e747455b1b7084fbab415d..d66d5f0ec0805331c4b814e88910965a98bf2d23 100644 (file)
@@ -410,3 +410,4 @@ module_init(multiq_module_init)
 module_exit(multiq_module_exit)
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Multi queue to hardware queue mapping qdisc");
index 6ba2dc191ed941a3acfacb53cc4292388b076704..fa678eb8852841776cf34c33088bdc3bf1a010ad 100644 (file)
@@ -1307,3 +1307,4 @@ static void __exit netem_module_exit(void)
 module_init(netem_module_init)
 module_exit(netem_module_exit)
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Network characteristics emulator qdisc");
index 35f49edf63dbf2cbdb7010871d7e430cd9d6ed97..992f0c8d79886cbf9b944d626fb5c30c28fecebf 100644 (file)
@@ -226,3 +226,4 @@ static void __exit plug_module_exit(void)
 module_init(plug_module_init)
 module_exit(plug_module_exit)
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Qdisc to plug and unplug traffic via netlink control");
index fdc5ef52c3ee92c40232a5af7c68ce920b61b7e6..8ecdd3ef6f8ea05eea06fba5c7a9233fb09f2cb9 100644 (file)
@@ -433,3 +433,4 @@ module_init(prio_module_init)
 module_exit(prio_module_exit)
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Simple 3-band priority qdisc");
index 28315166fe8e1f15775eabd9ea09bd8fd706aa34..48a604c320c76d043c2d98b29143e80dc2242fb8 100644 (file)
@@ -1535,3 +1535,4 @@ static void __exit qfq_exit(void)
 module_init(qfq_init);
 module_exit(qfq_exit);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Quick Fair Queueing Plus qdisc");
index 16277b6a0238dabc18ef78768ad4495d23999025..607b6c8b3a9bf234200be75c726d8899aa6530fa 100644 (file)
@@ -563,3 +563,4 @@ module_init(red_module_init)
 module_exit(red_module_exit)
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Random Early Detection qdisc");
index 66dcb18638fea440f6f5bce2ca45b6179b060408..eb77558fa367eba7d26dd913f76e59762440840c 100644 (file)
@@ -937,3 +937,4 @@ static void __exit sfq_module_exit(void)
 module_init(sfq_module_init)
 module_exit(sfq_module_exit)
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Stochastic Fairness qdisc");
index 5df2dacb7b1abaecdeaa5df5f38d98dc5e2f920d..28beb11762d8a37565136ad873fab526ff80580e 100644 (file)
@@ -307,3 +307,4 @@ module_init(skbprio_module_init)
 module_exit(skbprio_module_exit)
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SKB priority based scheduling qdisc");
index 2e1949de4171add132da0201191e24bc49d741a2..31a8252bd09c9111090f0147df6deb0ad81577af 100644 (file)
@@ -2572,3 +2572,4 @@ static void __exit taprio_module_exit(void)
 module_init(taprio_module_init);
 module_exit(taprio_module_exit);
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Time Aware Priority qdisc");
index 17d2d00ddb182d1318a45cd30ed9e80e8eafb85c..dd6b1a723bf7275ffcb195d4265d243e3e6de214 100644 (file)
@@ -621,3 +621,4 @@ static void __exit tbf_module_exit(void)
 module_init(tbf_module_init)
 module_exit(tbf_module_exit)
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Token Bucket Filter qdisc");
index 7721239c185fb0d54ab36300f9138dbc98521ace..59304611dc0050e525de5f45b2a3b8628b684ff3 100644 (file)
@@ -523,3 +523,4 @@ module_init(teql_init);
 module_exit(teql_exit);
 
 MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("True (or trivial) link equalizer qdisc");
index abd2667734d4cb32ffacd52c0f1b511ad31fc523..da97f946b79b00c82c8dbd496a2c4304dcd164b7 100644 (file)
@@ -275,7 +275,7 @@ static int __smc_release(struct smc_sock *smc)
 
        if (!smc->use_fallback) {
                rc = smc_close_active(smc);
-               sock_set_flag(sk, SOCK_DEAD);
+               smc_sock_set_flag(sk, SOCK_DEAD);
                sk->sk_shutdown |= SHUTDOWN_MASK;
        } else {
                if (sk->sk_state != SMC_CLOSED) {
@@ -1743,7 +1743,7 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
                if (new_clcsock)
                        sock_release(new_clcsock);
                new_sk->sk_state = SMC_CLOSED;
-               sock_set_flag(new_sk, SOCK_DEAD);
+               smc_sock_set_flag(new_sk, SOCK_DEAD);
                sock_put(new_sk); /* final */
                *new_smc = NULL;
                goto out;
index 24745fde4ac2641d307ada3d7f14b61e44f94694..e377980b84145dea53844b0ba080fd2fb25f80c0 100644 (file)
@@ -377,4 +377,9 @@ int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
 int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
 int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
 
+static inline void smc_sock_set_flag(struct sock *sk, enum sock_flags flag)
+{
+       set_bit(flag, &sk->sk_flags);
+}
+
 #endif /* __SMC_H */
index 89105e95b4523f0a0d197e7167f27914430f482e..3c06625ceb200e359c1b4e0d04e4705e0fca51d8 100644 (file)
@@ -28,13 +28,15 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
 {
        struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
        struct smc_connection *conn = cdcpend->conn;
+       struct smc_buf_desc *sndbuf_desc;
        struct smc_sock *smc;
        int diff;
 
+       sndbuf_desc = conn->sndbuf_desc;
        smc = container_of(conn, struct smc_sock, conn);
        bh_lock_sock(&smc->sk);
-       if (!wc_status) {
-               diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
+       if (!wc_status && sndbuf_desc) {
+               diff = smc_curs_diff(sndbuf_desc->len,
                                     &cdcpend->conn->tx_curs_fin,
                                     &cdcpend->cursor);
                /* sndbuf_space is decreased in smc_sendmsg */
@@ -114,9 +116,6 @@ int smc_cdc_msg_send(struct smc_connection *conn,
        union smc_host_cursor cfed;
        int rc;
 
-       if (unlikely(!READ_ONCE(conn->sndbuf_desc)))
-               return -ENOBUFS;
-
        smc_cdc_add_pending_send(conn, pend);
 
        conn->tx_cdc_seq++;
@@ -385,7 +384,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
                smc->sk.sk_shutdown |= RCV_SHUTDOWN;
                if (smc->clcsock && smc->clcsock->sk)
                        smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
-               sock_set_flag(&smc->sk, SOCK_DONE);
+               smc_sock_set_flag(&smc->sk, SOCK_DONE);
                sock_hold(&smc->sk); /* sock_put in close_work */
                if (!queue_work(smc_close_wq, &conn->close_work))
                        sock_put(&smc->sk);
index dbdf03e8aa5b55f29d71e9f7c28169cac358740f..10219f55aad14d795dabe4331458bd1b73c22789 100644 (file)
@@ -116,7 +116,8 @@ static void smc_close_cancel_work(struct smc_sock *smc)
        struct sock *sk = &smc->sk;
 
        release_sock(sk);
-       cancel_work_sync(&smc->conn.close_work);
+       if (cancel_work_sync(&smc->conn.close_work))
+               sock_put(sk);
        cancel_delayed_work_sync(&smc->conn.tx_work);
        lock_sock(sk);
 }
@@ -173,7 +174,7 @@ void smc_close_active_abort(struct smc_sock *smc)
                break;
        }
 
-       sock_set_flag(sk, SOCK_DEAD);
+       smc_sock_set_flag(sk, SOCK_DEAD);
        sk->sk_state_change(sk);
 
        if (release_clcsock) {
index 0d1c4e78fc7f8c7fde79cb57a6c98a12487b24cc..3379c64217a4c1e166f3fc0ef5053c152b2eb80e 100644 (file)
@@ -1685,20 +1685,16 @@ struct file *__sys_socket_file(int family, int type, int protocol)
  *     Therefore, __weak is needed to ensure that the call is still
  *     emitted, by telling the compiler that we don't know what the
  *     function might eventually be.
- *
- *     __diag_* below are needed to dismiss the missing prototype warning.
  */
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "A fmod_ret entry point for BPF programs");
+__bpf_hook_start();
 
 __weak noinline int update_socket_protocol(int family, int type, int protocol)
 {
        return protocol;
 }
 
-__diag_pop();
+__bpf_hook_end();
 
 int __sys_socket(int family, int type, int protocol)
 {
index 9c210273d06b7f51184c08d38c71929a87eef1c2..daa9582ec861aa3a065f81a621e364961c8f4604 100644 (file)
@@ -111,7 +111,8 @@ static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt)
 
        pipefs_sb = rpc_get_sb_net(net);
        if (pipefs_sb) {
-               __rpc_clnt_remove_pipedir(clnt);
+               if (pipefs_sb == clnt->pipefs_sb)
+                       __rpc_clnt_remove_pipedir(clnt);
                rpc_put_sb_net(net);
        }
 }
@@ -151,6 +152,8 @@ rpc_setup_pipedir(struct super_block *pipefs_sb, struct rpc_clnt *clnt)
 {
        struct dentry *dentry;
 
+       clnt->pipefs_sb = pipefs_sb;
+
        if (clnt->cl_program->pipe_dir_name != NULL) {
                dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt);
                if (IS_ERR(dentry))
@@ -2171,6 +2174,7 @@ call_connect_status(struct rpc_task *task)
        task->tk_status = 0;
        switch (status) {
        case -ECONNREFUSED:
+       case -ECONNRESET:
                /* A positive refusal suggests a rebind is needed. */
                if (RPC_IS_SOFTCONN(task))
                        break;
@@ -2179,7 +2183,6 @@ call_connect_status(struct rpc_task *task)
                        goto out_retry;
                }
                fallthrough;
-       case -ECONNRESET:
        case -ECONNABORTED:
        case -ENETDOWN:
        case -ENETUNREACH:
@@ -2220,7 +2223,7 @@ call_connect_status(struct rpc_task *task)
                        }
                        xprt_switch_put(xps);
                        if (!task->tk_xprt)
-                               return;
+                               goto out;
                }
                goto out_retry;
        case -ENOBUFS:
@@ -2235,6 +2238,7 @@ out_next:
 out_retry:
        /* Check for timeouts before looping back to call_bind */
        task->tk_action = call_bind;
+out:
        rpc_check_timeout(task);
 }
 
index 5988a5c5ff3f0c430d622336f2eadd3af95de14a..102c3818bc54d4f9a1fc5f854c3a841289974869 100644 (file)
@@ -769,6 +769,10 @@ void rpcb_getport_async(struct rpc_task *task)
 
        child = rpcb_call_async(rpcb_clnt, map, proc);
        rpc_release_client(rpcb_clnt);
+       if (IS_ERR(child)) {
+               /* rpcb_map_release() has freed the arguments */
+               return;
+       }
 
        xprt->stat.bind_count++;
        rpc_put_task(child);
index ab453ede54f0cd7be590495952a3feb71d92aef1..2364c485540c66fd32ba089bb014628842b468a4 100644 (file)
@@ -283,7 +283,7 @@ out_unlock:
        xprt_clear_locked(xprt);
 out_sleep:
        task->tk_status = -EAGAIN;
-       if  (RPC_IS_SOFT(task))
+       if (RPC_IS_SOFT(task) || RPC_IS_SOFTCONN(task))
                rpc_sleep_on_timeout(&xprt->sending, task, NULL,
                                xprt_request_timeout(req));
        else
@@ -349,7 +349,7 @@ out_unlock:
        xprt_clear_locked(xprt);
 out_sleep:
        task->tk_status = -EAGAIN;
-       if (RPC_IS_SOFT(task))
+       if (RPC_IS_SOFT(task) || RPC_IS_SOFTCONN(task))
                rpc_sleep_on_timeout(&xprt->sending, task, NULL,
                                xprt_request_timeout(req));
        else
index a15bf2ede89bf5f09843022b7b24004b433b4832..58f3dc8d0d71c3b252588a9e6470650a04a7749e 100644 (file)
@@ -1181,6 +1181,7 @@ static void xs_sock_reset_state_flags(struct rpc_xprt *xprt)
 {
        struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
 
+       transport->xprt_err = 0;
        clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
        clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state);
        clear_bit(XPRT_SOCK_WAKE_WRITE, &transport->sock_state);
@@ -2772,18 +2773,13 @@ static void xs_wake_error(struct sock_xprt *transport)
 {
        int sockerr;
 
-       if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
-               return;
-       mutex_lock(&transport->recv_mutex);
-       if (transport->sock == NULL)
-               goto out;
        if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
-               goto out;
+               return;
        sockerr = xchg(&transport->xprt_err, 0);
-       if (sockerr < 0)
+       if (sockerr < 0) {
                xprt_wake_pending_tasks(&transport->xprt, sockerr);
-out:
-       mutex_unlock(&transport->recv_mutex);
+               xs_tcp_force_close(&transport->xprt);
+       }
 }
 
 static void xs_wake_pending(struct sock_xprt *transport)
index e8fd257c0e6888b18164f4a63a68ad358ed0459e..1a9a5bdaccf4fcb41c59fe60fd0600490bcc5698 100644 (file)
@@ -88,7 +88,7 @@ const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
 
 const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
        [TIPC_NLA_LINK_UNSPEC]          = { .type = NLA_UNSPEC },
-       [TIPC_NLA_LINK_NAME]            = { .type = NLA_STRING,
+       [TIPC_NLA_LINK_NAME]            = { .type = NLA_NUL_STRING,
                                            .len = TIPC_MAX_LINK_NAME },
        [TIPC_NLA_LINK_MTU]             = { .type = NLA_U32 },
        [TIPC_NLA_LINK_BROADCAST]       = { .type = NLA_FLAG },
@@ -125,7 +125,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
 
 const struct nla_policy tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
        [TIPC_NLA_BEARER_UNSPEC]        = { .type = NLA_UNSPEC },
-       [TIPC_NLA_BEARER_NAME]          = { .type = NLA_STRING,
+       [TIPC_NLA_BEARER_NAME]          = { .type = NLA_NUL_STRING,
                                            .len = TIPC_MAX_BEARER_NAME },
        [TIPC_NLA_BEARER_PROP]          = { .type = NLA_NESTED },
        [TIPC_NLA_BEARER_DOMAIN]        = { .type = NLA_U32 }
index e22c81435ef7df909d5448004982b897aab0b345..f6dc896bf44c6e3b0952d783c9d633cd6596dbee 100644 (file)
@@ -130,6 +130,8 @@ static void virtio_transport_init_hdr(struct sk_buff *skb,
        hdr->dst_port   = cpu_to_le32(dst_port);
        hdr->flags      = cpu_to_le32(info->flags);
        hdr->len        = cpu_to_le32(payload_len);
+       hdr->buf_alloc  = cpu_to_le32(0);
+       hdr->fwd_cnt    = cpu_to_le32(0);
 }
 
 static void virtio_transport_copy_nonlinear_skb(const struct sk_buff *skb,
@@ -1369,11 +1371,17 @@ virtio_transport_recv_connected(struct sock *sk,
                        vsk->peer_shutdown |= RCV_SHUTDOWN;
                if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
                        vsk->peer_shutdown |= SEND_SHUTDOWN;
-               if (vsk->peer_shutdown == SHUTDOWN_MASK &&
-                   vsock_stream_has_data(vsk) <= 0 &&
-                   !sock_flag(sk, SOCK_DONE)) {
-                       (void)virtio_transport_reset(vsk, NULL);
-                       virtio_transport_do_close(vsk, true);
+               if (vsk->peer_shutdown == SHUTDOWN_MASK) {
+                       if (vsock_stream_has_data(vsk) <= 0 && !sock_flag(sk, SOCK_DONE)) {
+                               (void)virtio_transport_reset(vsk, NULL);
+                               virtio_transport_do_close(vsk, true);
+                       }
+                       /* Remove this socket anyway because the remote peer sent
+                        * the shutdown. This way a new connection will succeed
+                        * if the remote peer uses the same source port,
+                        * even if the old socket is still unreleased, but now disconnected.
+                        */
+                       vsock_remove_sock(vsk);
                }
                if (le32_to_cpu(virtio_vsock_hdr(skb)->flags))
                        sk->sk_state_change(sk);
index d74f3fd20f2bf25f0166b293fc378823b2612e8b..7d5e920141e9b71c0f020f7b4c7700fb8aa07eda 100644 (file)
@@ -27,9 +27,7 @@ struct bpf_xfrm_info {
        int link;
 };
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in xfrm_interface BTF");
+__bpf_kfunc_start_defs();
 
 /* bpf_skb_get_xfrm_info - Get XFRM metadata
  *
@@ -93,7 +91,7 @@ __bpf_kfunc int bpf_skb_set_xfrm_info(struct __sk_buff *skb_ctx, const struct bp
        return 0;
 }
 
-__diag_pop()
+__bpf_kfunc_end_defs();
 
 BTF_SET8_START(xfrm_ifc_kfunc_set)
 BTF_ID_FLAGS(func, bpf_skb_get_xfrm_info)
index 0b2f04dcb58979bacdff7012c9f1044ef67c079e..e2f302e55bbb20de0ee95593a34d26c4745d0ca3 100644 (file)
@@ -56,7 +56,7 @@ struct snd_info_private_data {
 };
 
 static int snd_info_version_init(void);
-static void snd_info_disconnect(struct snd_info_entry *entry);
+static void snd_info_clear_entries(struct snd_info_entry *entry);
 
 /*
 
@@ -569,11 +569,16 @@ void snd_info_card_disconnect(struct snd_card *card)
 {
        if (!card)
                return;
-       mutex_lock(&info_mutex);
+
        proc_remove(card->proc_root_link);
-       card->proc_root_link = NULL;
        if (card->proc_root)
-               snd_info_disconnect(card->proc_root);
+               proc_remove(card->proc_root->p);
+
+       mutex_lock(&info_mutex);
+       if (card->proc_root)
+               snd_info_clear_entries(card->proc_root);
+       card->proc_root_link = NULL;
+       card->proc_root = NULL;
        mutex_unlock(&info_mutex);
 }
 
@@ -745,15 +750,14 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
 }
 EXPORT_SYMBOL(snd_info_create_card_entry);
 
-static void snd_info_disconnect(struct snd_info_entry *entry)
+static void snd_info_clear_entries(struct snd_info_entry *entry)
 {
        struct snd_info_entry *p;
 
        if (!entry->p)
                return;
        list_for_each_entry(p, &entry->children, list)
-               snd_info_disconnect(p);
-       proc_remove(entry->p);
+               snd_info_clear_entries(p);
        entry->p = NULL;
 }
 
@@ -770,8 +774,9 @@ void snd_info_free_entry(struct snd_info_entry * entry)
        if (!entry)
                return;
        if (entry->p) {
+               proc_remove(entry->p);
                mutex_lock(&info_mutex);
-               snd_info_disconnect(entry);
+               snd_info_clear_entries(entry);
                mutex_unlock(&info_mutex);
        }
 
index 23cf8284ce369ed4d694d86326e72ddec72851aa..0ba8f0c4cd99a27aaff1d3b43edd306daf0ad857 100644 (file)
@@ -720,15 +720,14 @@ static int __init amiga_audio_probe(struct platform_device *pdev)
        return dmasound_init();
 }
 
-static int __exit amiga_audio_remove(struct platform_device *pdev)
+static void __exit amiga_audio_remove(struct platform_device *pdev)
 {
        dmasound_deinit();
-       return 0;
 }
 
 static struct platform_driver amiga_audio_driver = {
-       .remove = __exit_p(amiga_audio_remove),
-       .driver   = {
+       .remove_new = __exit_p(amiga_audio_remove),
+       .driver = {
                .name   = "amiga-audio",
        },
 };
index 3c9e6e97ad0f7382722d1328105a864dbb627804..03264915c618332a214fe1ee35678502625f8765 100644 (file)
@@ -2068,6 +2068,7 @@ static const struct pci_device_id driver_denylist[] = {
        { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1043, 0x874f) }, /* ASUS ROG Zenith II / Strix */
        { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb59) }, /* MSI TRX40 Creator */
        { PCI_DEVICE_SUB(0x1022, 0x1487, 0x1462, 0xcb60) }, /* MSI TRX40 */
+       { PCI_DEVICE_SUB(0x1022, 0x15e3, 0x1022, 0xd601) }, /* ASRock X670E Taichi */
        {}
 };
 
index 58006c8bcfb917d8f8cd8af7ab9388e93b5e61cd..669ae3d6e447e6058dfc2b35d188c8eb590f0d32 100644 (file)
@@ -7343,8 +7343,10 @@ enum {
        ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
        ALC299_FIXUP_PREDATOR_SPK,
        ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
+       ALC289_FIXUP_DELL_SPK1,
        ALC289_FIXUP_DELL_SPK2,
        ALC289_FIXUP_DUAL_SPK,
+       ALC289_FIXUP_RTK_AMP_DUAL_SPK,
        ALC294_FIXUP_SPK2_TO_DAC1,
        ALC294_FIXUP_ASUS_DUAL_SPK,
        ALC285_FIXUP_THINKPAD_X1_GEN7,
@@ -7444,6 +7446,8 @@ enum {
        ALC287_FIXUP_THINKPAD_I2S_SPK,
        ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD,
        ALC2XX_FIXUP_HEADSET_MIC,
+       ALC289_FIXUP_DELL_CS35L41_SPI_2,
+       ALC294_FIXUP_CS35L41_I2C_2,
 };
 
 /* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -8670,6 +8674,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
        },
+       [ALC289_FIXUP_DELL_SPK1] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x14, 0x90170140 },
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE
+       },
        [ALC289_FIXUP_DELL_SPK2] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -8685,6 +8698,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC289_FIXUP_DELL_SPK2
        },
+       [ALC289_FIXUP_RTK_AMP_DUAL_SPK] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_speaker2_to_dac1,
+               .chained = true,
+               .chain_id = ALC289_FIXUP_DELL_SPK1
+       },
        [ALC294_FIXUP_SPK2_TO_DAC1] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_speaker2_to_dac1,
@@ -9552,6 +9571,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_headset_mic,
        },
+       [ALC289_FIXUP_DELL_CS35L41_SPI_2] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cs35l41_fixup_spi_two,
+               .chained = true,
+               .chain_id = ALC289_FIXUP_DUAL_SPK
+       },
+       [ALC294_FIXUP_CS35L41_I2C_2] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cs35l41_fixup_i2c_two,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -9662,13 +9691,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
        SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
        SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
-       SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
-       SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
-       SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC245_FIXUP_CS35L41_SPI_2),
-       SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
-       SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
-       SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC245_FIXUP_CS35L41_SPI_2),
-       SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1028, 0x0cbd, "Dell Oasis 13 CS MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1028, 0x0cbe, "Dell Oasis 13 2-IN-1 MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1028, 0x0cbf, "Dell Oasis 13 Low Weight MTU-L", ALC289_FIXUP_DELL_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1028, 0x0cc0, "Dell Oasis 13", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
+       SND_PCI_QUIRK(0x1028, 0x0cc1, "Dell Oasis 14 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1028, 0x0cc2, "Dell Oasis 14 2-in-1 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1028, 0x0cc3, "Dell Oasis 14 Low Weight MTL-U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1028, 0x0cc4, "Dell Oasis 16 MTL-H/U", ALC289_FIXUP_DELL_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1028, 0x0cc5, "Dell Oasis 14", ALC289_FIXUP_RTK_AMP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -9913,6 +9944,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x19e1, "ASUS UX581LV", ALC295_FIXUP_ASUS_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
        SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1a83, "ASUS UM5302LA", ALC294_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x1043, 0x1a8f, "ASUS UX582ZS", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x1b11, "ASUS UX431DA", ALC294_FIXUP_ASUS_COEF_1B),
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -9929,6 +9961,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE),
        SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x16a3, "ASUS UX3402VA", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1043, 0x1f62, "ASUS UX7602ZM", ALC245_FIXUP_CS35L41_SPI_2),
        SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
        SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS),
index 1185e5aac5233f9ecd5c7ecc30dea998a8d4df23..60cbc881be6e10bd5ba3c58de992bd5508702489 100644 (file)
@@ -26,7 +26,6 @@
 
 #define DRV_NAME "acp_i2s_playcap"
 #define        I2S_MASTER_MODE_ENABLE          1
-#define        I2S_MODE_ENABLE                 0
 #define        LRCLK_DIV_FIELD                 GENMASK(10, 2)
 #define        BCLK_DIV_FIELD                  GENMASK(23, 11)
 #define        ACP63_LRCLK_DIV_FIELD           GENMASK(12, 2)
@@ -56,7 +55,8 @@ static inline void acp_set_i2s_clk(struct acp_dev_data *adata, int dai_id)
        }
 
        val  = I2S_MASTER_MODE_ENABLE;
-       val |= I2S_MODE_ENABLE & BIT(1);
+       if (adata->tdm_mode)
+               val |= BIT(1);
 
        switch (chip->acp_rev) {
        case ACP63_DEV:
index 77227c8f01f60d8e63f9cd91030dcd9a93e1100e..3c459a67ad0c981d8e3d4252efaf9ddefec9e9b0 100644 (file)
@@ -356,7 +356,7 @@ static const struct snd_kcontrol_new aw88395_controls[] = {
                aw88395_get_fade_in_time, aw88395_set_fade_in_time),
        SOC_SINGLE_EXT("Volume Ramp Down Step", 0, 0, FADE_TIME_MAX, FADE_TIME_MIN,
                aw88395_get_fade_out_time, aw88395_set_fade_out_time),
-       SOC_SINGLE_EXT("Calib", 0, 0, 100, 0,
+       SOC_SINGLE_EXT("Calib", 0, 0, AW88395_CALI_RE_MAX, 0,
                aw88395_re_get, aw88395_re_set),
        AW88395_PROFILE_EXT("Profile Set", aw88395_profile_info,
                aw88395_profile_get, aw88395_profile_set),
index ce30bc7cdea9f34ccfe0eca56a21f05fcd8379cd..54f8457e8497043425437efbd153503778714afe 100644 (file)
@@ -438,7 +438,7 @@ static int aw_dev_set_vcalb(struct aw88399 *aw88399)
        if (ret)
                return ret;
 
-       vsense_select = vsense_select & (~AW88399_VDSEL_MASK);
+       vsense_select = vsense_value & (~AW88399_VDSEL_MASK);
 
        ret = aw88399_dev_get_icalk(aw88399, &icalk);
        if (ret) {
@@ -486,8 +486,8 @@ static int aw_dev_update_cali_re(struct aw_cali_desc *cali_desc)
        u32 cali_re;
        int ret;
 
-       if ((aw_dev->cali_desc.cali_re <= AW88399_CALI_RE_MAX) ||
-                       (aw_dev->cali_desc.cali_re >= AW88399_CALI_RE_MIN))
+       if ((aw_dev->cali_desc.cali_re >= AW88399_CALI_RE_MAX) ||
+                       (aw_dev->cali_desc.cali_re <= AW88399_CALI_RE_MIN))
                return -EINVAL;
 
        cali_re = AW88399_SHOW_RE_TO_DSP_RE((aw_dev->cali_desc.cali_re +
@@ -1710,7 +1710,7 @@ static const struct snd_kcontrol_new aw88399_controls[] = {
                aw88399_get_fade_in_time, aw88399_set_fade_in_time),
        SOC_SINGLE_EXT("Volume Ramp Down Step", 0, 0, FADE_TIME_MAX, FADE_TIME_MIN,
                aw88399_get_fade_out_time, aw88399_set_fade_out_time),
-       SOC_SINGLE_EXT("Calib", 0, 0, 100, 0,
+       SOC_SINGLE_EXT("Calib", 0, 0, AW88399_CALI_RE_MAX, 0,
                aw88399_re_get, aw88399_re_set),
        AW88399_PROFILE_EXT("AW88399 Profile Set", aw88399_profile_info,
                aw88399_profile_get, aw88399_profile_set),
index 8b3f1e10198542114cc9902671213eb1541d7abb..4f391099d0f2cc796d26f303c65709fe840ba18e 100644 (file)
@@ -522,7 +522,7 @@ enum {
 
 enum {
        AW88399_DEV_VDSEL_DAC = 0,
-       AW88399_DEV_VDSEL_VSENSE = 1,
+       AW88399_DEV_VDSEL_VSENSE = 32,
 };
 
 enum {
index 3bbe850916493249522c10e5bad4256112344efd..4c44059427793822d6b8e0836a05d7c1273db42d 100644 (file)
@@ -927,10 +927,15 @@ void da7219_aad_suspend(struct snd_soc_component *component)
        struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
        u8 micbias_ctrl;
 
+       disable_irq(da7219_aad->irq);
+
        if (da7219_aad->jack) {
                /* Disable jack detection during suspend */
                snd_soc_component_update_bits(component, DA7219_ACCDET_CONFIG_1,
                                    DA7219_ACCDET_EN_MASK, 0);
+               cancel_delayed_work_sync(&da7219_aad->jack_det_work);
+               /* Disable ground switch */
+               snd_soc_component_update_bits(component, 0xFB, 0x01, 0x00);
 
                /*
                 * If we have a 4-pole jack inserted, then micbias will be
@@ -947,8 +952,6 @@ void da7219_aad_suspend(struct snd_soc_component *component)
                        }
                }
        }
-
-       synchronize_irq(da7219_aad->irq);
 }
 
 void da7219_aad_resume(struct snd_soc_component *component)
@@ -971,6 +974,8 @@ void da7219_aad_resume(struct snd_soc_component *component)
                                    DA7219_ACCDET_EN_MASK,
                                    DA7219_ACCDET_EN_MASK);
        }
+
+       enable_irq(da7219_aad->irq);
 }
 
 
index 09eef6042aad6d53ad91fcef1cfe365f91c768be..20da1eaa4f1c7e9991a7b46901d1d38ada60dbce 100644 (file)
@@ -877,18 +877,13 @@ static int hdmi_codec_set_jack(struct snd_soc_component *component,
                               void *data)
 {
        struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
-       int ret = -ENOTSUPP;
 
        if (hcp->hcd.ops->hook_plugged_cb) {
                hcp->jack = jack;
-               ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
-                                                   hcp->hcd.data,
-                                                   plugged_cb,
-                                                   component->dev);
-               if (ret)
-                       hcp->jack = NULL;
+               return 0;
        }
-       return ret;
+
+       return -ENOTSUPP;
 }
 
 static int hdmi_dai_spdif_probe(struct snd_soc_dai *dai)
@@ -982,6 +977,21 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
        return ret;
 }
 
+static int hdmi_probe(struct snd_soc_component *component)
+{
+       struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+       int ret = 0;
+
+       if (hcp->hcd.ops->hook_plugged_cb) {
+               ret = hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
+                                                   hcp->hcd.data,
+                                                   plugged_cb,
+                                                   component->dev);
+       }
+
+       return ret;
+}
+
 static void hdmi_remove(struct snd_soc_component *component)
 {
        struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
@@ -992,6 +1002,7 @@ static void hdmi_remove(struct snd_soc_component *component)
 }
 
 static const struct snd_soc_component_driver hdmi_driver = {
+       .probe                  = hdmi_probe,
        .remove                 = hdmi_remove,
        .dapm_widgets           = hdmi_widgets,
        .num_dapm_widgets       = ARRAY_SIZE(hdmi_widgets),
index 5cf28d034f094ceb2a739286f6b1004c41252036..f66417a0f29f636ef8b6776697c081a808a9f08a 100644 (file)
@@ -530,12 +530,61 @@ static int nau8540_set_tdm_slot(struct snd_soc_dai *dai,
        return 0;
 }
 
+static int nau8540_dai_trigger(struct snd_pcm_substream *substream,
+                              int cmd, struct snd_soc_dai *dai)
+{
+       struct snd_soc_component *component = dai->component;
+       struct nau8540 *nau8540 = snd_soc_component_get_drvdata(component);
+       struct regmap *regmap = nau8540->regmap;
+       unsigned int val;
+       int ret = 0;
+
+       /* Reading the peak data to detect abnormal data in the ADC channel.
+        * If abnormal data happens, the driver takes recovery actions to
+        * refresh the ADC channel.
+        */
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+               regmap_update_bits(regmap, NAU8540_REG_CLOCK_CTRL,
+                                  NAU8540_CLK_AGC_EN, NAU8540_CLK_AGC_EN);
+               regmap_update_bits(regmap, NAU8540_REG_ALC_CONTROL_3,
+                                  NAU8540_ALC_CH_ALL_EN, NAU8540_ALC_CH_ALL_EN);
+
+               regmap_read(regmap, NAU8540_REG_PEAK_CH1, &val);
+               dev_dbg(nau8540->dev, "1.ADC CH1 peak data %x", val);
+               if (!val) {
+                       regmap_update_bits(regmap, NAU8540_REG_MUTE,
+                                          NAU8540_PGA_CH_ALL_MUTE, NAU8540_PGA_CH_ALL_MUTE);
+                       regmap_update_bits(regmap, NAU8540_REG_MUTE,
+                                          NAU8540_PGA_CH_ALL_MUTE, 0);
+                       regmap_write(regmap, NAU8540_REG_RST, 0x1);
+                       regmap_write(regmap, NAU8540_REG_RST, 0);
+                       regmap_read(regmap, NAU8540_REG_PEAK_CH1, &val);
+                       dev_dbg(nau8540->dev, "2.ADC CH1 peak data %x", val);
+                       if (!val) {
+                               dev_err(nau8540->dev, "Channel recovery failed!!");
+                               ret = -EIO;
+                       }
+               }
+               regmap_update_bits(regmap, NAU8540_REG_CLOCK_CTRL,
+                                  NAU8540_CLK_AGC_EN, 0);
+               regmap_update_bits(regmap, NAU8540_REG_ALC_CONTROL_3,
+                                  NAU8540_ALC_CH_ALL_EN, 0);
+               break;
+
+       default:
+               break;
+       }
+
+       return ret;
+}
 
 static const struct snd_soc_dai_ops nau8540_dai_ops = {
        .startup = nau8540_dai_startup,
        .hw_params = nau8540_hw_params,
        .set_fmt = nau8540_set_fmt,
        .set_tdm_slot = nau8540_set_tdm_slot,
+       .trigger = nau8540_dai_trigger,
 };
 
 #define NAU8540_RATES SNDRV_PCM_RATE_8000_48000
index 305ea9207cf0b40e4a5c9b66504f03192c891814..2ce6063d462b916343fae8f0fcac3ccbf018b83d 100644 (file)
@@ -85,6 +85,7 @@
 
 /* CLOCK_CTRL (0x02) */
 #define NAU8540_CLK_ADC_EN             (0x1 << 15)
+#define NAU8540_CLK_AGC_EN             (0x1 << 3)
 #define NAU8540_CLK_I2S_EN             (0x1 << 1)
 
 /* CLOCK_SRC (0x03) */
 #define NAU8540_TDM_OFFSET_EN          (0x1 << 14)
 #define NAU8540_TDM_TX_MASK            0xf
 
+/* ALC_CONTROL_3 (0x22) */
+#define NAU8540_ALC_CH1_EN             (0x1 << 12)
+#define NAU8540_ALC_CH2_EN             (0x1 << 13)
+#define NAU8540_ALC_CH3_EN             (0x1 << 14)
+#define NAU8540_ALC_CH4_EN             (0x1 << 15)
+#define NAU8540_ALC_CH_ALL_EN          (0xf << 12)
+
 /* ADC_SAMPLE_RATE (0x3A) */
 #define NAU8540_CH_SYNC                (0x1 << 14)
 #define NAU8540_ADC_OSR_MASK           0x3
 #define NAU8540_VMID_SEL_SFT           4
 #define NAU8540_VMID_SEL_MASK          (0x3 << NAU8540_VMID_SEL_SFT)
 
+/* MUTE (0x61) */
+#define NAU8540_PGA_CH1_MUTE           0x1
+#define NAU8540_PGA_CH2_MUTE           0x2
+#define NAU8540_PGA_CH3_MUTE           0x4
+#define NAU8540_PGA_CH4_MUTE           0x8
+#define NAU8540_PGA_CH_ALL_MUTE                0xf
+
 /* MIC_BIAS (0x67) */
 #define NAU8540_PU_PRE                 (0x1 << 8)
 
index 7077ff6ba1f4bc30cbde0b69ff147c349f8ea710..6954fbe7ec5f3bb79f8693c23f302a7a1003e11e 100644 (file)
@@ -963,13 +963,6 @@ static int rt712_sdca_probe(struct snd_soc_component *component)
        rt712_sdca_parse_dt(rt712, &rt712->slave->dev);
        rt712->component = component;
 
-       if (!rt712->first_hw_init)
-               return 0;
-
-       ret = pm_runtime_resume(component->dev);
-       if (ret < 0 && ret != -EACCES)
-               return ret;
-
        /* add SPK route */
        if (rt712->hw_id != RT712_DEV_ID_713) {
                snd_soc_add_component_controls(component,
@@ -980,6 +973,13 @@ static int rt712_sdca_probe(struct snd_soc_component *component)
                        rt712_sdca_spk_dapm_routes, ARRAY_SIZE(rt712_sdca_spk_dapm_routes));
        }
 
+       if (!rt712->first_hw_init)
+               return 0;
+
+       ret = pm_runtime_resume(component->dev);
+       if (ret < 0 && ret != -EACCES)
+               return ret;
+
        return 0;
 }
 
index 6dfcfcf47cab39c0c015d7fec6bad3ed415c9d4a..f78197c8e582bb2e3f088faddd085aef138212b9 100644 (file)
@@ -1216,7 +1216,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
        playback_codec = of_get_child_by_name(pdev->dev.of_node, "playback-codecs");
        if (!playback_codec) {
                ret = -EINVAL;
-               dev_err_probe(&pdev->dev, ret, "Property 'speaker-codecs' missing or invalid\n");
+               dev_err_probe(&pdev->dev, ret, "Property 'playback-codecs' missing or invalid\n");
                goto err_playback_codec;
        }
 
@@ -1230,7 +1230,7 @@ static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
        for_each_card_prelinks(card, i, dai_link) {
                ret = mt8186_mt6366_card_set_be_link(card, dai_link, playback_codec, "I2S3");
                if (ret) {
-                       dev_err_probe(&pdev->dev, ret, "%s set speaker_codec fail\n",
+                       dev_err_probe(&pdev->dev, ret, "%s set playback_codec fail\n",
                                      dai_link->name);
                        goto err_probe;
                }
index 5c51dbef6e8645f8a213353b8bd8c511f0c5fa3c..860e66ec85e8a3748c4769e691e4201fe1b12695 100644 (file)
@@ -1757,7 +1757,7 @@ static struct platform_driver rockchip_i2s_tdm_driver = {
        .remove_new = rockchip_i2s_tdm_remove,
        .driver = {
                .name = DRV_NAME,
-               .of_match_table = of_match_ptr(rockchip_i2s_tdm_match),
+               .of_match_table = rockchip_i2s_tdm_match,
                .pm = &rockchip_i2s_tdm_pm_ops,
        },
 };
index 4e2beda6f9bf2eb89a78096f8f46cdb1ac7e23dd..3844f777c87bbb30d8e80fb835bc646d80b19521 100644 (file)
@@ -3670,7 +3670,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
                dapm_pinctrl_event(w, NULL, SND_SOC_DAPM_POST_PMD);
                break;
        case snd_soc_dapm_clock_supply:
-               w->clk = devm_clk_get(dapm->dev, w->name);
+               w->clk = devm_clk_get(dapm->dev, widget->name);
                if (IS_ERR(w->clk)) {
                        ret = PTR_ERR(w->clk);
                        goto request_failed;
index 9dce7f53b482131e5db23da720ca2f598dc32996..54dca91255a0a5bf80cf4f94eccffc2dd8719ebd 100644 (file)
@@ -176,7 +176,7 @@ int sof_register_clients(struct snd_sof_dev *sdev)
                goto err_kernel_injector;
        }
 
-       /* Platform depndent client device registration */
+       /* Platform dependent client device registration */
 
        if (sof_ops(sdev) && sof_ops(sdev)->register_ipc_clients)
                ret = sof_ops(sdev)->register_ipc_clients(sdev);
index 7643a54592f56377e57d20e754c63c999d605620..2110ffe5281cec68e2000641e615bb35f051d3fc 100644 (file)
@@ -73,14 +73,16 @@ static int omap2_mcbsp_set_clks_src(struct omap_mcbsp *mcbsp, u8 fck_src_id)
                return 0;
        }
 
-       pm_runtime_put_sync(mcbsp->dev);
+       if (mcbsp->active)
+               pm_runtime_put_sync(mcbsp->dev);
 
        r = clk_set_parent(mcbsp->fclk, fck_src);
        if (r)
                dev_err(mcbsp->dev, "CLKS: could not clk_set_parent() to %s\n",
                        src);
 
-       pm_runtime_get_sync(mcbsp->dev);
+       if (mcbsp->active)
+               pm_runtime_get_sync(mcbsp->dev);
 
        clk_put(fck_src);
 
index 75b744b479863021349349fcfdb1407491115450..bc5065bd99b2f05f4835da18d8a2a1a6fd28ce26 100644 (file)
@@ -121,6 +121,8 @@ const char *devlink_port_fn_opstate_str(enum devlink_port_fn_opstate value)
 static const char * const devlink_port_fn_attr_cap_strmap[] = {
        [0] = "roce-bit",
        [1] = "migratable-bit",
+       [2] = "ipsec-crypto-bit",
+       [3] = "ipsec-packet-bit",
 };
 
 const char *devlink_port_fn_attr_cap_str(enum devlink_port_fn_attr_cap value)
index fec6828680ce99b877cdf4d2a3ceec97cc469053..360b6448c6e9172af16c86d8223d4d6f9c78312b 100644 (file)
@@ -50,9 +50,116 @@ struct ynl_policy_nest nfsd_rpc_status_nest = {
 /* Common nested types */
 /* ============== NFSD_CMD_RPC_STATUS_GET ============== */
 /* NFSD_CMD_RPC_STATUS_GET - dump */
-void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp)
+int nfsd_rpc_status_get_rsp_dump_parse(const struct nlmsghdr *nlh, void *data)
 {
-       struct nfsd_rpc_status_get_list *next = rsp;
+       struct nfsd_rpc_status_get_rsp_dump *dst;
+       struct ynl_parse_arg *yarg = data;
+       unsigned int n_compound_ops = 0;
+       const struct nlattr *attr;
+       int i;
+
+       dst = yarg->data;
+
+       if (dst->compound_ops)
+               return ynl_error_parse(yarg, "attribute already present (rpc-status.compound-ops)");
+
+       mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+               unsigned int type = mnl_attr_get_type(attr);
+
+               if (type == NFSD_A_RPC_STATUS_XID) {
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+                       dst->_present.xid = 1;
+                       dst->xid = mnl_attr_get_u32(attr);
+               } else if (type == NFSD_A_RPC_STATUS_FLAGS) {
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+                       dst->_present.flags = 1;
+                       dst->flags = mnl_attr_get_u32(attr);
+               } else if (type == NFSD_A_RPC_STATUS_PROG) {
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+                       dst->_present.prog = 1;
+                       dst->prog = mnl_attr_get_u32(attr);
+               } else if (type == NFSD_A_RPC_STATUS_VERSION) {
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+                       dst->_present.version = 1;
+                       dst->version = mnl_attr_get_u8(attr);
+               } else if (type == NFSD_A_RPC_STATUS_PROC) {
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+                       dst->_present.proc = 1;
+                       dst->proc = mnl_attr_get_u32(attr);
+               } else if (type == NFSD_A_RPC_STATUS_SERVICE_TIME) {
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+                       dst->_present.service_time = 1;
+                       dst->service_time = mnl_attr_get_u64(attr);
+               } else if (type == NFSD_A_RPC_STATUS_SADDR4) {
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+                       dst->_present.saddr4 = 1;
+                       dst->saddr4 = mnl_attr_get_u32(attr);
+               } else if (type == NFSD_A_RPC_STATUS_DADDR4) {
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+                       dst->_present.daddr4 = 1;
+                       dst->daddr4 = mnl_attr_get_u32(attr);
+               } else if (type == NFSD_A_RPC_STATUS_SADDR6) {
+                       unsigned int len;
+
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+
+                       len = mnl_attr_get_payload_len(attr);
+                       dst->_present.saddr6_len = len;
+                       dst->saddr6 = malloc(len);
+                       memcpy(dst->saddr6, mnl_attr_get_payload(attr), len);
+               } else if (type == NFSD_A_RPC_STATUS_DADDR6) {
+                       unsigned int len;
+
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+
+                       len = mnl_attr_get_payload_len(attr);
+                       dst->_present.daddr6_len = len;
+                       dst->daddr6 = malloc(len);
+                       memcpy(dst->daddr6, mnl_attr_get_payload(attr), len);
+               } else if (type == NFSD_A_RPC_STATUS_SPORT) {
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+                       dst->_present.sport = 1;
+                       dst->sport = mnl_attr_get_u16(attr);
+               } else if (type == NFSD_A_RPC_STATUS_DPORT) {
+                       if (ynl_attr_validate(yarg, attr))
+                               return MNL_CB_ERROR;
+                       dst->_present.dport = 1;
+                       dst->dport = mnl_attr_get_u16(attr);
+               } else if (type == NFSD_A_RPC_STATUS_COMPOUND_OPS) {
+                       n_compound_ops++;
+               }
+       }
+
+       if (n_compound_ops) {
+               dst->compound_ops = calloc(n_compound_ops, sizeof(*dst->compound_ops));
+               dst->n_compound_ops = n_compound_ops;
+               i = 0;
+               mnl_attr_for_each(attr, nlh, sizeof(struct genlmsghdr)) {
+                       if (mnl_attr_get_type(attr) == NFSD_A_RPC_STATUS_COMPOUND_OPS) {
+                               dst->compound_ops[i] = mnl_attr_get_u32(attr);
+                               i++;
+                       }
+               }
+       }
+
+       return MNL_CB_OK;
+}
+
+void
+nfsd_rpc_status_get_rsp_list_free(struct nfsd_rpc_status_get_rsp_list *rsp)
+{
+       struct nfsd_rpc_status_get_rsp_list *next = rsp;
 
        while ((void *)next != YNL_LIST_END) {
                rsp = next;
@@ -65,15 +172,16 @@ void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp)
        }
 }
 
-struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys)
+struct nfsd_rpc_status_get_rsp_list *
+nfsd_rpc_status_get_dump(struct ynl_sock *ys)
 {
        struct ynl_dump_state yds = {};
        struct nlmsghdr *nlh;
        int err;
 
        yds.ys = ys;
-       yds.alloc_sz = sizeof(struct nfsd_rpc_status_get_list);
-       yds.cb = nfsd_rpc_status_get_rsp_parse;
+       yds.alloc_sz = sizeof(struct nfsd_rpc_status_get_rsp_list);
+       yds.cb = nfsd_rpc_status_get_rsp_dump_parse;
        yds.rsp_cmd = NFSD_CMD_RPC_STATUS_GET;
        yds.rsp_policy = &nfsd_rpc_status_nest;
 
@@ -86,7 +194,7 @@ struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys)
        return yds.first;
 
 free_list:
-       nfsd_rpc_status_get_list_free(yds.first);
+       nfsd_rpc_status_get_rsp_list_free(yds.first);
        return NULL;
 }
 
index b6b69501031a07378b0e846c4910f61d03e04479..989c6e209ced0ad07c3534f691944d06a2bf09a1 100644 (file)
@@ -21,13 +21,47 @@ const char *nfsd_op_str(int op);
 /* Common nested types */
 /* ============== NFSD_CMD_RPC_STATUS_GET ============== */
 /* NFSD_CMD_RPC_STATUS_GET - dump */
-struct nfsd_rpc_status_get_list {
-       struct nfsd_rpc_status_get_list *next;
-       struct nfsd_rpc_status_get_rsp obj __attribute__ ((aligned (8)));
+struct nfsd_rpc_status_get_rsp_dump {
+       struct {
+               __u32 xid:1;
+               __u32 flags:1;
+               __u32 prog:1;
+               __u32 version:1;
+               __u32 proc:1;
+               __u32 service_time:1;
+               __u32 saddr4:1;
+               __u32 daddr4:1;
+               __u32 saddr6_len;
+               __u32 daddr6_len;
+               __u32 sport:1;
+               __u32 dport:1;
+       } _present;
+
+       __u32 xid /* big-endian */;
+       __u32 flags;
+       __u32 prog;
+       __u8 version;
+       __u32 proc;
+       __s64 service_time;
+       __u32 saddr4 /* big-endian */;
+       __u32 daddr4 /* big-endian */;
+       void *saddr6;
+       void *daddr6;
+       __u16 sport /* big-endian */;
+       __u16 dport /* big-endian */;
+       unsigned int n_compound_ops;
+       __u32 *compound_ops;
+};
+
+struct nfsd_rpc_status_get_rsp_list {
+       struct nfsd_rpc_status_get_rsp_list *next;
+       struct nfsd_rpc_status_get_rsp_dump obj __attribute__((aligned(8)));
 };
 
-void nfsd_rpc_status_get_list_free(struct nfsd_rpc_status_get_list *rsp);
+void
+nfsd_rpc_status_get_rsp_list_free(struct nfsd_rpc_status_get_rsp_list *rsp);
 
-struct nfsd_rpc_status_get_list *nfsd_rpc_status_get_dump(struct ynl_sock *ys);
+struct nfsd_rpc_status_get_rsp_list *
+nfsd_rpc_status_get_dump(struct ynl_sock *ys);
 
 #endif /* _LINUX_NFSD_GEN_H */
index 13427436bfb77b96fb72bef7f71a98eaea768c7d..c4003a83cd5d87dc1764f83f230b0aca5ce7a1a3 100755 (executable)
@@ -3,6 +3,7 @@
 
 import argparse
 import collections
+import filecmp
 import os
 import re
 import shutil
@@ -1168,7 +1169,7 @@ class CodeWriter:
         if out_file is None:
             self._out = os.sys.stdout
         else:
-            self._out = tempfile.TemporaryFile('w+')
+            self._out = tempfile.NamedTemporaryFile('w+')
             self._out_file = out_file
 
     def __del__(self):
@@ -1177,6 +1178,10 @@ class CodeWriter:
     def close_out_file(self):
         if self._out == os.sys.stdout:
             return
+        # Avoid modifying the file if contents didn't change
+        self._out.flush()
+        if os.path.isfile(self._out_file) and filecmp.cmp(self._out.name, self._out_file, shallow=False):
+            return
         with open(self._out_file, 'w+') as out_file:
             self._out.seek(0)
             shutil.copyfileobj(self._out, out_file)
index df3087000efb82b8e702e7d15e0f2179ad6dcafd..145d6f06fa72dfdf9d66bbad1eecb81090745b0b 100644 (file)
@@ -17,7 +17,7 @@ settings of all cores, see cpupower(1) how to choose specific cores.
 .SH "DOCUMENTATION"
 
 kernel sources:
-Documentation/power/powercap/powercap.txt
+Documentation/power/powercap/powercap.rst
 
 
 .SH "SEE ALSO"
index fb6ab9cef84f773665ef83ff1ccc9f6510a143e7..b8854629990227d9f3f984298db0e70ce95849d4 100644 (file)
@@ -831,7 +831,7 @@ static void mock_init_hdm_decoder(struct cxl_decoder *cxld)
                        cxld->interleave_ways = 2;
                else
                        cxld->interleave_ways = 1;
-               cxld->interleave_granularity = 256;
+               cxld->interleave_granularity = 4096;
                cxld->hpa_range = (struct range) {
                        .start = base,
                        .end = base + size - 1,
index 464fc39ed2776b5ea1f89d2e82b7d072fd21424c..ee61fa3a2411f8c2acc7272a20252fad95a8a811 100644 (file)
@@ -89,6 +89,12 @@ static struct cxl_cel_entry mock_cel[] = {
                .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) |
                                      EFFECT(CONF_CHANGE_IMMEDIATE)),
        },
+       {
+               .opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE),
+               .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) |
+                                     EFFECT(SECURITY_CHANGE_IMMEDIATE) |
+                                     EFFECT(BACKGROUND_OP)),
+       },
 };
 
 /* See CXL 2.0 Table 181 Get Health Info Output Payload */
@@ -133,7 +139,6 @@ struct mock_event_log {
 };
 
 struct mock_event_store {
-       struct cxl_memdev_state *mds;
        struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX];
        u32 ev_status;
 };
@@ -150,8 +155,10 @@ struct cxl_mockmem_data {
        int user_limit;
        int master_limit;
        struct mock_event_store mes;
+       struct cxl_memdev_state *mds;
        u8 event_buf[SZ_4K];
        u64 timestamp;
+       unsigned long sanitize_timeout;
 };
 
 static struct mock_event_log *event_find_log(struct device *dev, int log_type)
@@ -326,7 +333,7 @@ static void cxl_mock_event_trigger(struct device *dev)
                        event_reset_log(log);
        }
 
-       cxl_mem_get_event_records(mes->mds, mes->ev_status);
+       cxl_mem_get_event_records(mdata->mds, mes->ev_status);
 }
 
 struct cxl_event_record_raw maint_needed = {
@@ -567,9 +574,26 @@ static int mock_partition_info(struct cxl_mbox_cmd *cmd)
        return 0;
 }
 
+void cxl_mockmem_sanitize_work(struct work_struct *work)
+{
+       struct cxl_memdev_state *mds =
+               container_of(work, typeof(*mds), security.poll_dwork.work);
+
+       mutex_lock(&mds->mbox_mutex);
+       if (mds->security.sanitize_node)
+               sysfs_notify_dirent(mds->security.sanitize_node);
+       mds->security.sanitize_active = false;
+       mutex_unlock(&mds->mbox_mutex);
+
+       dev_dbg(mds->cxlds.dev, "sanitize complete\n");
+}
+
 static int mock_sanitize(struct cxl_mockmem_data *mdata,
                         struct cxl_mbox_cmd *cmd)
 {
+       struct cxl_memdev_state *mds = mdata->mds;
+       int rc = 0;
+
        if (cmd->size_in != 0)
                return -EINVAL;
 
@@ -585,7 +609,16 @@ static int mock_sanitize(struct cxl_mockmem_data *mdata,
                return -ENXIO;
        }
 
-       return 0; /* assume less than 2 secs, no bg */
+       mutex_lock(&mds->mbox_mutex);
+       if (schedule_delayed_work(&mds->security.poll_dwork,
+                                 msecs_to_jiffies(mdata->sanitize_timeout))) {
+               mds->security.sanitize_active = true;
+               dev_dbg(mds->cxlds.dev, "sanitize issued\n");
+       } else
+               rc = -EBUSY;
+       mutex_unlock(&mds->mbox_mutex);
+
+       return rc;
 }
 
 static int mock_secure_erase(struct cxl_mockmem_data *mdata,
@@ -1237,6 +1270,7 @@ static int mock_transfer_fw(struct cxl_mockmem_data *mdata,
        }
 
        memcpy(fw + offset, transfer->data, length);
+       usleep_range(1500, 2000);
        return 0;
 }
 
@@ -1415,16 +1449,16 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
        if (IS_ERR(mds))
                return PTR_ERR(mds);
 
+       mdata->mds = mds;
        mds->mbox_send = cxl_mock_mbox_send;
        mds->payload_size = SZ_4K;
        mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf;
+       INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work);
 
        cxlds = &mds->cxlds;
        cxlds->serial = pdev->id;
-       if (is_rcd(pdev)) {
+       if (is_rcd(pdev))
                cxlds->rcd = true;
-               cxlds->component_reg_phys = CXL_RESOURCE_NONE;
-       }
 
        rc = cxl_enumerate_cmds(mds);
        if (rc)
@@ -1447,14 +1481,17 @@ static int cxl_mock_mem_probe(struct platform_device *pdev)
        if (rc)
                return rc;
 
-       mdata->mes.mds = mds;
        cxl_mock_add_event_logs(&mdata->mes);
 
-       cxlmd = devm_cxl_add_memdev(cxlds);
+       cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds);
        if (IS_ERR(cxlmd))
                return PTR_ERR(cxlmd);
 
-       rc = cxl_memdev_setup_fw_upload(mds);
+       rc = devm_cxl_setup_fw_upload(&pdev->dev, mds);
+       if (rc)
+               return rc;
+
+       rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd);
        if (rc)
                return rc;
 
@@ -1526,10 +1563,38 @@ static ssize_t fw_buf_checksum_show(struct device *dev,
 
 static DEVICE_ATTR_RO(fw_buf_checksum);
 
+static ssize_t sanitize_timeout_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+
+       return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout);
+}
+
+static ssize_t sanitize_timeout_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct cxl_mockmem_data *mdata = dev_get_drvdata(dev);
+       unsigned long val;
+       int rc;
+
+       rc = kstrtoul(buf, 0, &val);
+       if (rc)
+               return rc;
+
+       mdata->sanitize_timeout = val;
+
+       return count;
+}
+
+static DEVICE_ATTR_RW(sanitize_timeout);
+
 static struct attribute *cxl_mock_mem_attrs[] = {
        &dev_attr_security_lock.attr,
        &dev_attr_event_trigger.attr,
        &dev_attr_fw_buf_checksum.attr,
+       &dev_attr_sanitize_timeout.attr,
        NULL
 };
 ATTRIBUTE_GROUPS(cxl_mock_mem);
index a5e246f7b202748b142e0b53b6b21c6f3fe220e5..91907b321f913408ba4ecd31cf679ea07f672e4e 100644 (file)
@@ -39,9 +39,7 @@ struct bpf_testmod_struct_arg_4 {
        int b;
 };
 
-__diag_push();
-__diag_ignore_all("-Wmissing-prototypes",
-                 "Global functions as their definitions will be in bpf_testmod.ko BTF");
+__bpf_hook_start();
 
 noinline int
 bpf_testmod_test_struct_arg_1(struct bpf_testmod_struct_arg_2 a, int b, int c) {
@@ -335,7 +333,7 @@ noinline int bpf_fentry_shadow_test(int a)
 }
 EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test);
 
-__diag_pop();
+__bpf_hook_end();
 
 static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
        .attr = { .name = "bpf_testmod", .mode = 0666, },
index 1a9eeefda9a8701f967d727813d46db3e8f79850..8bf497a9843e1b97d0e23cd80e82c5787e45c981 100644 (file)
@@ -326,20 +326,14 @@ static int map_create(__u32 type, const char *name, struct bpf_map_create_opts *
 
 static int create_hash(void)
 {
-       struct bpf_map_create_opts map_opts = {
-               .sz = sizeof(map_opts),
-               .map_flags = BPF_F_NO_PREALLOC,
-       };
+       LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC);
 
        return map_create(BPF_MAP_TYPE_HASH, "hash", &map_opts);
 }
 
 static int create_percpu_hash(void)
 {
-       struct bpf_map_create_opts map_opts = {
-               .sz = sizeof(map_opts),
-               .map_flags = BPF_F_NO_PREALLOC,
-       };
+       LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = BPF_F_NO_PREALLOC);
 
        return map_create(BPF_MAP_TYPE_PERCPU_HASH, "percpu_hash", &map_opts);
 }
@@ -356,21 +350,17 @@ static int create_percpu_hash_prealloc(void)
 
 static int create_lru_hash(__u32 type, __u32 map_flags)
 {
-       struct bpf_map_create_opts map_opts = {
-               .sz = sizeof(map_opts),
-               .map_flags = map_flags,
-       };
+       LIBBPF_OPTS(bpf_map_create_opts, map_opts, .map_flags = map_flags);
 
        return map_create(type, "lru_hash", &map_opts);
 }
 
 static int create_hash_of_maps(void)
 {
-       struct bpf_map_create_opts map_opts = {
-               .sz = sizeof(map_opts),
+       LIBBPF_OPTS(bpf_map_create_opts, map_opts,
                .map_flags = BPF_F_NO_PREALLOC,
                .inner_map_fd = create_small_hash(),
-       };
+       );
        int ret;
 
        ret = map_create_opts(BPF_MAP_TYPE_HASH_OF_MAPS, "hash_of_maps",
index e02feb5fae97dd814775c0149893b4507cef2332..574d9a0cdc8e4a33c730bcb11a4136d60f718e86 100644 (file)
@@ -4,6 +4,7 @@
 #include <test_progs.h>
 #include <bpf/libbpf.h>
 #include <bpf/btf.h>
+#include "iters_css_task.skel.h"
 #include "cgroup_iter.skel.h"
 #include "cgroup_helpers.h"
 
@@ -263,6 +264,35 @@ close_cgrp:
        close(cgrp_fd);
 }
 
+static void test_walk_self_only_css_task(void)
+{
+       struct iters_css_task *skel;
+       int err;
+
+       skel = iters_css_task__open();
+       if (!ASSERT_OK_PTR(skel, "skel_open"))
+               return;
+
+       bpf_program__set_autoload(skel->progs.cgroup_id_printer, true);
+
+       err = iters_css_task__load(skel);
+       if (!ASSERT_OK(err, "skel_load"))
+               goto cleanup;
+
+       err = join_cgroup(cg_path[CHILD2]);
+       if (!ASSERT_OK(err, "join_cgroup"))
+               goto cleanup;
+
+       skel->bss->target_pid = getpid();
+       snprintf(expected_output, sizeof(expected_output),
+               PROLOGUE "%8llu\n" EPILOGUE, cg_id[CHILD2]);
+       read_from_cgroup_iter(skel->progs.cgroup_id_printer, cg_fd[CHILD2],
+               BPF_CGROUP_ITER_SELF_ONLY, "test_walk_self_only_css_task");
+       ASSERT_EQ(skel->bss->css_task_cnt, 1, "css_task_cnt");
+cleanup:
+       iters_css_task__destroy(skel);
+}
+
 void test_cgroup_iter(void)
 {
        struct cgroup_iter *skel = NULL;
@@ -293,6 +323,9 @@ void test_cgroup_iter(void)
                test_walk_self_only(skel);
        if (test__start_subtest("cgroup_iter__dead_self_only"))
                test_walk_dead_self_only(skel);
+       if (test__start_subtest("cgroup_iter__self_only_css_task"))
+               test_walk_self_only_css_task();
+
 out:
        cgroup_iter__destroy(skel);
        cleanup_cgroups();
index c2425791c9232f96b9430ef59058b88ee5cece7a..bf84d4a1d9ae2c68ceeac9f25373fd9df01b6935 100644 (file)
@@ -294,6 +294,7 @@ void test_iters(void)
        RUN_TESTS(iters_state_safety);
        RUN_TESTS(iters_looping);
        RUN_TESTS(iters);
+       RUN_TESTS(iters_css_task);
 
        if (env.has_testmod)
                RUN_TESTS(iters_testmod_seq);
index 214d9f4a94a538c2675337671ccaf92ef1012d8d..ea933fd151c389f11ba1559a397d5cbfe2c4583c 100644 (file)
@@ -8,7 +8,8 @@
 #include <sys/types.h>
 #include <test_progs.h>
 
-#define TDIR "/sys/kernel/debug"
+/* TDIR must be in a location we can create a directory in. */
+#define TDIR "/tmp/test_bpffs_testdir"
 
 static int read_iter(char *file)
 {
@@ -43,8 +44,11 @@ static int fn(void)
        if (!ASSERT_OK(err, "mount /"))
                goto out;
 
-       err = umount(TDIR);
-       if (!ASSERT_OK(err, "umount " TDIR))
+       err =  mkdir(TDIR, 0777);
+       /* If the directory already exists we can carry on. It may be left over
+        * from a previous run.
+        */
+       if ((err && errno != EEXIST) && !ASSERT_OK(err, "mkdir " TDIR))
                goto out;
 
        err = mount("none", TDIR, "tmpfs", 0, NULL);
@@ -138,6 +142,7 @@ out:
        rmdir(TDIR "/fs1");
        rmdir(TDIR "/fs2");
        umount(TDIR);
+       rmdir(TDIR);
        exit(err);
 }
 
index e3e68c97b40cfdff5f0a2de0dac8e30cfa2cf0c0..e5c61aa6604ace3fb1b70c22d353a065a4c9162d 100644 (file)
@@ -46,6 +46,7 @@
 #include "verifier_movsx.skel.h"
 #include "verifier_netfilter_ctx.skel.h"
 #include "verifier_netfilter_retcode.skel.h"
+#include "verifier_precision.skel.h"
 #include "verifier_prevent_map_lookup.skel.h"
 #include "verifier_raw_stack.skel.h"
 #include "verifier_raw_tp_writable.skel.h"
@@ -153,6 +154,7 @@ void test_verifier_meta_access(void)          { RUN(verifier_meta_access); }
 void test_verifier_movsx(void)                 { RUN(verifier_movsx); }
 void test_verifier_netfilter_ctx(void)        { RUN(verifier_netfilter_ctx); }
 void test_verifier_netfilter_retcode(void)    { RUN(verifier_netfilter_retcode); }
+void test_verifier_precision(void)            { RUN(verifier_precision); }
 void test_verifier_prevent_map_lookup(void)   { RUN(verifier_prevent_map_lookup); }
 void test_verifier_raw_stack(void)            { RUN(verifier_raw_stack); }
 void test_verifier_raw_tp_writable(void)      { RUN(verifier_raw_tp_writable); }
index 5089ce384a1c7afa593ef273eaed8b043770e24e..9ac758649cb8ccd0af666a865ea314c9e7fa5819 100644 (file)
@@ -10,6 +10,7 @@
 
 char _license[] SEC("license") = "GPL";
 
+struct cgroup *bpf_cgroup_acquire(struct cgroup *p) __ksym;
 struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym;
 void bpf_cgroup_release(struct cgroup *p) __ksym;
 
@@ -45,3 +46,57 @@ int BPF_PROG(iter_css_task_for_each, struct vm_area_struct *vma,
 
        return -EPERM;
 }
+
+static inline u64 cgroup_id(struct cgroup *cgrp)
+{
+       return cgrp->kn->id;
+}
+
+SEC("?iter/cgroup")
+int cgroup_id_printer(struct bpf_iter__cgroup *ctx)
+{
+       struct seq_file *seq = ctx->meta->seq;
+       struct cgroup *cgrp = ctx->cgroup;
+       struct cgroup_subsys_state *css;
+       struct task_struct *task;
+
+       /* epilogue */
+       if (cgrp == NULL) {
+               BPF_SEQ_PRINTF(seq, "epilogue\n");
+               return 0;
+       }
+
+       /* prologue */
+       if (ctx->meta->seq_num == 0)
+               BPF_SEQ_PRINTF(seq, "prologue\n");
+
+       BPF_SEQ_PRINTF(seq, "%8llu\n", cgroup_id(cgrp));
+
+       css = &cgrp->self;
+       css_task_cnt = 0;
+       bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+               if (task->pid == target_pid)
+                       css_task_cnt++;
+       }
+
+       return 0;
+}
+
+SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
+int BPF_PROG(iter_css_task_for_each_sleep)
+{
+       u64 cgrp_id = bpf_get_current_cgroup_id();
+       struct cgroup *cgrp = bpf_cgroup_from_id(cgrp_id);
+       struct cgroup_subsys_state *css;
+       struct task_struct *task;
+
+       if (cgrp == NULL)
+               return 0;
+       css = &cgrp->self;
+
+       bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) {
+
+       }
+       bpf_cgroup_release(cgrp);
+       return 0;
+}
index c3bf96a67dba9d11fcd5ee0119a88568315bb133..6b1588d706527321f15cfa2dae9e900bd4393bea 100644 (file)
@@ -84,8 +84,8 @@ int BPF_PROG(iter_css_lock_and_unlock)
        return 0;
 }
 
-SEC("?fentry.s/" SYS_PREFIX "sys_getpgid")
-__failure __msg("css_task_iter is only allowed in bpf_lsm and bpf iter-s")
+SEC("?fentry/" SYS_PREFIX "sys_getpgid")
+__failure __msg("css_task_iter is only allowed in bpf_lsm, bpf_iter and sleepable progs")
 int BPF_PROG(iter_css_task_for_each)
 {
        u64 cg_id = bpf_get_current_cgroup_id();
diff --git a/tools/testing/selftests/bpf/progs/verifier_precision.c b/tools/testing/selftests/bpf/progs/verifier_precision.c
new file mode 100644 (file)
index 0000000..193c0f8
--- /dev/null
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023 SUSE LLC */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0xfffffff8 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (87) r2 = -r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 8")
+__naked int bpf_neg(void)
+{
+       asm volatile (
+               "r2 = 8;"
+               "r2 = -r2;"
+               "if r2 != -8 goto 1f;"
+               "r1 = r10;"
+               "r1 += r2;"
+       "1:"
+               "r0 = 0;"
+               "exit;"
+               ::: __clobber_all);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (d4) r2 = le16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_to_le(void)
+{
+       asm volatile (
+               "r2 = 0;"
+               "r2 = le16 r2;"
+               "if r2 != 0 goto 1f;"
+               "r1 = r10;"
+               "r1 += r2;"
+       "1:"
+               "r0 = 0;"
+               "exit;"
+               ::: __clobber_all);
+}
+
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (dc) r2 = be16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_to_be(void)
+{
+       asm volatile (
+               "r2 = 0;"
+               "r2 = be16 r2;"
+               "if r2 != 0 goto 1f;"
+               "r1 = r10;"
+               "r1 += r2;"
+       "1:"
+               "r0 = 0;"
+               "exit;"
+               ::: __clobber_all);
+}
+
+#if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
+       (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
+       defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390)) && \
+       __clang_major__ >= 18
+
+SEC("?raw_tp")
+__success __log_level(2)
+__msg("mark_precise: frame0: regs=r2 stack= before 3: (bf) r1 = r10")
+__msg("mark_precise: frame0: regs=r2 stack= before 2: (55) if r2 != 0x0 goto pc+2")
+__msg("mark_precise: frame0: regs=r2 stack= before 1: (d7) r2 = bswap16 r2")
+__msg("mark_precise: frame0: regs=r2 stack= before 0: (b7) r2 = 0")
+__naked int bpf_end_bswap(void)
+{
+       asm volatile (
+               "r2 = 0;"
+               "r2 = bswap16 r2;"
+               "if r2 != 0 goto 1f;"
+               "r1 = r10;"
+               "r1 += r2;"
+       "1:"
+               "r0 = 0;"
+               "exit;"
+               ::: __clobber_all);
+}
+
+#endif /* v4 instruction */
index 3af2501082b29b625f7de508059971cb2f996acb..b616575c3b00a5b74a5e4af180fe91004258e506 100644 (file)
        .expected_attach_type = BPF_SK_LOOKUP,
        .runs = -1,
 },
+{
+       "BPF_ST_MEM stack imm sign",
+       /* Check if verifier correctly reasons about sign of an
+        * immediate spilled to stack by BPF_ST instruction.
+        *
+        *   fp[-8] = -44;
+        *   r0 = fp[-8];
+        *   if r0 s< 0 goto ret0;
+        *   r0 = -1;
+        *   exit;
+        * ret0:
+        *   r0 = 0;
+        *   exit;
+        */
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, -44),
+       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+       BPF_JMP_IMM(BPF_JSLT, BPF_REG_0, 0, 2),
+       BPF_MOV64_IMM(BPF_REG_0, -1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       /* Use prog type that requires return value in range [0, 1] */
+       .prog_type = BPF_PROG_TYPE_SK_LOOKUP,
+       .expected_attach_type = BPF_SK_LOOKUP,
+       .result = VERBOSE_ACCEPT,
+       .runs = -1,
+       .errstr = "0: (7a) *(u64 *)(r10 -8) = -44        ; R10=fp0 fp-8_w=-44\
+       2: (c5) if r0 s< 0x0 goto pc+2\
+       R0_w=-44",
+},
index 17c0f92ff160294705157a9448a4ec0a15400bca..c3ba40d0b9de4617eea41095d1a69fee2a872a98 100644 (file)
@@ -430,7 +430,7 @@ static void print_usage(void)
 
 static void read_args(int argc, char *argv[])
 {
-       char opt;
+       int opt;
 
        while ((opt = getopt(argc, argv, "mh")) != -1) {
                switch (opt) {
index f838dd370f6af3ef252f0f77bf11e72aa80bed84..b3b2dc5a630cf2bd84df3c74e86652769e844a01 100755 (executable)
@@ -2048,7 +2048,7 @@ run_test() {
        case $ret in
                0)
                        all_skipped=false
-                       [ $exitcode=$ksft_skip ] && exitcode=0
+                       [ $exitcode -eq $ksft_skip ] && exitcode=0
                ;;
                $ksft_skip)
                        [ $all_skipped = true ] && exitcode=$ksft_skip
index ebdbb3c22e5426dfaec5b9102dd42c6788bada5c..f224b84591fbfca4bc05f997fd5137928680da0c 100644 (file)
@@ -2,9 +2,14 @@
 # Copyright (C) 2021 ARM Limited
 # Originally tools/testing/arm64/abi/Makefile
 
-TEST_GEN_PROGS := hwprobe
+CFLAGS += -I$(top_srcdir)/tools/include
+
+TEST_GEN_PROGS := hwprobe cbo
 
 include ../../lib.mk
 
 $(OUTPUT)/hwprobe: hwprobe.c sys_hwprobe.S
-       $(CC) -o$@ $(CFLAGS) $(LDFLAGS) $^
+       $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^
+
+$(OUTPUT)/cbo: cbo.c sys_hwprobe.S
+       $(CC) -static -o$@ $(CFLAGS) $(LDFLAGS) $^
diff --git a/tools/testing/selftests/riscv/hwprobe/cbo.c b/tools/testing/selftests/riscv/hwprobe/cbo.c
new file mode 100644 (file)
index 0000000..50a2cc8
--- /dev/null
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2023 Ventana Micro Systems Inc.
+ *
+ * Run with 'taskset -c <cpu-list> cbo' to only execute hwprobe on a
+ * subset of cpus, as well as only executing the tests on those cpus.
+ */
+#define _GNU_SOURCE
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <sched.h>
+#include <signal.h>
+#include <assert.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <asm/ucontext.h>
+
+#include "hwprobe.h"
+#include "../../kselftest.h"
+
+#define MK_CBO(fn) cpu_to_le32((fn) << 20 | 10 << 15 | 2 << 12 | 0 << 7 | 15)
+
+static char mem[4096] __aligned(4096) = { [0 ... 4095] = 0xa5 };
+
+static bool illegal_insn;
+
+static void sigill_handler(int sig, siginfo_t *info, void *context)
+{
+       unsigned long *regs = (unsigned long *)&((ucontext_t *)context)->uc_mcontext;
+       uint32_t insn = *(uint32_t *)regs[0];
+
+       assert(insn == MK_CBO(regs[11]));
+
+       illegal_insn = true;
+       regs[0] += 4;
+}
+
+static void cbo_insn(char *base, int fn)
+{
+       uint32_t insn = MK_CBO(fn);
+
+       asm volatile(
+       "mv     a0, %0\n"
+       "li     a1, %1\n"
+       ".4byte %2\n"
+       : : "r" (base), "i" (fn), "i" (insn) : "a0", "a1", "memory");
+}
+
+static void cbo_inval(char *base) { cbo_insn(base, 0); }
+static void cbo_clean(char *base) { cbo_insn(base, 1); }
+static void cbo_flush(char *base) { cbo_insn(base, 2); }
+static void cbo_zero(char *base)  { cbo_insn(base, 4); }
+
+static void test_no_zicbom(void *arg)
+{
+       ksft_print_msg("Testing Zicbom instructions remain privileged\n");
+
+       illegal_insn = false;
+       cbo_clean(&mem[0]);
+       ksft_test_result(illegal_insn, "No cbo.clean\n");
+
+       illegal_insn = false;
+       cbo_flush(&mem[0]);
+       ksft_test_result(illegal_insn, "No cbo.flush\n");
+
+       illegal_insn = false;
+       cbo_inval(&mem[0]);
+       ksft_test_result(illegal_insn, "No cbo.inval\n");
+}
+
+static void test_no_zicboz(void *arg)
+{
+       ksft_print_msg("No Zicboz, testing cbo.zero remains privileged\n");
+
+       illegal_insn = false;
+       cbo_zero(&mem[0]);
+       ksft_test_result(illegal_insn, "No cbo.zero\n");
+}
+
+static bool is_power_of_2(__u64 n)
+{
+       return n != 0 && (n & (n - 1)) == 0;
+}
+
+static void test_zicboz(void *arg)
+{
+       struct riscv_hwprobe pair = {
+               .key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE,
+       };
+       cpu_set_t *cpus = (cpu_set_t *)arg;
+       __u64 block_size;
+       int i, j;
+       long rc;
+
+       rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)cpus, 0);
+       block_size = pair.value;
+       ksft_test_result(rc == 0 && pair.key == RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE &&
+                        is_power_of_2(block_size), "Zicboz block size\n");
+       ksft_print_msg("Zicboz block size: %llu\n", block_size);
+
+       illegal_insn = false;
+       cbo_zero(&mem[block_size]);
+       ksft_test_result(!illegal_insn, "cbo.zero\n");
+
+       if (illegal_insn || !is_power_of_2(block_size)) {
+               ksft_test_result_skip("cbo.zero check\n");
+               return;
+       }
+
+       assert(block_size <= 1024);
+
+       for (i = 0; i < 4096 / block_size; ++i) {
+               if (i % 2)
+                       cbo_zero(&mem[i * block_size]);
+       }
+
+       for (i = 0; i < 4096 / block_size; ++i) {
+               char expected = i % 2 ? 0x0 : 0xa5;
+
+               for (j = 0; j < block_size; ++j) {
+                       if (mem[i * block_size + j] != expected) {
+                               ksft_test_result_fail("cbo.zero check\n");
+                               ksft_print_msg("cbo.zero check: mem[%llu] != 0x%x\n",
+                                              i * block_size + j, expected);
+                               return;
+                       }
+               }
+       }
+
+       ksft_test_result_pass("cbo.zero check\n");
+}
+
+static void check_no_zicboz_cpus(cpu_set_t *cpus)
+{
+       struct riscv_hwprobe pair = {
+               .key = RISCV_HWPROBE_KEY_IMA_EXT_0,
+       };
+       cpu_set_t one_cpu;
+       int i = 0, c = 0;
+       long rc;
+
+       while (i++ < CPU_COUNT(cpus)) {
+               while (!CPU_ISSET(c, cpus))
+                       ++c;
+
+               CPU_ZERO(&one_cpu);
+               CPU_SET(c, &one_cpu);
+
+               rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&one_cpu, 0);
+               assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0);
+
+               if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ)
+                       ksft_exit_fail_msg("Zicboz is only present on a subset of harts.\n"
+                                          "Use taskset to select a set of harts where Zicboz\n"
+                                          "presence (present or not) is consistent for each hart\n");
+               ++c;
+       }
+}
+
+enum {
+       TEST_ZICBOZ,
+       TEST_NO_ZICBOZ,
+       TEST_NO_ZICBOM,
+};
+
+static struct test_info {
+       bool enabled;
+       unsigned int nr_tests;
+       void (*test_fn)(void *arg);
+} tests[] = {
+       [TEST_ZICBOZ]           = { .nr_tests = 3, test_zicboz },
+       [TEST_NO_ZICBOZ]        = { .nr_tests = 1, test_no_zicboz },
+       [TEST_NO_ZICBOM]        = { .nr_tests = 3, test_no_zicbom },
+};
+
+int main(int argc, char **argv)
+{
+       struct sigaction act = {
+               .sa_sigaction = &sigill_handler,
+               .sa_flags = SA_SIGINFO,
+       };
+       struct riscv_hwprobe pair;
+       unsigned int plan = 0;
+       cpu_set_t cpus;
+       long rc;
+       int i;
+
+       if (argc > 1 && !strcmp(argv[1], "--sigill")) {
+               rc = sigaction(SIGILL, &act, NULL);
+               assert(rc == 0);
+               tests[TEST_NO_ZICBOZ].enabled = true;
+               tests[TEST_NO_ZICBOM].enabled = true;
+       }
+
+       rc = sched_getaffinity(0, sizeof(cpu_set_t), &cpus);
+       assert(rc == 0);
+
+       ksft_print_header();
+
+       pair.key = RISCV_HWPROBE_KEY_IMA_EXT_0;
+       rc = riscv_hwprobe(&pair, 1, sizeof(cpu_set_t), (unsigned long *)&cpus, 0);
+       if (rc < 0)
+               ksft_exit_fail_msg("hwprobe() failed with %ld\n", rc);
+       assert(rc == 0 && pair.key == RISCV_HWPROBE_KEY_IMA_EXT_0);
+
+       if (pair.value & RISCV_HWPROBE_EXT_ZICBOZ) {
+               tests[TEST_ZICBOZ].enabled = true;
+               tests[TEST_NO_ZICBOZ].enabled = false;
+       } else {
+               check_no_zicboz_cpus(&cpus);
+       }
+
+       for (i = 0; i < ARRAY_SIZE(tests); ++i)
+               plan += tests[i].enabled ? tests[i].nr_tests : 0;
+
+       if (plan == 0)
+               ksft_print_msg("No tests enabled.\n");
+       else
+               ksft_set_plan(plan);
+
+       for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+               if (tests[i].enabled)
+                       tests[i].test_fn(&cpus);
+       }
+
+       ksft_finished();
+}
index 09f290a674206a9837b855eb07af0b2ab2bb8ae6..c474891df307140fdd21a2a89b4d495790b47181 100644 (file)
@@ -1,14 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
-#include <stddef.h>
-#include <asm/hwprobe.h>
-
-/*
- * Rather than relying on having a new enough libc to define this, just do it
- * ourselves.  This way we don't need to be coupled to a new-enough libc to
- * contain the call.
- */
-long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
-                  size_t cpu_count, unsigned long *cpus, unsigned int flags);
+#include "hwprobe.h"
+#include "../../kselftest.h"
 
 int main(int argc, char **argv)
 {
@@ -16,6 +8,9 @@ int main(int argc, char **argv)
        unsigned long cpus;
        long out;
 
+       ksft_print_header();
+       ksft_set_plan(5);
+
        /* Fake the CPU_SET ops. */
        cpus = -1;
 
@@ -25,13 +20,16 @@ int main(int argc, char **argv)
         */
        for (long i = 0; i < 8; i++)
                pairs[i].key = i;
+
        out = riscv_hwprobe(pairs, 8, 1, &cpus, 0);
        if (out != 0)
-               return -1;
+               ksft_exit_fail_msg("hwprobe() failed with %ld\n", out);
+
        for (long i = 0; i < 4; ++i) {
                /* Fail if the kernel claims not to recognize a base key. */
                if ((i < 4) && (pairs[i].key != i))
-                       return -2;
+                       ksft_exit_fail_msg("Failed to recognize base key: key != i, "
+                                          "key=%ld, i=%ld\n", pairs[i].key, i);
 
                if (pairs[i].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR)
                        continue;
@@ -39,52 +37,30 @@ int main(int argc, char **argv)
                if (pairs[i].value & RISCV_HWPROBE_BASE_BEHAVIOR_IMA)
                        continue;
 
-               return -3;
+               ksft_exit_fail_msg("Unexpected pair: (%ld, %ld)\n", pairs[i].key, pairs[i].value);
        }
 
-       /*
-        * This should also work with a NULL CPU set, but should not work
-        * with an improperly supplied CPU set.
-        */
        out = riscv_hwprobe(pairs, 8, 0, 0, 0);
-       if (out != 0)
-               return -4;
+       ksft_test_result(out == 0, "NULL CPU set\n");
 
        out = riscv_hwprobe(pairs, 8, 0, &cpus, 0);
-       if (out == 0)
-               return -5;
+       ksft_test_result(out != 0, "Bad CPU set\n");
 
        out = riscv_hwprobe(pairs, 8, 1, 0, 0);
-       if (out == 0)
-               return -6;
+       ksft_test_result(out != 0, "NULL CPU set with non-zero count\n");
 
-       /*
-        * Check that keys work by providing one that we know exists, and
-        * checking to make sure the resultig pair is what we asked for.
-        */
        pairs[0].key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR;
        out = riscv_hwprobe(pairs, 1, 1, &cpus, 0);
-       if (out != 0)
-               return -7;
-       if (pairs[0].key != RISCV_HWPROBE_KEY_BASE_BEHAVIOR)
-               return -8;
+       ksft_test_result(out == 0 && pairs[0].key == RISCV_HWPROBE_KEY_BASE_BEHAVIOR,
+                        "Existing key is maintained\n");
 
-       /*
-        * Check that an unknown key gets overwritten with -1,
-        * but doesn't block elements after it.
-        */
        pairs[0].key = 0x5555;
        pairs[1].key = 1;
        pairs[1].value = 0xAAAA;
        out = riscv_hwprobe(pairs, 2, 0, 0, 0);
-       if (out != 0)
-               return -9;
-
-       if (pairs[0].key != -1)
-               return -10;
-
-       if ((pairs[1].key != 1) || (pairs[1].value == 0xAAAA))
-               return -11;
+       ksft_test_result(out == 0 && pairs[0].key == -1 &&
+                        pairs[1].key == 1 && pairs[1].value != 0xAAAA,
+                        "Unknown key overwritten with -1 and doesn't block other elements\n");
 
-       return 0;
+       ksft_finished();
 }
diff --git a/tools/testing/selftests/riscv/hwprobe/hwprobe.h b/tools/testing/selftests/riscv/hwprobe/hwprobe.h
new file mode 100644 (file)
index 0000000..721b0ce
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_RISCV_HWPROBE_H
+#define SELFTEST_RISCV_HWPROBE_H
+#include <stddef.h>
+#include <asm/hwprobe.h>
+
+/*
+ * Rather than relying on having a new enough libc to define this, just do it
+ * ourselves.  This way we don't need to be coupled to a new-enough libc to
+ * contain the call.
+ */
+long riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+                  size_t cpu_count, unsigned long *cpus, unsigned int flags);
+
+#endif
index 92336721321af7f1760c609d07db07e0a0bc65ce..ae2b33c21c452a7305bf4239aee33036327f8e8c 100644 (file)
@@ -85,6 +85,48 @@ void vsock_wait_remote_close(int fd)
        close(epollfd);
 }
 
+/* Bind to <bind_port>, connect to <cid, port> and return the file descriptor. */
+int vsock_bind_connect(unsigned int cid, unsigned int port, unsigned int bind_port, int type)
+{
+       struct sockaddr_vm sa_client = {
+               .svm_family = AF_VSOCK,
+               .svm_cid = VMADDR_CID_ANY,
+               .svm_port = bind_port,
+       };
+       struct sockaddr_vm sa_server = {
+               .svm_family = AF_VSOCK,
+               .svm_cid = cid,
+               .svm_port = port,
+       };
+
+       int client_fd, ret;
+
+       client_fd = socket(AF_VSOCK, type, 0);
+       if (client_fd < 0) {
+               perror("socket");
+               exit(EXIT_FAILURE);
+       }
+
+       if (bind(client_fd, (struct sockaddr *)&sa_client, sizeof(sa_client))) {
+               perror("bind");
+               exit(EXIT_FAILURE);
+       }
+
+       timeout_begin(TIMEOUT);
+       do {
+               ret = connect(client_fd, (struct sockaddr *)&sa_server, sizeof(sa_server));
+               timeout_check("connect");
+       } while (ret < 0 && errno == EINTR);
+       timeout_end();
+
+       if (ret < 0) {
+               perror("connect");
+               exit(EXIT_FAILURE);
+       }
+
+       return client_fd;
+}
+
 /* Connect to <cid, port> and return the file descriptor. */
 static int vsock_connect(unsigned int cid, unsigned int port, int type)
 {
@@ -104,6 +146,10 @@ static int vsock_connect(unsigned int cid, unsigned int port, int type)
        control_expectln("LISTENING");
 
        fd = socket(AF_VSOCK, type, 0);
+       if (fd < 0) {
+               perror("socket");
+               exit(EXIT_FAILURE);
+       }
 
        timeout_begin(TIMEOUT);
        do {
@@ -132,11 +178,8 @@ int vsock_seqpacket_connect(unsigned int cid, unsigned int port)
        return vsock_connect(cid, port, SOCK_SEQPACKET);
 }
 
-/* Listen on <cid, port> and return the first incoming connection.  The remote
- * address is stored to clientaddrp.  clientaddrp may be NULL.
- */
-static int vsock_accept(unsigned int cid, unsigned int port,
-                       struct sockaddr_vm *clientaddrp, int type)
+/* Listen on <cid, port> and return the file descriptor. */
+static int vsock_listen(unsigned int cid, unsigned int port, int type)
 {
        union {
                struct sockaddr sa;
@@ -148,16 +191,13 @@ static int vsock_accept(unsigned int cid, unsigned int port,
                        .svm_cid = cid,
                },
        };
-       union {
-               struct sockaddr sa;
-               struct sockaddr_vm svm;
-       } clientaddr;
-       socklen_t clientaddr_len = sizeof(clientaddr.svm);
        int fd;
-       int client_fd;
-       int old_errno;
 
        fd = socket(AF_VSOCK, type, 0);
+       if (fd < 0) {
+               perror("socket");
+               exit(EXIT_FAILURE);
+       }
 
        if (bind(fd, &addr.sa, sizeof(addr.svm)) < 0) {
                perror("bind");
@@ -169,6 +209,24 @@ static int vsock_accept(unsigned int cid, unsigned int port,
                exit(EXIT_FAILURE);
        }
 
+       return fd;
+}
+
+/* Listen on <cid, port> and return the first incoming connection.  The remote
+ * address is stored to clientaddrp.  clientaddrp may be NULL.
+ */
+static int vsock_accept(unsigned int cid, unsigned int port,
+                       struct sockaddr_vm *clientaddrp, int type)
+{
+       union {
+               struct sockaddr sa;
+               struct sockaddr_vm svm;
+       } clientaddr;
+       socklen_t clientaddr_len = sizeof(clientaddr.svm);
+       int fd, client_fd, old_errno;
+
+       fd = vsock_listen(cid, port, type);
+
        control_writeln("LISTENING");
 
        timeout_begin(TIMEOUT);
@@ -207,6 +265,11 @@ int vsock_stream_accept(unsigned int cid, unsigned int port,
        return vsock_accept(cid, port, clientaddrp, SOCK_STREAM);
 }
 
+int vsock_stream_listen(unsigned int cid, unsigned int port)
+{
+       return vsock_listen(cid, port, SOCK_STREAM);
+}
+
 int vsock_seqpacket_accept(unsigned int cid, unsigned int port,
                           struct sockaddr_vm *clientaddrp)
 {
index a77175d25864c7a628400c01dff790e4a8141095..03c88d0cb8610b5d106379fe36a61c8734cd4f5f 100644 (file)
@@ -36,9 +36,12 @@ struct test_case {
 void init_signals(void);
 unsigned int parse_cid(const char *str);
 int vsock_stream_connect(unsigned int cid, unsigned int port);
+int vsock_bind_connect(unsigned int cid, unsigned int port,
+                      unsigned int bind_port, int type);
 int vsock_seqpacket_connect(unsigned int cid, unsigned int port);
 int vsock_stream_accept(unsigned int cid, unsigned int port,
                        struct sockaddr_vm *clientaddrp);
+int vsock_stream_listen(unsigned int cid, unsigned int port);
 int vsock_seqpacket_accept(unsigned int cid, unsigned int port,
                           struct sockaddr_vm *clientaddrp);
 void vsock_wait_remote_close(int fd);
index c1f7bc9abd22319fb34fcf8b437071f0493153b0..5b0e93f9996cb18cc390d33645132532ab6bb85c 100644 (file)
@@ -1180,6 +1180,51 @@ static void test_stream_shutrd_server(const struct test_opts *opts)
        close(fd);
 }
 
+static void test_double_bind_connect_server(const struct test_opts *opts)
+{
+       int listen_fd, client_fd, i;
+       struct sockaddr_vm sa_client;
+       socklen_t socklen_client = sizeof(sa_client);
+
+       listen_fd = vsock_stream_listen(VMADDR_CID_ANY, 1234);
+
+       for (i = 0; i < 2; i++) {
+               control_writeln("LISTENING");
+
+               timeout_begin(TIMEOUT);
+               do {
+                       client_fd = accept(listen_fd, (struct sockaddr *)&sa_client,
+                                          &socklen_client);
+                       timeout_check("accept");
+               } while (client_fd < 0 && errno == EINTR);
+               timeout_end();
+
+               if (client_fd < 0) {
+                       perror("accept");
+                       exit(EXIT_FAILURE);
+               }
+
+               /* Waiting for remote peer to close connection */
+               vsock_wait_remote_close(client_fd);
+       }
+
+       close(listen_fd);
+}
+
+static void test_double_bind_connect_client(const struct test_opts *opts)
+{
+       int i, client_fd;
+
+       for (i = 0; i < 2; i++) {
+               /* Wait until server is ready to accept a new connection */
+               control_expectln("LISTENING");
+
+               client_fd = vsock_bind_connect(opts->peer_cid, 1234, 4321, SOCK_STREAM);
+
+               close(client_fd);
+       }
+}
+
 static struct test_case test_cases[] = {
        {
                .name = "SOCK_STREAM connection reset",
@@ -1285,6 +1330,11 @@ static struct test_case test_cases[] = {
                .run_client = test_stream_msgzcopy_empty_errq_client,
                .run_server = test_stream_msgzcopy_empty_errq_server,
        },
+       {
+               .name = "SOCK_STREAM double bind connect",
+               .run_client = test_double_bind_connect_client,
+               .run_server = test_double_bind_connect_server,
+       },
        {},
 };